1 |
commit: ab2870232ca91c7537058c64caf2012d0a2945d8 |
2 |
Author: Sam James <sam <AT> gentoo <DOT> org> |
3 |
AuthorDate: Fri Dec 30 20:19:15 2022 +0000 |
4 |
Commit: Sam James <sam <AT> gentoo <DOT> org> |
5 |
CommitDate: Fri Dec 30 20:21:12 2022 +0000 |
6 |
URL: https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=ab287023 |
7 |
|
8 |
dev-db/mongodb: fix build w/ boost 1.81 |
9 |
|
10 |
Closes: https://bugs.gentoo.org/887037 |
11 |
Signed-off-by: Sam James <sam <AT> gentoo.org> |
12 |
|
13 |
.../mongodb/files/mongodb-4.4.10-boost-1.81.patch | 317 +++++++++++++++++++++ |
14 |
dev-db/mongodb/mongodb-4.4.10-r1.ebuild | 1 + |
15 |
dev-db/mongodb/mongodb-5.0.5-r2.ebuild | 1 + |
16 |
3 files changed, 319 insertions(+) |
17 |
|
18 |
diff --git a/dev-db/mongodb/files/mongodb-4.4.10-boost-1.81.patch b/dev-db/mongodb/files/mongodb-4.4.10-boost-1.81.patch |
19 |
new file mode 100644 |
20 |
index 000000000000..331f0c5b922a |
21 |
--- /dev/null |
22 |
+++ b/dev-db/mongodb/files/mongodb-4.4.10-boost-1.81.patch |
23 |
@@ -0,0 +1,317 @@ |
24 |
+https://bugs.gentoo.org/887037 |
25 |
+ |
26 |
+Workaround https://github.com/boostorg/container/commit/99091420ae553b27345e04279fd19fe24fb684c1 |
27 |
+in Boost 1.81. |
28 |
+ |
29 |
+Upstream s2 (as in real upstream, not MongoDB) has deviated substantially |
30 |
+from the version vendored. |
31 |
+--- a/src/third_party/s2/base/stl_decl_msvc.h |
32 |
++++ b/src/third_party/s2/base/stl_decl_msvc.h |
33 |
+@@ -118,8 +118,8 @@ namespace msvchash { |
34 |
+ class hash_multimap; |
35 |
+ } // end namespace msvchash |
36 |
+ |
37 |
+-using msvchash::hash_set; |
38 |
+-using msvchash::hash_map; |
39 |
++#define my_hash_set msvchash::hash_set |
40 |
++#define my_hash_map msvchash::hash_map |
41 |
+ using msvchash::hash; |
42 |
+ using msvchash::hash_multimap; |
43 |
+ using msvchash::hash_multiset; |
44 |
+--- a/src/third_party/s2/base/stl_decl_osx.h |
45 |
++++ b/src/third_party/s2/base/stl_decl_osx.h |
46 |
+@@ -68,8 +68,8 @@ using std::string; |
47 |
+ |
48 |
+ using namespace std; |
49 |
+ using __gnu_cxx::hash; |
50 |
+-using __gnu_cxx::hash_set; |
51 |
+-using __gnu_cxx::hash_map; |
52 |
++#define my_hash_set __gnu_cxx::hash_set |
53 |
++#define my_hash_map __gnu_cxx::hash_map |
54 |
+ using __gnu_cxx::select1st; |
55 |
+ |
56 |
+ /* On Linux (and gdrive on OSX), this comes from places like |
57 |
+--- a/src/third_party/s2/hash.h |
58 |
++++ b/src/third_party/s2/hash.h |
59 |
+@@ -2,10 +2,10 @@ |
60 |
+ #define THIRD_PARTY_S2_HASH_H_ |
61 |
+ |
62 |
+ #include <unordered_map> |
63 |
+-#define hash_map std::unordered_map |
64 |
++#define my_hash_map std::unordered_map |
65 |
+ |
66 |
+ #include <unordered_set> |
67 |
+-#define hash_set std::unordered_set |
68 |
++#define my_hash_set std::unordered_set |
69 |
+ |
70 |
+ #define HASH_NAMESPACE_START namespace std { |
71 |
+ #define HASH_NAMESPACE_END } |
72 |
+--- a/src/third_party/s2/s2_test.cc |
73 |
++++ b/src/third_party/s2/s2_test.cc |
74 |
+@@ -10,7 +10,7 @@ using std::reverse; |
75 |
+ |
76 |
+ #include <hash_set> |
77 |
+ #include <hash_map> |
78 |
+-using __gnu_cxx::hash_set; |
79 |
++#define my_hash_set __gnu_cxx::hash_set |
80 |
+ |
81 |
+ #include "s2.h" |
82 |
+ #include "base/logging.h" |
83 |
+@@ -709,8 +709,8 @@ TEST(S2, Frames) { |
84 |
+ #if 0 |
85 |
+ TEST(S2, S2PointHashSpreads) { |
86 |
+ int kTestPoints = 1 << 16; |
87 |
+- hash_set<size_t> set; |
88 |
+- hash_set<S2Point> points; |
89 |
++ my_hash_set<size_t> set; |
90 |
++ my_hash_set<S2Point> points; |
91 |
+ hash<S2Point> hasher; |
92 |
+ S2Point base = S2Point(1, 1, 1); |
93 |
+ for (int i = 0; i < kTestPoints; ++i) { |
94 |
+@@ -733,7 +733,7 @@ TEST(S2, S2PointHashCollapsesZero) { |
95 |
+ double minus_zero = -zero; |
96 |
+ EXPECT_NE(*reinterpret_cast<uint64 const*>(&zero), |
97 |
+ *reinterpret_cast<uint64 const*>(&minus_zero)); |
98 |
+- hash_map<S2Point, int> map; |
99 |
++ my_hash_map<S2Point, int> map; |
100 |
+ S2Point zero_pt(zero, zero, zero); |
101 |
+ S2Point minus_zero_pt(minus_zero, minus_zero, minus_zero); |
102 |
+ |
103 |
+--- a/src/third_party/s2/s2cellid_test.cc |
104 |
++++ b/src/third_party/s2/s2cellid_test.cc |
105 |
+@@ -10,7 +10,7 @@ using std::reverse; |
106 |
+ |
107 |
+ #include <cstdio> |
108 |
+ #include <hash_map> |
109 |
+-using __gnu_cxx::hash_map; |
110 |
++#define my_hash_map __gnu_cxx::hash_map |
111 |
+ |
112 |
+ #include <sstream> |
113 |
+ #include <vector> |
114 |
+@@ -170,7 +170,7 @@ TEST(S2CellId, Tokens) { |
115 |
+ static const int kMaxExpandLevel = 3; |
116 |
+ |
117 |
+ static void ExpandCell(S2CellId const& parent, vector<S2CellId>* cells, |
118 |
+- hash_map<S2CellId, S2CellId>* parent_map) { |
119 |
++ my_hash_map<S2CellId, S2CellId>* parent_map) { |
120 |
+ cells->push_back(parent); |
121 |
+ if (parent.level() == kMaxExpandLevel) return; |
122 |
+ int i, j, orientation; |
123 |
+@@ -194,7 +194,7 @@ static void ExpandCell(S2CellId const& parent, vector<S2CellId>* cells, |
124 |
+ |
125 |
+ TEST(S2CellId, Containment) { |
126 |
+ // Test contains() and intersects(). |
127 |
+- hash_map<S2CellId, S2CellId> parent_map; |
128 |
++ my_hash_map<S2CellId, S2CellId> parent_map; |
129 |
+ vector<S2CellId> cells; |
130 |
+ for (int face = 0; face < 6; ++face) { |
131 |
+ ExpandCell(S2CellId::FromFacePosLevel(face, 0, 0), &cells, &parent_map); |
132 |
+--- a/src/third_party/s2/s2loop.cc |
133 |
++++ b/src/third_party/s2/s2loop.cc |
134 |
+@@ -120,7 +120,7 @@ bool S2Loop::IsValid(string* err) const { |
135 |
+ } |
136 |
+ } |
137 |
+ // Loops are not allowed to have any duplicate vertices. |
138 |
+- hash_map<S2Point, int> vmap; |
139 |
++ my_hash_map<S2Point, int> vmap; |
140 |
+ for (int i = 0; i < num_vertices(); ++i) { |
141 |
+ if (!vmap.insert(make_pair(vertex(i), i)).second) { |
142 |
+ VLOG(2) << "Duplicate vertices: " << vmap[vertex(i)] << " and " << i; |
143 |
+--- a/src/third_party/s2/s2polygon.cc |
144 |
++++ b/src/third_party/s2/s2polygon.cc |
145 |
+@@ -117,7 +117,7 @@ HASH_NAMESPACE_END |
146 |
+ bool S2Polygon::IsValid(const vector<S2Loop*>& loops, string* err) { |
147 |
+ // If a loop contains an edge AB, then no other loop may contain AB or BA. |
148 |
+ if (loops.size() > 1) { |
149 |
+- hash_map<S2PointPair, pair<int, int> > edges; |
150 |
++ my_hash_map<S2PointPair, pair<int, int> > edges; |
151 |
+ for (size_t i = 0; i < loops.size(); ++i) { |
152 |
+ S2Loop* lp = loops[i]; |
153 |
+ for (int j = 0; j < lp->num_vertices(); ++j) { |
154 |
+--- a/src/third_party/s2/s2polygonbuilder.cc |
155 |
++++ b/src/third_party/s2/s2polygonbuilder.cc |
156 |
+@@ -175,7 +175,7 @@ S2Loop* S2PolygonBuilder::AssembleLoop(S2Point const& v0, S2Point const& v1, |
157 |
+ // This ensures that only CCW loops are constructed when possible. |
158 |
+ |
159 |
+ vector<S2Point> path; // The path so far. |
160 |
+- hash_map<S2Point, int> index; // Maps a vertex to its index in "path". |
161 |
++ my_hash_map<S2Point, int> index; // Maps a vertex to its index in "path". |
162 |
+ path.push_back(v0); |
163 |
+ path.push_back(v1); |
164 |
+ index[v1] = 1; |
165 |
+@@ -361,7 +361,7 @@ void S2PolygonBuilder::BuildMergeMap(PointIndex* index, MergeMap* merge_map) { |
166 |
+ |
167 |
+ // First, we build the set of all the distinct vertices in the input. |
168 |
+ // We need to include the source and destination of every edge. |
169 |
+- hash_set<S2Point> vertices; |
170 |
++ my_hash_set<S2Point> vertices; |
171 |
+ for (EdgeSet::const_iterator i = edges_->begin(); i != edges_->end(); ++i) { |
172 |
+ vertices.insert(i->first); |
173 |
+ VertexSet const& vset = i->second; |
174 |
+@@ -370,7 +370,7 @@ void S2PolygonBuilder::BuildMergeMap(PointIndex* index, MergeMap* merge_map) { |
175 |
+ } |
176 |
+ |
177 |
+ // Build a spatial index containing all the distinct vertices. |
178 |
+- for (hash_set<S2Point>::const_iterator i = vertices.begin(); |
179 |
++ for (my_hash_set<S2Point>::const_iterator i = vertices.begin(); |
180 |
+ i != vertices.end(); ++i) { |
181 |
+ index->Insert(*i); |
182 |
+ } |
183 |
+@@ -378,7 +378,7 @@ void S2PolygonBuilder::BuildMergeMap(PointIndex* index, MergeMap* merge_map) { |
184 |
+ // Next, we loop through all the vertices and attempt to grow a maximial |
185 |
+ // mergeable group starting from each vertex. |
186 |
+ vector<S2Point> frontier, mergeable; |
187 |
+- for (hash_set<S2Point>::const_iterator vstart = vertices.begin(); |
188 |
++ for (my_hash_set<S2Point>::const_iterator vstart = vertices.begin(); |
189 |
+ vstart != vertices.end(); ++vstart) { |
190 |
+ // Skip any vertices that have already been merged with another vertex. |
191 |
+ if (merge_map->find(*vstart) != merge_map->end()) continue; |
192 |
+--- a/src/third_party/s2/s2polygonbuilder.h |
193 |
++++ b/src/third_party/s2/s2polygonbuilder.h |
194 |
+@@ -262,7 +262,7 @@ class S2PolygonBuilder { |
195 |
+ // current position to a new position, and also returns a spatial index |
196 |
+ // containing all of the vertices that do not need to be moved. |
197 |
+ class PointIndex; |
198 |
+- typedef hash_map<S2Point, S2Point> MergeMap; |
199 |
++ typedef my_hash_map<S2Point, S2Point> MergeMap; |
200 |
+ void BuildMergeMap(PointIndex* index, MergeMap* merge_map); |
201 |
+ |
202 |
+ // Moves a set of vertices from old to new positions. |
203 |
+@@ -282,7 +282,7 @@ class S2PolygonBuilder { |
204 |
+ // once. We could have also used a multiset<pair<S2Point, S2Point> >, |
205 |
+ // but this representation is a bit more convenient. |
206 |
+ typedef multiset<S2Point> VertexSet; |
207 |
+- typedef hash_map<S2Point, VertexSet> EdgeSet; |
208 |
++ typedef my_hash_map<S2Point, VertexSet> EdgeSet; |
209 |
+ scoped_ptr<EdgeSet> edges_; |
210 |
+ |
211 |
+ // Unique collection of the starting (first) vertex of all edges, |
212 |
+--- a/src/third_party/s2/s2regioncoverer.cc |
213 |
++++ b/src/third_party/s2/s2regioncoverer.cc |
214 |
+@@ -321,7 +321,7 @@ void S2RegionCoverer::GetInteriorCellUnion(S2Region const& region, |
215 |
+ |
216 |
+ void S2RegionCoverer::FloodFill( |
217 |
+ S2Region const& region, S2CellId const& start, vector<S2CellId>* output) { |
218 |
+- hash_set<S2CellId> all; |
219 |
++ my_hash_set<S2CellId> all; |
220 |
+ vector<S2CellId> frontier; |
221 |
+ output->clear(); |
222 |
+ all.insert(start); |
223 |
+--- a/src/third_party/s2/s2regioncoverer_test.cc |
224 |
++++ b/src/third_party/s2/s2regioncoverer_test.cc |
225 |
+@@ -11,7 +11,7 @@ using std::swap; |
226 |
+ using std::reverse; |
227 |
+ |
228 |
+ #include <hash_map> |
229 |
+-using __gnu_cxx::hash_map; |
230 |
++#define my_hash_map __gnu_cxx::hash_map |
231 |
+ |
232 |
+ #include <queue> |
233 |
+ using std::priority_queue; |
234 |
+@@ -65,7 +65,7 @@ static void CheckCovering(S2RegionCoverer const& coverer, |
235 |
+ vector<S2CellId> const& covering, |
236 |
+ bool interior) { |
237 |
+ // Keep track of how many cells have the same coverer.min_level() ancestor. |
238 |
+- hash_map<S2CellId, int> min_level_cells; |
239 |
++ my_hash_map<S2CellId, int> min_level_cells; |
240 |
+ for (int i = 0; i < covering.size(); ++i) { |
241 |
+ int level = covering[i].level(); |
242 |
+ EXPECT_GE(level, coverer.min_level()); |
243 |
+@@ -76,7 +76,7 @@ static void CheckCovering(S2RegionCoverer const& coverer, |
244 |
+ if (covering.size() > coverer.max_cells()) { |
245 |
+ // If the covering has more than the requested number of cells, then check |
246 |
+ // that the cell count cannot be reduced by using the parent of some cell. |
247 |
+- for (hash_map<S2CellId, int>::const_iterator i = min_level_cells.begin(); |
248 |
++ for (my_hash_map<S2CellId, int>::const_iterator i = min_level_cells.begin(); |
249 |
+ i != min_level_cells.end(); ++i) { |
250 |
+ EXPECT_EQ(i->second, 1); |
251 |
+ } |
252 |
+--- a/src/third_party/s2/strings/split.cc |
253 |
++++ b/src/third_party/s2/strings/split.cc |
254 |
+@@ -156,7 +156,7 @@ struct simple_insert_iterator { |
255 |
+ // SplitStringToIterator{Using|AllowEmpty}(). |
256 |
+ template <typename T> |
257 |
+ struct simple_hash_map_iterator { |
258 |
+- typedef hash_map<T, T> hashmap; |
259 |
++ typedef my_hash_map<T, T> hashmap; |
260 |
+ hashmap* t; |
261 |
+ bool even; |
262 |
+ typename hashmap::iterator curr; |
263 |
+@@ -246,8 +246,8 @@ void SplitStringAllowEmpty(const string& full, const char* delim, |
264 |
+ } |
265 |
+ |
266 |
+ void SplitStringToHashsetAllowEmpty(const string& full, const char* delim, |
267 |
+- hash_set<string>* result) { |
268 |
+- simple_insert_iterator<hash_set<string> > it(result); |
269 |
++ my_hash_set<string>* result) { |
270 |
++ simple_insert_iterator<my_hash_set<string> > it(result); |
271 |
+ SplitStringToIteratorAllowEmpty(full, delim, 0, it); |
272 |
+ } |
273 |
+ |
274 |
+@@ -258,7 +258,7 @@ void SplitStringToSetAllowEmpty(const string& full, const char* delim, |
275 |
+ } |
276 |
+ |
277 |
+ void SplitStringToHashmapAllowEmpty(const string& full, const char* delim, |
278 |
+- hash_map<string, string>* result) { |
279 |
++ my_hash_map<string, string>* result) { |
280 |
+ simple_hash_map_iterator<string> it(result); |
281 |
+ SplitStringToIteratorAllowEmpty(full, delim, 0, it); |
282 |
+ } |
283 |
+@@ -352,8 +352,8 @@ void SplitStringUsing(const string& full, |
284 |
+ } |
285 |
+ |
286 |
+ void SplitStringToHashsetUsing(const string& full, const char* delim, |
287 |
+- hash_set<string>* result) { |
288 |
+- simple_insert_iterator<hash_set<string> > it(result); |
289 |
++ my_hash_set<string>* result) { |
290 |
++ simple_insert_iterator<my_hash_set<string> > it(result); |
291 |
+ SplitStringToIteratorUsing(full, delim, it); |
292 |
+ } |
293 |
+ |
294 |
+@@ -364,7 +364,7 @@ void SplitStringToSetUsing(const string& full, const char* delim, |
295 |
+ } |
296 |
+ |
297 |
+ void SplitStringToHashmapUsing(const string& full, const char* delim, |
298 |
+- hash_map<string, string>* result) { |
299 |
++ my_hash_map<string, string>* result) { |
300 |
+ simple_hash_map_iterator<string> it(result); |
301 |
+ SplitStringToIteratorUsing(full, delim, it); |
302 |
+ } |
303 |
+--- a/src/third_party/s2/strings/split.h |
304 |
++++ b/src/third_party/s2/strings/split.h |
305 |
+@@ -41,7 +41,7 @@ using namespace std; |
306 |
+ void SplitStringAllowEmpty(const string& full, const char* delim, |
307 |
+ vector<string>* res); |
308 |
+ void SplitStringToHashsetAllowEmpty(const string& full, const char* delim, |
309 |
+- hash_set<string>* res); |
310 |
++ my_hash_set<string>* res); |
311 |
+ void SplitStringToSetAllowEmpty(const string& full, const char* delim, |
312 |
+ set<string>* res); |
313 |
+ // The even-positioned (0-based) components become the keys for the |
314 |
+@@ -50,7 +50,7 @@ void SplitStringToSetAllowEmpty(const string& full, const char* delim, |
315 |
+ // if the key was already present in the hash table, or will be the |
316 |
+ // empty string if the key is a newly inserted key. |
317 |
+ void SplitStringToHashmapAllowEmpty(const string& full, const char* delim, |
318 |
+- hash_map<string, string>* result); |
319 |
++ my_hash_map<string, string>* result); |
320 |
+ |
321 |
+ // ---------------------------------------------------------------------- |
322 |
+ // SplitStringUsing() |
323 |
+@@ -66,7 +66,7 @@ void SplitStringToHashmapAllowEmpty(const string& full, const char* delim, |
324 |
+ void SplitStringUsing(const string& full, const char* delim, |
325 |
+ vector<string>* res); |
326 |
+ void SplitStringToHashsetUsing(const string& full, const char* delim, |
327 |
+- hash_set<string>* res); |
328 |
++ my_hash_set<string>* res); |
329 |
+ void SplitStringToSetUsing(const string& full, const char* delim, |
330 |
+ set<string>* res); |
331 |
+ // The even-positioned (0-based) components become the keys for the |
332 |
+@@ -75,7 +75,7 @@ void SplitStringToSetUsing(const string& full, const char* delim, |
333 |
+ // if the key was already present in the hash table, or will be the |
334 |
+ // empty string if the key is a newly inserted key. |
335 |
+ void SplitStringToHashmapUsing(const string& full, const char* delim, |
336 |
+- hash_map<string, string>* result); |
337 |
++ my_hash_map<string, string>* result); |
338 |
+ |
339 |
+ // ---------------------------------------------------------------------- |
340 |
+ // SplitOneIntToken() |
341 |
|
342 |
diff --git a/dev-db/mongodb/mongodb-4.4.10-r1.ebuild b/dev-db/mongodb/mongodb-4.4.10-r1.ebuild |
343 |
index b7037132f4ff..1c0661aee4ae 100644 |
344 |
--- a/dev-db/mongodb/mongodb-4.4.10-r1.ebuild |
345 |
+++ b/dev-db/mongodb/mongodb-4.4.10-r1.ebuild |
346 |
@@ -62,6 +62,7 @@ PATCHES=( |
347 |
"${FILESDIR}/${PN}-5.0.2-glibc-2.34.patch" |
348 |
"${FILESDIR}/${PN}-4.4.10-boost-1.79.patch" |
349 |
"${FILESDIR}/${PN}-4.4.10-no-force-lld.patch" |
350 |
+ "${FILESDIR}/${PN}-4.4.10-boost-1.81.patch" |
351 |
) |
352 |
|
353 |
S="${WORKDIR}/${MY_P}" |
354 |
|
355 |
diff --git a/dev-db/mongodb/mongodb-5.0.5-r2.ebuild b/dev-db/mongodb/mongodb-5.0.5-r2.ebuild |
356 |
index d0b26e5aafbf..d49a2e02c20b 100644 |
357 |
--- a/dev-db/mongodb/mongodb-5.0.5-r2.ebuild |
358 |
+++ b/dev-db/mongodb/mongodb-5.0.5-r2.ebuild |
359 |
@@ -69,6 +69,7 @@ PATCHES=( |
360 |
"${FILESDIR}/${PN}-5.0.2-skip-reqs-check.patch" |
361 |
"${FILESDIR}/${PN}-5.0.2-boost-1.79.patch" |
362 |
"${FILESDIR}/${PN}-5.0.5-no-force-lld.patch" |
363 |
+ "${FILESDIR}/${PN}-4.4.10-boost-1.81.patch" |
364 |
) |
365 |
|
366 |
S="${WORKDIR}/${MY_P}" |