Gentoo Archives: gentoo-commits

From: "Fabian Groffen (grobian)" <grobian@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] portage r11744 - in main/branches/prefix: bin pym/_emerge pym/portage pym/portage/dbapi
Date: Wed, 29 Oct 2008 20:02:46
Message-Id: E1KvHFa-0002eW-TF@stork.gentoo.org
1 Author: grobian
2 Date: 2008-10-29 20:02:41 +0000 (Wed, 29 Oct 2008)
3 New Revision: 11744
4
5 Modified:
6 main/branches/prefix/bin/repoman
7 main/branches/prefix/pym/_emerge/__init__.py
8 main/branches/prefix/pym/portage/__init__.py
9 main/branches/prefix/pym/portage/dbapi/vartree.py
10 Log:
11 Merged from trunk -r11736:11743
12
13 | 11737 | Fix graph.get() so that it works as intended, returning the |
14 | zmedico | node corresponding to the given key. |
15
16 | 11738 | Remove manifest1 digest-* autoadd code. Thanks to grobian. |
17 | zmedico | |
18
19 | 11739 | Update the auto-add message to say "Manifest" instead of |
20 | zmedico | "digests". Thanks to grobian. |
21
22 | 11740 | Bug #238957 - When removing unneeded preserved libs inside |
23 | zmedico | dblink.unmerge(), use a digraph to properly track consumer |
24 | | relationships between preserved libs. This fixes cases where |
25 | | preserved libs failed to be removed due to being consumed by |
26 | | other preserved libs. |
27
28 | 11741 | Fix $ROOT handling inside LinkageMap.findConsumers(). |
29 | zmedico | |
30
31 | 11742 | Fix interaction between LinkageMap.rebuild() and the package |
32 | zmedico | replacement process in order to avoid problems with stale or |
33 | | unaccounted NEEDED. This solves a LinkageMap corruption |
34 | | issue which caused findConsumers to return false positive |
35 | | inside dblink.unmerge(). |
36
37 | 11743 | Make config.setcpv() store the ebuild metadata inside |
38 | zmedico | self.configdict["pkg"], and reuse this metadata inside |
39 | | doebuild() in order to avoid redundant portdbapi.aux_get() |
40 | | calls. |
41
42
43 Modified: main/branches/prefix/bin/repoman
44 ===================================================================
45 --- main/branches/prefix/bin/repoman 2008-10-29 17:03:35 UTC (rev 11743)
46 +++ main/branches/prefix/bin/repoman 2008-10-29 20:02:41 UTC (rev 11744)
47 @@ -1665,17 +1665,9 @@
48 # It's a manifest... auto add
49 myautoadd+=[myunadded[x]]
50 del myunadded[x]
51 - elif len(xs[-1])>=7:
52 - if xs[-1][:7]=="digest-":
53 - del xs[-2]
54 - myeb="/".join(xs[:-1]+[xs[-1][7:]])+".ebuild"
55 - if os.path.exists(myeb):
56 - # Ebuild exists for digest... So autoadd it.
57 - myautoadd+=[myunadded[x]]
58 - del myunadded[x]
59 -
60 +
61 if myautoadd:
62 - print ">>> Auto-Adding missing digests..."
63 + print ">>> Auto-Adding missing Manifest(s)..."
64 if options.pretend:
65 if vcs == "cvs":
66 print "(cvs add "+" ".join(myautoadd)+")"
67
68 Modified: main/branches/prefix/pym/_emerge/__init__.py
69 ===================================================================
70 --- main/branches/prefix/pym/_emerge/__init__.py 2008-10-29 17:03:35 UTC (rev 11743)
71 +++ main/branches/prefix/pym/_emerge/__init__.py 2008-10-29 20:02:41 UTC (rev 11744)
72 @@ -10012,6 +10012,7 @@
73 # Since config.setcpv() isn't guaranteed to call config.reset() due to
74 # performance reasons, call it here to make sure all settings from the
75 # previous package get flushed out (such as PORTAGE_LOG_FILE).
76 + temp_settings.reload()
77 temp_settings.reset()
78 return temp_settings
79
80
81 Modified: main/branches/prefix/pym/portage/__init__.py
82 ===================================================================
83 --- main/branches/prefix/pym/portage/__init__.py 2008-10-29 17:03:35 UTC (rev 11743)
84 +++ main/branches/prefix/pym/portage/__init__.py 2008-10-29 20:02:41 UTC (rev 11744)
85 @@ -354,14 +354,14 @@
86 relationship to the parent, the relationship is left as hard."""
87
88 if node not in self.nodes:
89 - self.nodes[node] = ({}, {})
90 + self.nodes[node] = ({}, {}, node)
91 self.order.append(node)
92
93 if not parent:
94 return
95
96 if parent not in self.nodes:
97 - self.nodes[parent] = ({}, {})
98 + self.nodes[parent] = ({}, {}, parent)
99 self.order.append(parent)
100
101 if parent in self.nodes[node][1]:
102 @@ -442,7 +442,10 @@
103 return node in self.nodes
104
105 def get(self, key, default=None):
106 - return self.nodes.get(key, default)
107 + node_data = self.nodes.get(key, self)
108 + if node_data is self:
109 + return default
110 + return node_data[2]
111
112 def all_nodes(self):
113 """Return a list of all nodes in the graph"""
114 @@ -504,7 +507,7 @@
115 clone = digraph()
116 clone.nodes = {}
117 for k, v in self.nodes.iteritems():
118 - clone.nodes[k] = (v[0].copy(), v[1].copy())
119 + clone.nodes[k] = (v[0].copy(), v[1].copy(), v[2])
120 clone.order = self.order[:]
121 return clone
122
123 @@ -1952,19 +1955,33 @@
124
125 if self.mycpv == mycpv:
126 return
127 - ebuild_phase = self.get("EBUILD_PHASE")
128 has_changed = False
129 self.mycpv = mycpv
130 + cat, pf = catsplit(mycpv)
131 cp = dep_getkey(mycpv)
132 cpv_slot = self.mycpv
133 pkginternaluse = ""
134 iuse = ""
135 + env_configdict = self.configdict["env"]
136 + pkg_configdict = self.configdict["pkg"]
137 + previous_iuse = pkg_configdict.get("IUSE")
138 + for k in ("CATEGORY", "PKGUSE", "PF", "PORTAGE_USE"):
139 + env_configdict.pop(k, None)
140 + pkg_configdict["CATEGORY"] = cat
141 + pkg_configdict["PF"] = pf
142 if mydb:
143 if not hasattr(mydb, "aux_get"):
144 - slot = mydb["SLOT"]
145 - iuse = mydb["IUSE"]
146 + pkg_configdict.update(mydb)
147 else:
148 - slot, iuse = mydb.aux_get(self.mycpv, ["SLOT", "IUSE"])
149 + aux_keys = [k for k in auxdbkeys \
150 + if not k.startswith("UNUSED_")]
151 + for k, v in izip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
152 + pkg_configdict[k] = v
153 + for k in pkg_configdict:
154 + if k != "USE":
155 + env_configdict.pop(k, None)
156 + slot = pkg_configdict["SLOT"]
157 + iuse = pkg_configdict["IUSE"]
158 if pkg is None:
159 cpv_slot = "%s:%s" % (self.mycpv, slot)
160 else:
161 @@ -2059,22 +2076,13 @@
162 has_changed = True
163 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
164 self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
165 - previous_iuse = self.configdict["pkg"].get("IUSE")
166 - self.configdict["pkg"]["IUSE"] = iuse
167
168 - # Always set known good values for these variables, since
169 - # corruption of these can cause problems:
170 - cat, pf = catsplit(self.mycpv)
171 - self.configdict["pkg"]["CATEGORY"] = cat
172 - self.configdict["pkg"]["PF"] = pf
173 -
174 if has_changed:
175 self.reset(keeping_pkg=1,use_cache=use_cache)
176
177 - # If this is not an ebuild phase and reset() has not been called,
178 - # it's safe to return early here if IUSE has not changed.
179 - if not (has_changed or ebuild_phase) and \
180 - previous_iuse == iuse:
181 + # If reset() has not been called, it's safe to return
182 + # early if IUSE has not changed.
183 + if not has_changed and previous_iuse == iuse:
184 return
185
186 # Filter out USE flags that aren't part of IUSE. This has to
187 @@ -2092,7 +2100,7 @@
188 self.configdict["pkg"]["PORTAGE_IUSE"] = regex
189
190 ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
191 - if ebuild_force_test and ebuild_phase and \
192 + if ebuild_force_test and \
193 not hasattr(self, "_ebuild_force_test_msg_shown"):
194 self._ebuild_force_test_msg_shown = True
195 writemsg("Forcing test.\n", noiselevel=-1)
196 @@ -4707,12 +4715,7 @@
197 # so that the caller can override it.
198 tmpdir = mysettings["PORTAGE_TMPDIR"]
199
200 - # This variable is a signal to setcpv where it triggers
201 - # filtering of USE for the ebuild environment.
202 - mysettings["EBUILD_PHASE"] = mydo
203 - mysettings.backup_changes("EBUILD_PHASE")
204 -
205 - if mydo != "depend":
206 + if mydo != "depend" and mycpv != mysettings.mycpv:
207 """For performance reasons, setcpv only triggers reset when it
208 detects a package-specific change in config. For the ebuild
209 environment, a reset call is forced in order to ensure that the
210 @@ -4776,18 +4779,17 @@
211 mysettings["PORTAGE_QUIET"] = "1"
212
213 if mydo != "depend":
214 - eapi, mysettings["INHERITED"], mysettings["SLOT"], mysettings["RESTRICT"] = \
215 - mydbapi.aux_get(mycpv, ["EAPI", "INHERITED", "SLOT", "RESTRICT"])
216 + # Metadata vars such as EAPI and RESTRICT are
217 + # set by the above config.setcpv() call.
218 + eapi = mysettings["EAPI"]
219 if not eapi_is_supported(eapi):
220 # can't do anything with this.
221 raise portage.exception.UnsupportedAPIException(mycpv, eapi)
222 - mysettings.pop("EAPI", None)
223 - mysettings.configdict["pkg"]["EAPI"] = eapi
224 try:
225 mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
226 portage.dep.use_reduce(portage.dep.paren_reduce(
227 - mysettings.get("RESTRICT","")),
228 - uselist=mysettings.get("USE","").split())))
229 + mysettings["RESTRICT"]),
230 + uselist=mysettings["PORTAGE_USE"].split())))
231 except portage.exception.InvalidDependString:
232 # RESTRICT is validated again inside doebuild, so let this go
233 mysettings["PORTAGE_RESTRICT"] = ""
234 @@ -5648,20 +5650,35 @@
235
236 mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
237
238 - # Make sure we get the correct tree in case there are overlays.
239 - mytree = os.path.realpath(
240 - os.path.dirname(os.path.dirname(mysettings["O"])))
241 - useflags = mysettings["PORTAGE_USE"].split()
242 - try:
243 - alist = mydbapi.getFetchMap(mycpv, useflags=useflags, mytree=mytree)
244 - aalist = mydbapi.getFetchMap(mycpv, mytree=mytree)
245 - except portage.exception.InvalidDependString, e:
246 - writemsg("!!! %s\n" % str(e), noiselevel=-1)
247 - writemsg("!!! Invalid SRC_URI for '%s'.\n" % mycpv, noiselevel=-1)
248 - del e
249 - return 1
250 - mysettings["A"] = " ".join(alist)
251 - mysettings["AA"] = " ".join(aalist)
252 + emerge_skip_distfiles = returnpid
253 + # Only try and fetch the files if we are going to need them ...
254 + # otherwise, if user has FEATURES=noauto and they run `ebuild clean
255 + # unpack compile install`, we will try and fetch 4 times :/
256 + need_distfiles = not emerge_skip_distfiles and \
257 + (mydo in ("fetch", "unpack") or \
258 + mydo not in ("digest", "manifest") and "noauto" not in features)
259 + alist = mysettings.configdict["pkg"].get("A")
260 + aalist = mysettings.configdict["pkg"].get("AA")
261 + if need_distfiles or alist is None or aalist is None:
262 + # Make sure we get the correct tree in case there are overlays.
263 + mytree = os.path.realpath(
264 + os.path.dirname(os.path.dirname(mysettings["O"])))
265 + useflags = mysettings["PORTAGE_USE"].split()
266 + try:
267 + alist = mydbapi.getFetchMap(mycpv, useflags=useflags,
268 + mytree=mytree)
269 + aalist = mydbapi.getFetchMap(mycpv, mytree=mytree)
270 + except portage.exception.InvalidDependString, e:
271 + writemsg("!!! %s\n" % str(e), noiselevel=-1)
272 + writemsg("!!! Invalid SRC_URI for '%s'.\n" % mycpv,
273 + noiselevel=-1)
274 + del e
275 + return 1
276 + mysettings.configdict["pkg"]["A"] = " ".join(alist)
277 + mysettings.configdict["pkg"]["AA"] = " ".join(aalist)
278 + else:
279 + alist = set(alist.split())
280 + aalist = set(aalist.split())
281 if ("mirror" in features) or fetchall:
282 fetchme = aalist
283 checkme = aalist
284 @@ -5674,12 +5691,7 @@
285 # so do not check them again.
286 checkme = []
287
288 - # Only try and fetch the files if we are going to need them ...
289 - # otherwise, if user has FEATURES=noauto and they run `ebuild clean
290 - # unpack compile install`, we will try and fetch 4 times :/
291 - need_distfiles = (mydo in ("fetch", "unpack") or \
292 - mydo not in ("digest", "manifest") and "noauto" not in features)
293 - emerge_skip_distfiles = returnpid
294 +
295 if not emerge_skip_distfiles and \
296 need_distfiles and not fetch(
297 fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
298 @@ -5873,8 +5885,7 @@
299 misc_keys = ["LICENSE", "PROPERTIES", "PROVIDE", "RESTRICT", "SRC_URI"]
300 other_keys = ["SLOT"]
301 all_keys = dep_keys + misc_keys + other_keys
302 - metadata = dict(izip(all_keys,
303 - mydbapi.aux_get(mysettings.mycpv, all_keys)))
304 + metadata = mysettings.configdict["pkg"]
305
306 class FakeTree(object):
307 def __init__(self, mydb):
308
309 Modified: main/branches/prefix/pym/portage/dbapi/vartree.py
310 ===================================================================
311 --- main/branches/prefix/pym/portage/dbapi/vartree.py 2008-10-29 17:03:35 UTC (rev 11743)
312 +++ main/branches/prefix/pym/portage/dbapi/vartree.py 2008-10-29 20:02:41 UTC (rev 11744)
313 @@ -23,7 +23,7 @@
314 grabfile, grabdict, normalize_path, new_protect_filename, getlibpaths
315 from portage.versions import pkgsplit, catpkgsplit, catsplit, best, pkgcmp
316
317 -from portage import listdir, dep_expand, flatten, key_expand, \
318 +from portage import listdir, dep_expand, digraph, flatten, key_expand, \
319 doebuild_environment, doebuild, env_update, prepare_build_dirs, \
320 abssymlink, movefile, _movefile, bsd_chflags, cpv_getkey
321
322 @@ -208,13 +208,25 @@
323 """
324 return isinstance(self._key, tuple)
325
326 - def rebuild(self, include_file=None):
327 + class _LibGraphNode(_ObjectKey):
328 + __slots__ = ("alt_paths",)
329 +
330 + def __init__(self, obj, root):
331 + LinkageMap._ObjectKey.__init__(self, obj, root)
332 + self.alt_paths = set()
333 +
334 + def __str__(self):
335 + return str(sorted(self.alt_paths))
336 +
337 + def rebuild(self, exclude_pkgs=None, include_file=None):
338 root = self._root
339 libs = {}
340 obj_key_cache = {}
341 obj_properties = {}
342 lines = []
343 for cpv in self._dbapi.cpv_all():
344 + if exclude_pkgs is not None and cpv in exclude_pkgs:
345 + continue
346 lines += self._dbapi.aux_get(cpv, ["NEEDED.ELF.2"])[0].split('\n')
347 # Cache NEEDED.* files to avoid doing excessive IO for every rebuild.
348 self._dbapi.flush_cache()
349 @@ -565,7 +577,8 @@
350 raise KeyError("%s (%s) not in object list" % (obj_key, obj))
351
352 # Determine the directory(ies) from the set of objects.
353 - objs_dirs = set([os.path.dirname(x) for x in objs])
354 + objs_dirs = set(os.path.join(self._root,
355 + os.path.dirname(x).lstrip(os.sep)) for x in objs)
356
357 # If there is another version of this lib with the
358 # same soname and the master link points to that
359 @@ -2349,7 +2362,8 @@
360 writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
361
362 self._unmerge_pkgfiles(pkgfiles, others_in_slot)
363 -
364 + self._clear_contents_cache()
365 +
366 # Remove the registration of preserved libs for this pkg instance
367 plib_registry = self.vartree.dbapi.plib_registry
368 plib_registry.unregister(self.mycpv, self.settings["SLOT"],
369 @@ -2369,64 +2383,67 @@
370 if retval != os.EX_OK:
371 writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
372
373 - # regenerate reverse NEEDED map
374 - self.vartree.dbapi.linkmap.rebuild()
375 + # Skip this if another package in the same slot has just been
376 + # merged on top of this package, since the other package has
378 + # already called LinkageMap.rebuild() and passed its NEEDED file
378 + # in as an argument.
379 + if not others_in_slot:
380 + self.vartree.dbapi.linkmap.rebuild(exclude_pkgs=(self.mycpv,))
381
382 # remove preserved libraries that don't have any consumers left
383 - # FIXME: this code is quite ugly and can likely be optimized in several ways
384 + # Since preserved libraries can be consumers of other preserved
385 + # libraries, use a graph to track consumer relationships.
386 plib_dict = plib_registry.getPreservedLibs()
387 - for cpv in plib_dict:
388 - plib_dict[cpv].sort()
389 - # for the loop below to work correctly, we need all
390 - # symlinks to come before the actual files, such that
391 - # the recorded symlinks (sonames) will be resolved into
392 - # their real target before the object is found not to be
393 - # in the reverse NEEDED map
394 - def symlink_compare(x, y):
395 - x = os.path.join(self.myroot, x.lstrip(os.path.sep))
396 - y = os.path.join(self.myroot, y.lstrip(os.path.sep))
397 - if os.path.islink(x):
398 - if os.path.islink(y):
399 - return 0
400 - else:
401 - return -1
402 - elif os.path.islink(y):
403 - return 1
404 + lib_graph = digraph()
405 + preserved_nodes = set()
406 + root = self.myroot
407 + for plibs in plib_dict.itervalues():
408 + for f in plibs:
409 + preserved_node = LinkageMap._LibGraphNode(f, root)
410 + if not preserved_node.file_exists():
411 + continue
412 + existing_node = lib_graph.get(preserved_node)
413 + if existing_node is not None:
414 + preserved_node = existing_node
415 else:
416 - return 0
417 + lib_graph.add(preserved_node, None)
418 + preserved_node.alt_paths.add(f)
419 + preserved_nodes.add(preserved_node)
420 + for c in self.vartree.dbapi.linkmap.findConsumers(f):
421 + consumer_node = LinkageMap._LibGraphNode(c, root)
422 + if not consumer_node.file_exists():
423 + continue
424 + # Note that consumers may also be providers.
425 + existing_node = lib_graph.get(consumer_node)
426 + if existing_node is not None:
427 + consumer_node = existing_node
428 + consumer_node.alt_paths.add(c)
429 + lib_graph.add(preserved_node, consumer_node)
430
431 - plib_dict[cpv].sort(symlink_compare)
432 - for f in plib_dict[cpv]:
433 - f_abs = os.path.join(self.myroot, f.lstrip(os.path.sep))
434 - if not os.path.exists(f_abs):
435 - continue
436 - unlink_list = []
437 - consumers = self.vartree.dbapi.linkmap.findConsumers(f)
438 - if not consumers:
439 - unlink_list.append(f_abs)
440 + while not lib_graph.empty():
441 + root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
442 + if not root_nodes:
443 + break
444 + lib_graph.difference_update(root_nodes)
445 + unlink_list = set()
446 + for node in root_nodes:
447 + unlink_list.update(node.alt_paths)
448 + unlink_list = sorted(unlink_list)
449 + for obj in unlink_list:
450 + obj = os.path.join(root, obj.lstrip(os.sep))
451 + if os.path.islink(obj):
452 + obj_type = "sym"
453 else:
454 - keep=False
455 - for c in consumers:
456 - c = os.path.join(self.myroot,
457 - c.lstrip(os.path.sep))
458 - if c not in self.getcontents():
459 - keep=True
460 - break
461 - if not keep:
462 - unlink_list.append(f_abs)
463 - for obj in unlink_list:
464 - try:
465 - if os.path.islink(obj):
466 - obj_type = "sym"
467 - else:
468 - obj_type = "obj"
469 - os.unlink(obj)
470 - showMessage("<<< !needed %s %s\n" % (obj_type, obj))
471 - except OSError, e:
472 - if e.errno == errno.ENOENT:
473 - pass
474 - else:
475 - raise e
476 + obj_type = "obj"
477 + try:
478 + os.unlink(obj)
479 + except OSError, e:
480 + if e.errno != errno.ENOENT:
481 + raise
482 + del e
483 + else:
484 + showMessage("<<< !needed %s %s\n" % (obj_type, obj))
485 +
486 plib_registry.pruneNonExisting()
487
488 finally:
489 @@ -3556,6 +3573,10 @@
490 gid=portage_gid, mode=02750, mask=02)
491 writedict(cfgfiledict, conf_mem_file)
492
493 + exclude_pkgs = set(dblnk.mycpv for dblnk in others_in_slot)
494 + self.vartree.dbapi.linkmap.rebuild(exclude_pkgs=exclude_pkgs,
495 + include_file=os.path.join(inforoot, "NEEDED.ELF.2"))
496 +
497 # These caches are populated during collision-protect and the data
498 # they contain is now invalid. It's very important to invalidate
499 # the contents_inodes cache so that FEATURES=unmerge-orphans