Gentoo Archives: gentoo-commits

From: "Fabian Groffen (grobian)" <grobian@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] portage r10088 - in main/branches/prefix: bin pym/_emerge pym/portage
Date: Fri, 02 May 2008 14:39:14
Message-Id: E1JrwPm-0003dE-1Y@stork.gentoo.org
Author: grobian
Date: 2008-05-02 14:39:08 +0000 (Fri, 02 May 2008)
New Revision: 10088

Modified:
   main/branches/prefix/bin/emerge-webrsync
   main/branches/prefix/pym/_emerge/__init__.py
   main/branches/prefix/pym/portage/__init__.py
Log:
Merged from trunk 10035:10069

| 10037   | Make unmerge() group packages by cat/pn when the order       |
| zmedico | doesn't matter, so the display appears like earlier versions |
|         | of portage.                                                  |

| 10039   | Fix "deep" logic in depgraph._complete_graph() so that it    |
| zmedico | always properly recognizes whether or not "deep" was         |
|         | previously enabled.                                          |

| 10041   | * Fix dep_check() so that it doesn't expand virtual blockers |
| zmedico | since the un-expanded virtual atom is more useful for        |
|         | maintaining a cache of blocker atoms. * Expand virtual       |
|         | blockers in depgraph.validate_blockers(), since it's not     |
|         | done by dep_check() anymore. * If blocker data from the      |
|         | graph is available, use it to validate the blocker cache and |
|         | update the cache if it seems invalid. * Make                 |
|         | BlockerCache._load() more tolerant to installs/uninstalls so |
|         | that cache isn't rebuilt every time.                         |

| 10042   | Fix logic to re-use blockers from the graph when populating  |
| zmedico | the blocker cache.                                           |

| 10043   | Fix broken comparison, compare with None instead of nonzero. |
| zmedico |                                                              |

| 10045   | Handle a KeyError when validating the blocker cache.         |
| zmedico |                                                              |

| 10047   | Bug #219837 - Adjust date calculations to improve accuracy.  |
| zmedico |                                                              |

| 10050   | * Add a Package.operation attribute and use it to            |
| zmedico | distinguish "uninstall" operations.                          |

| 10052   | Don't add blockers to the graph more times than necessary.   |
| zmedico |                                                              |

| 10054   | Sort blockers when caching them.                             |
| zmedico |                                                              |

| 10056   | Detect an old version of tarsync and use bzip2 compression   |
| zmedico | in that case.                                                |

| 10062   | When adding blockers for installed packages to the graph,    |
| zmedico | use cached Package instances instead of constructing new     |
|         | ones.                                                        |

| 10065   | Fix variable rename breakage: pkg -> pkg.cpv                 |
| zmedico |                                                              |

| 10067   | Bug #219837 - Adjust date calculations some more and show an |
| zmedico | informative ewarn message when bailing out due to a newer    |
|         | snapshot being unavailable.                                  |

| 10069   | check if PORTAGE_TMPDIR is mounted readonly/noexec (bug      |
| genone  | #219957)                                                     |


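The thread running through r10050 and several hunks below is the removal of
the Uninstall subclass in favor of a Package.operation attribute, so that the
operation becomes part of the task's hash key. As a rough, self-contained
sketch of the idea (simplified stand-ins, not the actual portage classes):

class Task(object):
	def __init__(self, **kwargs):
		for k, v in kwargs.items():
			setattr(self, k, v)

class Package(Task):
	def __init__(self, **kwargs):
		kwargs.setdefault("operation", None)
		kwargs.setdefault("onlydeps", False)
		kwargs.setdefault("installed", False)
		Task.__init__(self, **kwargs)

	def _get_hash_key(self):
		if self.operation is None:
			# Same defaulting rule as in the diff below.
			self.operation = "merge"
			if self.onlydeps or self.installed:
				self.operation = "nomerge"
		return (self.type_name, self.root, self.cpv, self.operation)

# An uninstall task is now just a Package with operation="uninstall":
uninst = Package(type_name="installed", root="/", cpv="app-misc/foo-1",
	installed=True, operation="uninstall")
assert uninst._get_hash_key() == ("installed", "/", "app-misc/foo-1", "uninstall")

One constructor with an explicit operation field means resume lists and
isinstance() checks no longer need to special-case a separate class.
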
Modified: main/branches/prefix/bin/emerge-webrsync
===================================================================
--- main/branches/prefix/bin/emerge-webrsync	2008-05-02 14:36:59 UTC (rev 10087)
+++ main/branches/prefix/bin/emerge-webrsync	2008-05-02 14:39:08 UTC (rev 10088)
@@ -173,8 +173,7 @@
 
 	vecho "Syncing local tree ..."
 
-	# tarsync-0.2.1 doesn't seem to support lzma compression.
-	if [ "${file##*.}" != "lzma" ] && type -P tarsync > /dev/null; then
+	if type -P tarsync > /dev/null ; then
 		if ! tarsync $(vvecho -v) -s 1 -o portage -g portage -e /distfiles -e /packages -e /local "${file}" "${PORTDIR}"; then
 			eecho "tarsync failed; tarball is corrupt? (${file})"
 			return 1
@@ -221,7 +220,14 @@
 	local mirror
 
 	local compressions=""
-	type -P lzcat > /dev/null && compressions="${compressions} lzma"
+	# lzma is not supported in <=app-arch/tarsync-0.2.1, so use
+	# bz2 format if we have an old version of tarsync.
+	if type -P tarsync > /dev/null && \
+		portageq has_version / '<=app-arch/tarsync-0.2.1' ; then
+		true
+	else
+		type -P lzcat > /dev/null && compressions="${compressions} lzma"
+	fi
 	type -P bzcat > /dev/null && compressions="${compressions} bz2"
 	type -P zcat > /dev/null && compressions="${compressions} gz"
 	if [[ -z ${compressions} ]] ; then
@@ -308,33 +314,75 @@
 }
 
 do_latest_snapshot() {
-	local attempts=-1
+	local attempts=0
 	local r=1
 
 	vecho "Fetching most recent snapshot ..."
 
-	while (( ${attempts} < 40 )) ; do
-		local day
-		local month
-		local year
-		local seconds
+	# The snapshot for a given day is generated at 01:45 UTC on the following
+	# day, so the current day's snapshot (going by UTC time) hasn't been
+	# generated yet. Therefore, always start by looking for the previous day's
+	# snapshot (for attempts=1, subtract 1 day from the current UTC time).
 
-		attempts=$(( ${attempts} + 1 ))
+	# Timestamps that differ by less than 2 hours
+	# are considered to be approximately equal.
+	local min_time_diff=$(( 2 * 60 * 60 ))
 
-		utc_attempt=$(expr $(get_utc_date_in_seconds) - 86400 \* ${attempts})
+	local existing_timestamp=$(get_portage_timestamp)
+	local timestamp_difference
+	local timestamp_problem
+	local approx_snapshot_time
+	local start_time=$(get_utc_date_in_seconds)
+	local start_hour=$(get_date_part ${start_time} "%H")
 
-		day=$(get_date_part ${utc_attempt} "%d")
-		month=$(get_date_part ${utc_attempt} "%m")
-		year=$(get_date_part ${utc_attempt} "%Y")
-		utc_midnight=$(get_date_part $(expr ${utc_attempt} - ${utc_attempt} % 86400) "%s")
+	# Daily snapshots are created at 1:45 AM and are not
+	# available until after 2 AM. Don't waste time trying
+	# to fetch a snapshot before it's been created.
+	if [ ${start_hour} -lt 2 ] ; then
+		(( start_time -= 86400 ))
+	fi
+	local snapshot_date=$(get_date_part ${start_time} "%Y%m%d")
+	local snapshot_date_seconds=$(get_utc_second_from_string ${snapshot_date})
 
-		if [ ${utc_midnight} -lt $(($(get_portage_timestamp)-86400)) ]; then
-			wecho "portage content is newer than available snapshots (use --revert option to overide)"
+	while (( ${attempts} < 40 )) ; do
+		(( attempts++ ))
+		(( snapshot_date_seconds -= 86400 ))
+		# snapshots are created at 1:45 AM
+		(( approx_snapshot_time = snapshot_date_seconds + 86400 + 6300 ))
+		(( timestamp_difference = existing_timestamp - approx_snapshot_time ))
+		[ ${timestamp_difference} -lt 0 ] && (( timestamp_difference = -1 * timestamp_difference ))
+		snapshot_date=$(get_date_part ${snapshot_date_seconds} "%Y%m%d")
+
+		timestamp_problem=""
+		if [ ${timestamp_difference} -eq 0 ]; then
+			timestamp_problem="is identical to"
+		elif [ ${timestamp_difference} -lt ${min_time_diff} ]; then
+			timestamp_problem="is possibly identical to"
+		elif [ ${approx_snapshot_time} -lt ${existing_timestamp} ] ; then
+			timestamp_problem="is newer than"
+		fi
+
+		if [ -n "${timestamp_problem}" ]; then
+			ewarn "Latest snapshot date: ${snapshot_date}"
+			ewarn
+			ewarn "Approximate snapshot timestamp: ${approx_snapshot_time}"
+			ewarn "       Current local timestamp: ${existing_timestamp}"
+			ewarn
+			echo -e "The current local timestamp" \
+				"${timestamp_problem} the" \
+				"timestamp of the latest" \
+				"snapshot. In order to force sync," \
+				"use the --revert option or remove" \
+				"the timestamp file located at" \
+				"'${PORTDIR}/metadata/timestamp.x'." | fmt -w 70 | \
+			while read line ; do
+				ewarn "${line}"
+			done
 			r=0
 			break
 		fi
 
-		if do_snapshot 0 "${year}${month}${day}"; then
+		if do_snapshot 0 "${snapshot_date}"; then
 			r=0
 			break;
 		fi
 
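The rewritten do_latest_snapshot() above reduces to a small piece of
timestamp arithmetic. A rough Python rendering of the shell logic, kept here
only to make the constants explicit (the script itself is bash):

SNAPSHOT_DELAY = 86400 + 6300  # a snapshot dated day D appears at 01:45 UTC on day D+1
MIN_TIME_DIFF = 2 * 60 * 60    # timestamps within 2 hours count as approximately equal

def classify(existing_timestamp, snapshot_date_seconds):
	"""Mirror of the timestamp_problem checks above; both arguments are
	UTC seconds, the second being midnight at the start of the snapshot date."""
	approx_snapshot_time = snapshot_date_seconds + SNAPSHOT_DELAY
	diff = abs(existing_timestamp - approx_snapshot_time)
	if diff == 0:
		return "is identical to"
	if diff < MIN_TIME_DIFF:
		return "is possibly identical to"
	if approx_snapshot_time < existing_timestamp:
		return "is newer than"
	return ""  # empty: the snapshot is newer than the local tree, so fetch it
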
Modified: main/branches/prefix/pym/_emerge/__init__.py
===================================================================
--- main/branches/prefix/pym/_emerge/__init__.py	2008-05-02 14:36:59 UTC (rev 10087)
+++ main/branches/prefix/pym/_emerge/__init__.py	2008-05-02 14:39:08 UTC (rev 10088)
@@ -1274,8 +1274,12 @@
 		return str(self._get_hash_key())
 
 class Blocker(Task):
-	__slots__ = ("root", "atom", "satisfied")
+	__slots__ = ("root", "atom", "cp", "satisfied")
 
+	def __init__(self, **kwargs):
+		Task.__init__(self, **kwargs)
+		self.cp = portage.dep_getkey(self.atom)
+
 	def _get_hash_key(self):
 		hash_key = getattr(self, "_hash_key", None)
 		if hash_key is None:
@@ -1285,7 +1289,8 @@
 
 class Package(Task):
 	__slots__ = ("built", "cpv", "depth",
-		"installed", "metadata", "root", "onlydeps", "type_name",
+		"installed", "metadata", "onlydeps", "operation",
+		"root", "type_name",
 		"cp", "cpv_slot", "pv_split", "slot_atom")
 	def __init__(self, **kwargs):
 		Task.__init__(self, **kwargs)
@@ -1297,11 +1302,12 @@
 	def _get_hash_key(self):
 		hash_key = getattr(self, "_hash_key", None)
 		if hash_key is None:
-			operation = "merge"
-			if self.onlydeps or self.installed:
-				operation = "nomerge"
+			if self.operation is None:
+				self.operation = "merge"
+				if self.onlydeps or self.installed:
+					self.operation = "nomerge"
 			self._hash_key = \
-				(self.type_name, self.root, self.cpv, operation)
+				(self.type_name, self.root, self.cpv, self.operation)
 		return self._hash_key
 
 	def __lt__(self, other):
@@ -1332,15 +1338,6 @@
 			return True
 		return False
 
-class Uninstall(Package):
-	__slots__ = ()
-	def _get_hash_key(self):
-		hash_key = getattr(self, "_hash_key", None)
-		if hash_key is None:
-			self._hash_key = \
-				(self.type_name, self.root, self.cpv, "uninstall")
-		return self._hash_key
-
 class DependencyArg(object):
 	def __init__(self, arg=None, root_config=None):
 		self.arg = arg
@@ -1415,14 +1412,27 @@
 		cache_valid = self._cache_data and \
 			isinstance(self._cache_data, dict) and \
 			self._cache_data.get("version") == self._cache_version and \
-			self._cache_data.get("virtuals") == self._virtuals and \
-			set(self._cache_data.get("blockers", [])) == self._installed_pkgs
+			isinstance(self._cache_data.get("blockers"), dict)
 		if cache_valid:
-			for pkg in self._installed_pkgs:
-				if long(self._vardb.aux_get(pkg, ["COUNTER"])[0]) != \
-					self[pkg].counter:
-					cache_valid = False
-					break
+			invalid_cache = set()
+			for cpv, value \
+				in self._cache_data["blockers"].iteritems():
+				if not (isinstance(value, tuple) and len(value) == 2):
+					invalid_cache.add(cpv)
+					continue
+				counter, atoms = value
+				try:
+					if counter != long(self._vardb.aux_get(cpv, ["COUNTER"])[0]):
+						invalid_cache.add(cpv)
+						continue
+				except KeyError:
+					# The package is no longer installed.
+					invalid_cache.add(cpv)
+					continue
+			for cpv in invalid_cache:
+				del self._cache_data["blockers"][cpv]
+			if not self._cache_data["blockers"]:
+				cache_valid = False
 		if not cache_valid:
 			self._cache_data = {"version":self._cache_version}
 			self._cache_data["blockers"] = {}
@@ -3063,12 +3073,14 @@
 		# accounted for.
 		self._select_atoms = self._select_atoms_from_graph
 		self._select_package = self._select_pkg_from_graph
-		self.myparams.add("deep")
+		already_deep = "deep" in self.myparams
+		if not already_deep:
+			self.myparams.add("deep")
 
 		for root in self.roots:
 			required_set_names = self._required_set_names.copy()
 			if root == self.target_root and \
-				("deep" in self.myparams or "empty" in self.myparams):
+				(already_deep or "empty" in self.myparams):
 				required_set_names.difference_update(self._sets)
 			if not required_set_names and not self._ignored_deps:
 				continue
@@ -3162,25 +3174,50 @@
 			final_db = self.mydbapi[myroot]
 			cpv_all_installed = self.trees[myroot]["vartree"].dbapi.cpv_all()
 			blocker_cache = BlockerCache(myroot, vardb)
-			for pkg in cpv_all_installed:
+			for cpv in cpv_all_installed:
 				blocker_atoms = None
-				metadata = dict(izip(self._mydbapi_keys,
-					vardb.aux_get(pkg, self._mydbapi_keys)))
-				node = Package(cpv=pkg, built=True,
-					installed=True, metadata=metadata,
-					type_name="installed", root=myroot)
-				if self.digraph.contains(node):
-					continue
+				pkg = self._pkg_cache[
+					("installed", myroot, cpv, "nomerge")]
+				blockers = None
+				if self.digraph.contains(pkg):
+					try:
+						blockers = self._blocker_parents.child_nodes(pkg)
+					except KeyError:
+						blockers = []
+				if blockers is not None:
+					blockers = set("!" + blocker.atom \
+						for blocker in blockers)
+
 				# If this node has any blockers, create a "nomerge"
 				# node for it so that they can be enforced.
 				self.spinner.update()
-				blocker_data = blocker_cache.get(pkg)
+				blocker_data = blocker_cache.get(cpv)
+
+				# If blocker data from the graph is available, use
+				# it to validate the cache and update the cache if
+				# it seems invalid.
+				if blocker_data is not None and \
+					blockers is not None:
+					if not blockers.symmetric_difference(
+						blocker_data.atoms):
+						continue
+					blocker_data = None
+
+				if blocker_data is None and \
+					blockers is not None:
+					# Re-use the blockers from the graph.
+					blocker_atoms = sorted(blockers)
+					counter = long(pkg.metadata["COUNTER"])
+					blocker_data = \
+						blocker_cache.BlockerData(counter, blocker_atoms)
+					blocker_cache[pkg.cpv] = blocker_data
+					continue
+
 				if blocker_data:
 					blocker_atoms = blocker_data.atoms
 				else:
-					dep_vals = vardb.aux_get(pkg, dep_keys)
-					myuse = vardb.aux_get(pkg, ["USE"])[0].split()
-					depstr = " ".join(dep_vals)
+					myuse = pkg.metadata["USE"].split()
+					depstr = " ".join(pkg.metadata[k] for k in dep_keys)
 					# It is crucial to pass in final_db here in order to
 					# optimize dep_check calls by eliminating atoms via
 					# dep_wordreduce and dep_eval calls.
@@ -3198,41 +3235,64 @@
 						# matches (this can happen if an atom lacks a
 						# category).
 						show_invalid_depstring_notice(
-							node, depstr, str(e))
+							pkg, depstr, str(e))
 						del e
 						raise
 					finally:
 						portage.dep._dep_check_strict = True
 					if not success:
-						slot_atom = "%s:%s" % (portage.dep_getkey(pkg),
-							vardb.aux_get(pkg, ["SLOT"])[0])
-						if slot_atom in modified_slots[myroot]:
+						if pkg.slot_atom in modified_slots[myroot]:
 							# This package is being replaced anyway, so
 							# ignore invalid dependencies so as not to
 							# annoy the user too much (otherwise they'd be
 							# forced to manually unmerge it first).
 							continue
-						show_invalid_depstring_notice(node, depstr, atoms)
+						show_invalid_depstring_notice(pkg, depstr, atoms)
 						return False
 					blocker_atoms = [myatom for myatom in atoms \
 						if myatom.startswith("!")]
-					counter = long(vardb.aux_get(pkg, ["COUNTER"])[0])
-					blocker_cache[pkg] = \
+					blocker_atoms.sort()
+					counter = long(pkg.metadata["COUNTER"])
+					blocker_cache[cpv] = \
 						blocker_cache.BlockerData(counter, blocker_atoms)
 				if blocker_atoms:
 					for myatom in blocker_atoms:
 						blocker = Blocker(atom=myatom[1:], root=myroot)
-						self._blocker_parents.add(blocker, node)
+						self._blocker_parents.add(blocker, pkg)
 			blocker_cache.flush()
 			del blocker_cache
 
 		for blocker in self._blocker_parents.leaf_nodes():
 			self.spinner.update()
+			root_config = self.roots[blocker.root]
+			virtuals = root_config.settings.getvirtuals()
 			mytype, myroot, mydep = blocker
 			initial_db = self.trees[myroot]["vartree"].dbapi
 			final_db = self.mydbapi[myroot]
-			blocked_initial = initial_db.match(mydep)
-			blocked_final = final_db.match(mydep)
+
+			provider_virtual = False
+			if blocker.cp in virtuals and \
+				not self._have_new_virt(blocker.root, blocker.cp):
+				provider_virtual = True
+
+			if provider_virtual:
+				atoms = []
+				for provider_entry in virtuals[blocker.cp]:
+					provider_cp = \
+						portage.dep_getkey(provider_entry)
+					atoms.append(blocker.atom.replace(
+						blocker.cp, provider_cp))
+			else:
+				atoms = [blocker.atom]
+
+			blocked_initial = []
+			for atom in atoms:
+				blocked_initial.extend(initial_db.match(atom))
+
+			blocked_final = []
+			for atom in atoms:
+				blocked_final.extend(final_db.match(atom))
+
 			if not blocked_initial and not blocked_final:
 				parent_pkgs = self._blocker_parents.parent_nodes(blocker)
 				self._blocker_parents.remove(blocker)
@@ -3322,9 +3382,10 @@
 
 				if not unresolved_blocks and depends_on_order:
 					for inst_pkg, inst_task in depends_on_order:
-						uninst_task = Uninstall(built=inst_pkg.built,
+						uninst_task = Package(built=inst_pkg.built,
 							cpv=inst_pkg.cpv, installed=inst_pkg.installed,
-							metadata=inst_pkg.metadata, root=inst_pkg.root,
+							metadata=inst_pkg.metadata,
+							operation="uninstall", root=inst_pkg.root,
 							type_name=inst_pkg.type_name)
 						self._pkg_cache[uninst_task] = uninst_task
 						# Enforce correct merge order with a hard dep.
@@ -3437,7 +3498,8 @@
 			since those should be executed as late as possible.
 			"""
 			return [node for node in mygraph.leaf_nodes(**kwargs) \
-				if not isinstance(node, Uninstall)]
+				if isinstance(node, Package) and \
+				node.operation != "uninstall"]
 		if True:
 			for node in mygraph.order:
 				if node.root == "/" and \
@@ -3764,7 +3826,8 @@
 			# and uninstallation tasks.
 			solved_blockers = set()
 			uninst_task = None
-			if isinstance(node, Uninstall):
+			if isinstance(node, Package) and \
+				"uninstall" == node.operation:
 				have_uninstall_task = True
 				uninst_task = node
 			else:
@@ -3794,7 +3857,8 @@
 				if node[-1] != "nomerge":
 					retlist.append(node)
 
-				if isinstance(node, Uninstall):
+				if isinstance(node, Package) and \
+					"uninstall" == node.operation:
 					# Include satisfied blockers in the merge list so
 					# that the user can see why the package had to be
 					# uninstalled in advance rather than through
@@ -4753,13 +4817,10 @@
 			metadata["USE"] = pkgsettings["PORTAGE_USE"]
 			installed = action == "uninstall"
 			built = pkg_type != "ebuild"
-			if installed:
-				pkg_constructor = Uninstall
-			else:
-				pkg_constructor = Package
-			pkg = pkg_constructor(built=built, cpv=pkg_key,
+			pkg = Package(built=built, cpv=pkg_key,
 				installed=installed, metadata=metadata,
-				root=myroot, type_name=pkg_type)
+				operation=action, root=myroot,
+				type_name=pkg_type)
 			self._pkg_cache[pkg] = pkg
 			fakedb[myroot].cpv_inject(pkg)
 			self.spinner.update()
@@ -5161,7 +5222,7 @@
 		if "--resume" not in self.myopts:
 			mymergelist = mylist
 			mtimedb["resume"]["mergelist"] = [list(x) for x in mymergelist \
-				if isinstance(x, (Package, Uninstall))]
+				if isinstance(x, Package)]
 			mtimedb.commit()
 
 		myfeat = self.settings.features[:]
@@ -5218,11 +5279,9 @@
 			mymergelist = [x for x in mymergelist if x[-1] == "merge"]
 		mergecount=0
 		for x in task_list:
-			pkg_type = x[0]
-			if pkg_type == "blocks":
+			if x[0] == "blocks":
 				continue
-			myroot=x[1]
-			pkg_key = x[2]
+			pkg_type, myroot, pkg_key, operation = x
 			pkgindex=2
 			built = pkg_type != "ebuild"
 			installed = pkg_type == "installed"
@@ -5258,14 +5317,12 @@
 				# isn't installed anymore. Since it's already
 				# been uninstalled, move on to the next task.
 				continue
-			if installed:
-				pkg_constructor = Uninstall
-			else:
-				pkg_constructor = Package
+			if not installed:
 				mergecount += 1
-			pkg = pkg_constructor(type_name=pkg_type, root=myroot,
-				cpv=pkg_key, built=built, installed=installed,
-				metadata=metadata)
+			pkg = Package(cpv=pkg_key, built=built,
+				installed=installed, metadata=metadata,
+				operation=operation, root=myroot,
+				type_name=pkg_type)
 			if pkg.installed:
 				if not (buildpkgonly or fetchonly or pretend):
 					self._uninstall_queue.append(pkg)
@@ -5590,7 +5647,7 @@
 	return os.EX_OK
 
 def unmerge(root_config, myopts, unmerge_action,
-	unmerge_files, ldpath_mtimes, autoclean=0, clean_world=1):
+	unmerge_files, ldpath_mtimes, autoclean=0, clean_world=1, ordered=0):
 	settings = root_config.settings
 	sets = root_config.sets
 	vartree = root_config.trees["vartree"]
@@ -5864,7 +5921,25 @@
 					pkgmap[cp]["protected"].add(cpv)
 
 	del installed_sets
-
+
+	# Unmerge order only matters in some cases
+	if not ordered:
+		unordered = {}
+		for d in pkgmap:
+			selected = d["selected"]
+			if not selected:
+				continue
+			cp = portage.cpv_getkey(iter(selected).next())
+			cp_dict = unordered.get(cp)
+			if cp_dict is None:
+				cp_dict = {}
+				unordered[cp] = cp_dict
+				for k in d:
+					cp_dict[k] = set()
+			for k, v in d.iteritems():
+				cp_dict[k].update(v)
+		pkgmap = [unordered[cp] for cp in sorted(unordered)]
+
 	for x in xrange(len(pkgmap)):
 		selected = pkgmap[x]["selected"]
 		if not selected:
@@ -7519,11 +7594,12 @@
 			if cpv in clean_set:
 				graph.add(cpv, node, priority=priority)
 
+	ordered = True
 	if len(graph.order) == len(graph.root_nodes()):
 		# If there are no dependencies between packages
-		# then just unmerge them alphabetically.
-		cleanlist = graph.order[:]
-		cleanlist.sort()
+		# let unmerge() group them by cat/pn.
+		ordered = False
+		cleanlist = graph.all_nodes()
 	else:
 		# Order nodes from lowest to highest overall reference count for
 		# optimal root node selection.
@@ -7553,8 +7629,8 @@
 			graph.remove(node)
 			cleanlist.append(node)
 
-	unmerge(root_config, myopts,
-		"unmerge", cleanlist, ldpath_mtimes)
+	unmerge(root_config, myopts, "unmerge", cleanlist,
+		ldpath_mtimes, ordered=ordered)
 
 	if action == "prune":
 		return
@@ -8491,8 +8567,11 @@
 		(myaction == "prune" and "--nodeps" in myopts):
 		validate_ebuild_environment(trees)
 		root_config = trees[settings["ROOT"]]["root_config"]
+		# When given a list of atoms, unmerge
+		# them in the order given.
+		ordered = myaction == "unmerge"
 		if 1 == unmerge(root_config, myopts, myaction, myfiles,
-			mtimedb["ldpath"]):
+			mtimedb["ldpath"], ordered=ordered):
 			if not (buildpkgonly or fetchonly or pretend):
 				post_emerge(trees, mtimedb, os.EX_OK)
 

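Buried in the hunks above is the new shape of the blocker cache: each entry
is a (counter, atoms) pair keyed by cpv, and stale entries are pruned lazily
instead of the whole cache being discarded. The pruning scheme boils down to
something like this (a simplified sketch, not the real BlockerCache class):

def prune_stale_entries(blockers, get_counter):
	"""blockers maps cpv -> (counter, atoms); get_counter(cpv) returns
	the current vardb COUNTER for cpv or raises KeyError if it is gone."""
	stale = set()
	for cpv, value in list(blockers.items()):
		if not (isinstance(value, tuple) and len(value) == 2):
			stale.add(cpv)      # malformed entry
			continue
		counter, atoms = value
		try:
			if counter != get_counter(cpv):
				stale.add(cpv)  # package was re-merged since it was cached
		except KeyError:
			stale.add(cpv)      # package is no longer installed
	for cpv in stale:
		del blockers[cpv]
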
Modified: main/branches/prefix/pym/portage/__init__.py
===================================================================
--- main/branches/prefix/pym/portage/__init__.py	2008-05-02 14:36:59 UTC (rev 10087)
+++ main/branches/prefix/pym/portage/__init__.py	2008-05-02 14:39:08 UTC (rev 10088)
@@ -4774,7 +4774,35 @@
 		writemsg("does not exist. Please create this directory or " + \
 			"correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
 		return 1
+
+	# as some people use a separate PORTAGE_TMPDIR mount
+	# we prefer that as the checks below would otherwise be pointless
+	# for those people.
+	if os.path.exists(os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")):
+		checkdir = os.path.join(mysettings["PORTAGE_TMPDIR"], "portage")
+	else:
+		checkdir = mysettings["PORTAGE_TMPDIR"]
 
+	if not os.access(checkdir, os.W_OK):
+		writemsg("%s is not writable.\n" % checkdir + \
+			"Likely cause is that you've mounted it as readonly.\n" \
+			, noiselevel=-1)
+		return 1
+	else:
+		from tempfile import NamedTemporaryFile
+		fd = NamedTemporaryFile(prefix="exectest-", dir=checkdir)
+		os.chmod(fd.name, 0755)
+		if not os.access(fd.name, os.X_OK):
+			writemsg("Can not execute files in %s\n" % checkdir + \
+				"Likely cause is that you've mounted it with one of the\n" + \
+				"following mount options: 'noexec', 'user', 'users'\n\n" + \
+				"Please make sure that portage can execute files in this directory.\n" \
+				, noiselevel=-1)
+			fd.close()
+			return 1
+		fd.close()
+	del checkdir
+
 	if mydo == "unmerge":
 		return unmerge(mysettings["CATEGORY"],
 			mysettings["PF"], myroot, mysettings, vartree=vartree)
@@ -5422,6 +5450,12 @@
 			continue
 		mychoices = myvirtuals.get(mykey, [])
 		isblocker = x.startswith("!")
+		if isblocker:
+			# Virtual blockers are no longer expanded here since
+			# the un-expanded virtual atom is more useful for
+			# maintaining a cache of blocker atoms.
+			newsplit.append(x)
+			continue
 		match_atom = x
 		if isblocker:
 			match_atom = x[1:]

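The PORTAGE_TMPDIR probe added to doebuild() above is easy to exercise in
isolation. A stand-alone sketch of the same technique, written with the
modern octal literal so it also runs on current Python (the path is just an
example):

import os
from tempfile import NamedTemporaryFile

def tmpdir_usable(checkdir):
	"""True if checkdir is writable and files created there are
	executable, i.e. it is not mounted readonly or noexec."""
	if not os.access(checkdir, os.W_OK):
		return False  # e.g. mounted readonly
	f = NamedTemporaryFile(prefix="exectest-", dir=checkdir)
	try:
		os.chmod(f.name, 0o755)
		# access(..., X_OK) fails when the filesystem is mounted noexec
		return os.access(f.name, os.X_OK)
	finally:
		f.close()

print(tmpdir_usable("/var/tmp"))
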
--
gentoo-commits@l.g.o mailing list