From: "Fabian Groffen (grobian)" <grobian@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] portage r10168 - main/branches/prefix/pym/_emerge
Date: Sun, 04 May 2008 07:49:47
Message-Id: E1JsYye-00086Z-1i@stork.gentoo.org
1 Author: grobian
2 Date: 2008-05-04 07:49:43 +0000 (Sun, 04 May 2008)
3 New Revision: 10168
4
5 Modified:
6 main/branches/prefix/pym/_emerge/__init__.py
7 Log:
8 Merged from trunk 10091:10124
9
10 | 10092 | Clean up merge list handling: * In |
11 | zmedico | depgraph.loadResumeCommand(), populate |
12 | | _serialized_tasks_cache so that it can be returned from |
13 | | depgraph.altlist(). * Handle --skipfirst inside |
14 | | depgraph.loadResumeCommand(), and fix it to skip the first |
15 | | "merge" task and never skip "uninstall" tasks. * Always get |
16 | | the merge list from depgraph.altlist() instead of using the |
17 | | mtimedb directly. The list returned from depgraph.altlist() |
18 | | contains Package instances with cached metadata. * Remove |
19 | | obsolete code from depgraph.display() and MergeTask.merge() |
20 | | since the merge list always comes from depgraph.altlist() |
21 | | now, and never directly from the mtimedb. |
22
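
(Illustration — a minimal standalone sketch of the --skipfirst rule from
10092; the serialized task format [pkg_type, root, cpv, action] is taken
from the patch below, and the helper name skip_first_merge is made up here.)

    def skip_first_merge(mergelist):
        # Drop only the first "merge" task; "uninstall" tasks are
        # never skipped, since leaving them out makes no sense.
        for i, task in enumerate(mergelist):
            if isinstance(task, list) and task and task[-1] == "merge":
                del mergelist[i]
                break
        return mergelist

    tasks = [
        ["installed", "/", "sys-apps/foo-1.0", "uninstall"],
        ["ebuild", "/", "sys-apps/foo-1.1", "merge"],
        ["ebuild", "/", "dev-libs/bar-2.3", "merge"],
    ]
    print(skip_first_merge(tasks))
    # the "uninstall" task is kept; only sys-apps/foo-1.1 is dropped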
23 | 10094 | Validate resume list data some more. |
24 | zmedico | |
25
26 | 10096 | In depgraph._complete_graph(), use cached Package instances |
27 | zmedico | instead of constructing new ones. |
28
29 | 10098 | In depgraph._serialize_tasks(), filter "nomerge" nodes at a |
30 | zmedico | more appropriate place. |
31
32 | 10100 | Use Package instance attributes to clean up and simplify |
33 | zmedico | depgraph.validate_blockers(). |
34
35 | 10102 | Use Package.cp attribute instead of calling cpv_getkey(). |
36 | zmedico | |
37
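
(Illustration of 10102 — a Package parses its category/package name once
and exposes it as the cp attribute, so callers stop calling cpv_getkey()
on every comparison. The version-stripping regex here is a simplification
of portage's real parser, for the sketch only.)

    import re

    def cpv_getkey(cpv):
        # simplified: strip a trailing "-<version>[-r<n>]" from the cpv
        return re.sub(r"-\d[^-]*(-r\d+)?$", "", cpv)

    class Package(object):
        def __init__(self, cpv):
            self.cpv = cpv
            self.cp = cpv_getkey(cpv)  # parsed once, reused everywhere

    pkg = Package("sys-apps/portage-2.1.4")
    print(pkg.cp)  # sys-apps/portage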
38 | 10104 | When searching for a "sys-apps/portage" installation task, |
39 | zmedico | use match_pkgs() instead of sequential search. |
40
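
(Sketch of the indexed lookup behind 10104. FakeVardb and the bare-name
matching are assumptions made for the sketch; the real match_pkgs()
accepts full dependency atoms with version and slot constraints.)

    from collections import namedtuple

    Package = namedtuple("Package", "cp cpv")

    class FakeVardb(object):
        def __init__(self, packages):
            # index packages by category/package name for direct lookup
            self._cp_map = {}
            for pkg in packages:
                self._cp_map.setdefault(pkg.cp, []).append(pkg)

        def match_pkgs(self, atom):
            # bare cp names only; real atoms also constrain version/slot
            return list(self._cp_map.get(atom, []))

    db = FakeVardb([Package("sys-apps/portage", "sys-apps/portage-2.1.4"),
                    Package("dev-libs/bar", "dev-libs/bar-2.3")])
    matches = db.match_pkgs("sys-apps/portage")
    portage_node = matches[0] if matches else None
    print(portage_node)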
41 | 10106 | In depgraph.validate_blockers(), discard any "uninstall" |
42 | zmedico | tasks scheduled by previous calls, since those tasks may not |
43 | | make sense given the current graph state. |
44
45 | 10108 | Move the blocker cache validation into |
46 | zmedico | depgraph.validate_blockers(). |
47
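
(Sketch of the lazy validation pattern 10108 moves into
validate_blockers(): cached entries are checked against the installed
COUNTER only when their package is visited, and entries never visited —
the package was unmerged — are dropped afterwards. Data shapes here are
assumptions based on the patch below.)

    blocker_cache = {
        "sys-apps/foo-1.0": (1, ("!sys-apps/oldfoo",)),  # (counter, atoms)
        "dev-libs/gone-0.1": (7, ()),  # package no longer installed
    }
    installed = {"sys-apps/foo-1.0": 1}  # cpv -> current COUNTER

    stale_cache = set(blocker_cache)
    for cpv, counter in installed.items():
        stale_cache.discard(cpv)
        blocker_data = blocker_cache.get(cpv)
        if blocker_data is not None and blocker_data[0] != counter:
            blocker_data = None  # counter mismatch: treat as a cache miss
        if blocker_data is None:
            pass  # recompute blocker atoms here and update the cache

    for cpv in stale_cache:
        del blocker_cache[cpv]  # unmerged package: drop its stale entry
    print(sorted(blocker_cache))  # ['sys-apps/foo-1.0']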
48 | 10110 | Use match_pkgs() to simplify package comparison. |
49 | zmedico | |
50
51 | 10112 | Add a PackageVirtualDbapi.__iter__() method and use it to |
52 | zmedico | iterate over all installed packages in |
53 | | depgraph.validate_blockers(). |
54
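
(The PackageVirtualDbapi.__iter__() addition in 10112, reduced to its
core — a minimal sketch, not the full class.)

    from collections import namedtuple

    Package = namedtuple("Package", "cpv metadata")

    class PackageVirtualDbapi(object):
        def __init__(self):
            self._cpv_map = {}

        def cpv_inject(self, pkg):
            self._cpv_map[pkg.cpv] = pkg

        def __iter__(self):
            # yield the cached Package instances themselves, so callers
            # get objects with metadata attached rather than bare cpvs
            return iter(self._cpv_map.values())

    vardb = PackageVirtualDbapi()
    vardb.cpv_inject(Package("sys-apps/foo-1.0", {"COUNTER": "1"}))
    for pkg in vardb:
        print(pkg.cpv)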
55 | 10114 | * Check for masked packages in the --resume merge list and |
56 | zmedico | bail out in that case (bug #199408). * In |
57 | | depgraph.loadResumeCommand(), create a dependency graph from |
58 | | the resume list. This ensures that the merge list has its |
59 | | dependencies satisfied. It also enables --tree display |
60 | | together with --resume. |
61
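
(The bug #199408 fix in 10114, sketched: while rebuilding the resume
list, any scheduled merge whose package is no longer visible is
collected, and the resume bails out instead of silently merging a masked
package. visible() here is a stand-in for portage's real visibility
check, and the data below is invented for the sketch.)

    from collections import namedtuple

    Package = namedtuple("Package", "cpv operation")

    def visible(settings, pkg):
        # stand-in: real portage checks package.mask, keywords, etc.
        return pkg.cpv not in settings["masked"]

    settings = {"masked": set(["dev-libs/bar-2.3"])}
    serialized_tasks = [Package("sys-apps/foo-1.1", "merge"),
                        Package("dev-libs/bar-2.3", "merge")]

    unsatisfied = ["=" + pkg.cpv for pkg in serialized_tasks
                   if pkg.operation == "merge" and not visible(settings, pkg)]
    if unsatisfied:
        # bail out rather than resume with masked packages
        raise SystemExit("masked packages in resume list: %s" % unsatisfied)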
62 | 10115 | Fix --resume mode to check for unsatisfied blockers and bail |
63 | zmedico | out when necessary. |
64
65 | 10116 | Add --nodeps to the parallel-fetch --resume options since |
66 | zmedico | recalculation of deps is a waste here. |
67
68 | 10117 | Delete the resume list(s) if there is an unsatisfied block. |
69 | zmedico | |
70
71 | 10119 | Fix depgraph.loadResumeCommand() to identify an unsatisfied |
72 | zmedico | dependency and invalidate the resume list in that case. |
73
74 | 10121 | Make --nodeps automatically disable --tree. |
75 | zmedico | |
76
77 | 10123 | Move --resume options handling to the beginning of |
78 | zmedico | action_build() so that the current options are updated with |
79 | | the saved options as soon as possible. |
80
81 | 10124 | Also move mtimedb resume list validation earlier. |
82 | zmedico | |
83
84
85 Modified: main/branches/prefix/pym/_emerge/__init__.py
86 ===================================================================
87 --- main/branches/prefix/pym/_emerge/__init__.py 2008-05-04 07:46:00 UTC (rev 10167)
88 +++ main/branches/prefix/pym/_emerge/__init__.py 2008-05-04 07:49:43 UTC (rev 10168)
89 @@ -1413,26 +1413,6 @@
90 isinstance(self._cache_data, dict) and \
91 self._cache_data.get("version") == self._cache_version and \
92 isinstance(self._cache_data.get("blockers"), dict)
93 - if cache_valid:
94 - invalid_cache = set()
95 - for cpv, value \
96 - in self._cache_data["blockers"].iteritems():
97 - if not (isinstance(value, tuple) and len(value) == 2):
98 - invalid_cache.add(cpv)
99 - continue
100 - counter, atoms = value
101 - try:
102 - if counter != long(self._vardb.aux_get(cpv, ["COUNTER"])[0]):
103 - invalid_cache.add(cpv)
104 - continue
105 - except KeyError:
106 - # The package is no longer installed.
107 - invalid_cache.add(cpv)
108 - continue
109 - for cpv in invalid_cache:
110 - del self._cache_data["blockers"][cpv]
111 - if not self._cache_data["blockers"]:
112 - cache_valid = False
113 if not cache_valid:
114 self._cache_data = {"version":self._cache_version}
115 self._cache_data["blockers"] = {}
116 @@ -1481,6 +1461,13 @@
117 (blocker_data.counter, blocker_data.atoms)
118 self._modified = True
119
120 + def __iter__(self):
121 + return iter(self._cache_data["blockers"])
122 +
123 + def __delitem__(self, cpv):
124 + del self._cache_data["blockers"][cpv]
125 + self._modified = True
126 +
127 def __getitem__(self, cpv):
128 """
129 @rtype: BlockerData
130 @@ -1491,9 +1478,7 @@
131 def keys(self):
132 """This needs to be implemented so that self.__repr__() doesn't raise
133 an AttributeError."""
134 - if self._cache_data and "blockers" in self._cache_data:
135 - return self._cache_data["blockers"].keys()
136 - return []
137 + return list(self)
138
139 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
140
141 @@ -1554,6 +1539,9 @@
142 obj._cpv_map = self._cpv_map.copy()
143 return obj
144
145 + def __iter__(self):
146 + return self._cpv_map.itervalues()
147 +
148 def __contains__(self, item):
149 existing = self._cpv_map.get(item.cpv)
150 if existing is not None and \
151 @@ -3119,7 +3107,7 @@
152 # that are initially satisfied.
153 while self._unsatisfied_deps:
154 dep = self._unsatisfied_deps.pop()
155 - matches = vardb.match(dep.atom)
156 + matches = vardb.match_pkgs(dep.atom)
157 if not matches:
158 # Initially unsatisfied.
159 continue
160 @@ -3127,12 +3115,7 @@
161 # Add the installed package to the graph so that it
162 # will be appropriately reported as a slot collision
163 # (possibly solvable via backtracking).
164 - cpv = matches[-1] # highest match
165 - metadata = dict(izip(self._mydbapi_keys,
166 - vardb.aux_get(cpv, self._mydbapi_keys)))
167 - pkg = Package(type_name="installed", root=root,
168 - cpv=cpv, metadata=metadata, built=True,
169 - installed=True)
170 + pkg = matches[-1] # highest match
171 if not self._add_pkg(pkg, dep.parent,
172 priority=dep.priority, depth=dep.depth):
173 return 0
174 @@ -3150,15 +3133,6 @@
175 "--nodeps" in self.myopts:
176 return True
177
178 - modified_slots = {}
179 - for myroot in self.trees:
180 - myslots = {}
181 - modified_slots[myroot] = myslots
182 - final_db = self.mydbapi[myroot]
183 - for pkg in self._slot_pkg_map[myroot].itervalues():
184 - if not (pkg.installed or pkg.onlydeps):
185 - myslots[pkg.slot_atom] = pkg.cpv
186 -
187 #if "deep" in self.myparams:
188 if True:
189 # Pull in blockers from all installed packages that haven't already
190 @@ -3172,12 +3146,12 @@
191 portdb = self.trees[myroot]["porttree"].dbapi
192 pkgsettings = self.pkgsettings[myroot]
193 final_db = self.mydbapi[myroot]
194 - cpv_all_installed = self.trees[myroot]["vartree"].dbapi.cpv_all()
195 blocker_cache = BlockerCache(myroot, vardb)
196 - for cpv in cpv_all_installed:
197 + stale_cache = set(blocker_cache)
198 + for pkg in vardb:
199 + cpv = pkg.cpv
200 + stale_cache.discard(cpv)
201 blocker_atoms = None
202 - pkg = self._pkg_cache[
203 - ("installed", myroot, cpv, "nomerge")]
204 blockers = None
205 if self.digraph.contains(pkg):
206 try:
207 @@ -3192,6 +3166,9 @@
208 # node for it so that they can be enforced.
209 self.spinner.update()
210 blocker_data = blocker_cache.get(cpv)
211 + if blocker_data is not None and \
212 + blocker_data.counter != long(pkg.metadata["COUNTER"]):
213 + blocker_data = None
214
215 # If blocker data from the graph is available, use
216 # it to validate the cache and update the cache if
217 @@ -3217,7 +3194,9 @@
218 blocker_atoms = blocker_data.atoms
219 else:
220 myuse = pkg.metadata["USE"].split()
221 - depstr = " ".join(pkg.metadata[k] for k in dep_keys)
222 + # Use aux_get() to trigger FakeVartree global
223 + # updates on *DEPEND when appropriate.
224 + depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
225 # It is crucial to pass in final_db here in order to
226 # optimize dep_check calls by eliminating atoms via
227 # dep_wordreduce and dep_eval calls.
228 @@ -3241,7 +3220,9 @@
229 finally:
230 portage.dep._dep_check_strict = True
231 if not success:
232 - if pkg.slot_atom in modified_slots[myroot]:
233 + replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
234 + if replacement_pkg and \
235 + replacement_pkg[0].operation == "merge":
236 # This package is being replaced anyway, so
237 # ignore invalid dependencies so as not to
238 # annoy the user too much (otherwise they'd be
239 @@ -3259,9 +3240,19 @@
240 for myatom in blocker_atoms:
241 blocker = Blocker(atom=myatom[1:], root=myroot)
242 self._blocker_parents.add(blocker, pkg)
243 + for cpv in stale_cache:
244 + del blocker_cache[cpv]
245 blocker_cache.flush()
246 del blocker_cache
247
248 + # Discard any "uninstall" tasks scheduled by previous calls
249 + # to this method, since those tasks may not make sense given
250 + # the current graph state.
251 + previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
252 + if previous_uninstall_tasks:
253 + self._blocker_uninstalls = digraph()
254 + self.digraph.difference_update(previous_uninstall_tasks)
255 +
256 for blocker in self._blocker_parents.leaf_nodes():
257 self.spinner.update()
258 root_config = self.roots[blocker.root]
259 @@ -3287,11 +3278,11 @@
260
261 blocked_initial = []
262 for atom in atoms:
263 - blocked_initial.extend(initial_db.match(atom))
264 + blocked_initial.extend(initial_db.match_pkgs(atom))
265
266 blocked_final = []
267 for atom in atoms:
268 - blocked_final.extend(final_db.match(atom))
269 + blocked_final.extend(final_db.match_pkgs(atom))
270
271 if not blocked_initial and not blocked_final:
272 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
273 @@ -3301,27 +3292,11 @@
274 if not self._blocker_parents.child_nodes(pkg):
275 self._blocker_parents.remove(pkg)
276 continue
277 - blocked_slots_initial = {}
278 - blocked_slots_final = {}
279 - for cpv in blocked_initial:
280 - blocked_slots_initial[cpv] = \
281 - "%s:%s" % (portage.dep_getkey(cpv),
282 - initial_db.aux_get(cpv, ["SLOT"])[0])
283 - for cpv in blocked_final:
284 - blocked_slots_final[cpv] = \
285 - "%s:%s" % (portage.dep_getkey(cpv),
286 - final_db.aux_get(cpv, ["SLOT"])[0])
287 for parent in self._blocker_parents.parent_nodes(blocker):
288 - ptype, proot, pcpv, pstatus = parent
289 - pdbapi = self.trees[proot][self.pkg_tree_map[ptype]].dbapi
290 - pslot = pdbapi.aux_get(pcpv, ["SLOT"])[0]
291 - pslot_atom = "%s:%s" % (portage.dep_getkey(pcpv), pslot)
292 - parent_static = pslot_atom not in modified_slots[proot]
293 unresolved_blocks = False
294 depends_on_order = set()
295 - for cpv in blocked_initial:
296 - slot_atom = blocked_slots_initial[cpv]
297 - if slot_atom == pslot_atom:
298 + for pkg in blocked_initial:
299 + if pkg.slot_atom == parent.slot_atom:
300 # TODO: Support blocks within slots in cases where it
301 # might make sense. For example, a new version might
302 # require that the old version be uninstalled at build
303 @@ -3333,39 +3308,31 @@
304 # is already done and this would be likely to
305 # confuse users if displayed like a normal blocker.
306 continue
307 - if pstatus == "merge":
308 + if parent.operation == "merge":
309 # Maybe the blocked package can be replaced or simply
310 # unmerged to resolve this block.
311 - inst_pkg = self._pkg_cache[
312 - ("installed", myroot, cpv, "nomerge")]
313 - depends_on_order.add((inst_pkg, parent))
314 + depends_on_order.add((pkg, parent))
315 continue
316 # None of the above blocker resolutions techniques apply,
317 # so apparently this one is unresolvable.
318 unresolved_blocks = True
319 - for cpv in blocked_final:
320 - slot_atom = blocked_slots_final[cpv]
321 - if slot_atom == pslot_atom:
322 + for pkg in blocked_final:
323 + if pkg.slot_atom == parent.slot_atom:
324 # TODO: Support blocks within slots.
325 continue
326 - if parent_static and \
327 - slot_atom not in modified_slots[myroot]:
328 + if parent.operation == "nomerge" and \
329 + pkg.operation == "nomerge":
330 # This blocker will be handled the next time that a
331 # merge of either package is triggered.
332 continue
333
334 # Maybe the blocking package can be
335 # unmerged to resolve this block.
336 - try:
337 - blocked_pkg = self._slot_pkg_map[myroot][slot_atom]
338 - except KeyError:
339 - blocked_pkg = self._pkg_cache[
340 - ("installed", myroot, cpv, "nomerge")]
341 - if pstatus == "merge" and blocked_pkg.installed:
342 - depends_on_order.add((blocked_pkg, parent))
343 + if parent.operation == "merge" and pkg.installed:
344 + depends_on_order.add((pkg, parent))
345 continue
346 - elif pstatus == "nomerge":
347 - depends_on_order.add((parent, blocked_pkg))
348 + elif parent.operation == "nomerge":
349 + depends_on_order.add((parent, pkg))
350 continue
351 # None of the above blocker resolutions techniques apply,
352 # so apparently this one is unresolvable.
353 @@ -3491,7 +3458,7 @@
354 complete = "complete" in self.myparams
355 myblocker_parents = self._blocker_parents.copy()
356 asap_nodes = []
357 - portage_node = None
358 +
359 def get_nodes(**kwargs):
360 """
361 Returns leaf nodes excluding Uninstall instances
362 @@ -3500,13 +3467,18 @@
363 return [node for node in mygraph.leaf_nodes(**kwargs) \
364 if isinstance(node, Package) and \
365 node.operation != "uninstall"]
366 - if True:
367 - for node in mygraph.order:
368 - if node.root == "/" and \
369 - "sys-apps/portage" == portage.cpv_getkey(node.cpv):
370 - portage_node = node
371 - asap_nodes.append(node)
372 - break
373 +
374 + # sys-apps/portage needs special treatment if ROOT="/"
375 + portage_node = self.mydbapi["/"].match_pkgs("sys-apps/portage")
376 + if portage_node:
377 + portage_node = portage_node[0]
378 + else:
379 + portage_node = None
380 + if portage_node is not None and \
381 + (not mygraph.contains(portage_node) or \
382 + portage_node.operation == "nomerge"):
383 + portage_node = None
384 +
385 ignore_priority_soft_range = [None]
386 ignore_priority_soft_range.extend(
387 xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
388 @@ -3719,9 +3691,8 @@
389 for atom in root_config.sets[
390 "world"].iterAtomsForPackage(task):
391 satisfied = False
392 - for cpv in graph_db.match(atom):
393 - if cpv == inst_pkg.cpv and \
394 - inst_pkg in graph_db:
395 + for pkg in graph_db.match_pkgs(atom):
396 + if pkg == inst_pkg:
397 continue
398 satisfied = True
399 break
400 @@ -3797,12 +3768,14 @@
401 node = tempgraph.order[0]
402 else:
403 node = nodes[0]
404 - display_order.append(list(node))
405 + display_order.append(node)
406 tempgraph.remove(node)
407 display_order.reverse()
408 self.myopts.pop("--quiet", None)
409 self.myopts.pop("--verbose", None)
410 self.myopts["--tree"] = True
411 + print
412 + print
413 self.display(display_order)
414 print "!!! Error: circular dependencies:"
415 print
416 @@ -3821,6 +3794,9 @@
417 mygraph.difference_update(selected_nodes)
418
419 for node in selected_nodes:
420 + if isinstance(node, Package) and \
421 + node.operation == "nomerge":
422 + continue
423
424 # Handle interactions between blockers
425 # and uninstallation tasks.
426 @@ -3854,8 +3830,7 @@
427 myblocker_uninstalls.remove(blocker)
428 solved_blockers.add(blocker)
429
430 - if node[-1] != "nomerge":
431 - retlist.append(node)
432 + retlist.append(node)
433
434 if isinstance(node, Package) and \
435 "uninstall" == node.operation:
436 @@ -4145,18 +4120,7 @@
437 pkg_status = "nomerge"
438 built = pkg_type != "ebuild"
439 installed = pkg_type == "installed"
440 - try:
441 - pkg = self._pkg_cache[tuple(x)]
442 - except KeyError:
443 - if pkg_status != "uninstall":
444 - raise
445 - # A package scheduled for uninstall apparently
446 - # isn't installed anymore. Since it's already
447 - # been uninstalled, move on to the next task.
448 - # This case should only be reachable in --resume
449 - # mode, since otherwise the package would have
450 - # been cached.
451 - continue
452 + pkg = x
453 metadata = pkg.metadata
454 ebuild_path = None
455 if pkg_type == "binary":
456 @@ -4790,10 +4754,22 @@
457 """
458 self._sets["args"].update(resume_data.get("favorites", []))
459 mergelist = resume_data.get("mergelist", [])
460 + favorites = resume_data.get("favorites")
461 + if not isinstance(favorites, list):
462 + favorites = []
463 +
464 + if mergelist and "--skipfirst" in self.myopts:
465 + for i, task in enumerate(mergelist):
466 + if isinstance(task, list) and \
467 + task and task[-1] == "merge":
468 + del mergelist[i]
469 + break
470 +
471 fakedb = self.mydbapi
472 trees = self.trees
473 + serialized_tasks = []
474 for x in mergelist:
475 - if len(x) != 4:
476 + if not (isinstance(x, list) and len(x) == 4):
477 continue
478 pkg_type, myroot, pkg_key, action = x
479 if pkg_type not in self.pkg_tree_map:
480 @@ -4820,9 +4796,62 @@
481 operation=action, root=myroot,
482 type_name=pkg_type)
483 self._pkg_cache[pkg] = pkg
484 +
485 + root_config = self.roots[pkg.root]
486 + if "merge" == pkg.operation and \
487 + not visible(root_config.settings, pkg):
488 + self._unsatisfied_deps_for_display.append(
489 + ((pkg.root, "="+pkg.cpv), {"myparent":None}))
490 +
491 fakedb[myroot].cpv_inject(pkg)
492 + serialized_tasks.append(pkg)
493 self.spinner.update()
494
495 + if self._unsatisfied_deps_for_display:
496 + return False
497 +
498 + if not serialized_tasks or "--nodeps" in self.myopts:
499 + self._serialized_tasks_cache = serialized_tasks
500 + else:
501 + favorites_set = InternalPackageSet(atom for atom in favorites \
502 + if isinstance(atom, basestring) and portage.isvalidatom(atom))
503 + for node in serialized_tasks:
504 + if isinstance(node, Package) and \
505 + node.operation == "merge" and \
506 + favorites_set.findAtomForPackage(node.cpv, node.metadata):
507 + self._set_nodes.add(node)
508 +
509 + self._select_package = self._select_pkg_from_graph
510 + for task in serialized_tasks:
511 + if isinstance(task, Package) and \
512 + task.operation == "merge":
513 + self._add_pkg(task, None)
514 +
515 + # Allow unsatisfied deps here to avoid showing a masking
516 + # message for an unsatisfied dep that isn't necessarily
517 + # masked.
518 + if not self._create_graph(allow_unsatisfied=True):
519 + return False
520 + if self._unsatisfied_deps:
521 + # This probably means that a required package
522 + # was dropped via --skipfirst. It makes the
523 + # resume list invalid, so convert it to a
524 + # PackageNotFound exception.
525 + raise portage.exception.PackageNotFound(
526 + self._unsatisfied_deps[0].atom)
527 + self._serialized_tasks_cache = None
528 + try:
529 + self.altlist()
530 + except self._unknown_internal_error:
531 + return False
532 +
533 + for node in self.digraph.root_nodes():
534 + if isinstance(node, Package) and \
535 + node.operation == "merge":
536 + # Give hint to the --tree display.
537 + self._set_nodes.add(node)
538 + return True
539 +
540 class _internal_exception(portage.exception.PortageException):
541 def __init__(self, value=""):
542 portage.exception.PortageException.__init__(self, value)
543 @@ -5170,7 +5199,6 @@
544 fetchonly = "--fetchonly" in self.myopts or \
545 "--fetch-all-uri" in self.myopts
546 pretend = "--pretend" in self.myopts
547 - mymergelist=[]
548 ldpath_mtimes = mtimedb["ldpath"]
549 xterm_titles = "notitles" not in self.settings.features
550
551 @@ -5178,12 +5206,6 @@
552 # We're resuming.
553 print colorize("GOOD", "*** Resuming merge...")
554 emergelog(xterm_titles, " *** Resuming merge...")
555 - mylist = mtimedb["resume"]["mergelist"][:]
556 - if "--skipfirst" in self.myopts and mylist:
557 - del mtimedb["resume"]["mergelist"][0]
558 - del mylist[0]
559 - mtimedb.commit()
560 - mymergelist = mylist
561
562 # Verify all the manifests now so that the user is notified of failure
563 # as soon as possible.
564 @@ -5217,14 +5239,14 @@
565 system_set = root_config.sets["system"]
566 args_set = InternalPackageSet(favorites)
567 world_set = root_config.sets["world"]
568 - if "--resume" not in self.myopts:
569 - mymergelist = mylist
570 - mtimedb["resume"]["mergelist"] = [list(x) for x in mymergelist \
571 - if isinstance(x, Package)]
572 - mtimedb.commit()
573
574 + mtimedb["resume"]["mergelist"] = [list(x) for x in mylist \
575 + if isinstance(x, Package)]
576 + mtimedb.commit()
577 +
578 + mymergelist = mylist
579 myfeat = self.settings.features[:]
580 - bad_resume_opts = set(["--ask", "--tree", "--changelog", "--skipfirst",
581 + bad_resume_opts = set(["--ask", "--changelog", "--skipfirst",
582 "--resume"])
583 if "parallel-fetch" in myfeat and \
584 not ("--pretend" in self.myopts or \
585 @@ -5246,7 +5268,8 @@
586 fetch_env["FEATURES"] = fetch_env.get("FEATURES", "") + " -cvs"
587 fetch_env["PORTAGE_NICENESS"] = "0"
588 fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
589 - fetch_args = [sys.argv[0], "--resume", "--fetchonly"]
590 + fetch_args = [sys.argv[0], "--resume",
591 + "--fetchonly", "--nodeps"]
592 resume_opts = self.myopts.copy()
593 # For automatic resume, we need to prevent
594 # any of bad_resume_opts from leaking in
595 @@ -5289,15 +5312,10 @@
596 vardb = vartree.dbapi
597 root_config = self.trees[myroot]["root_config"]
598 pkgsettings = self.pkgsettings[myroot]
599 - metadata = {}
600 if pkg_type == "blocks":
601 pass
602 elif pkg_type == "ebuild":
603 mydbapi = portdb
604 - metadata.update(izip(metadata_keys,
605 - mydbapi.aux_get(pkg_key, metadata_keys)))
606 - pkgsettings.setcpv(pkg_key, mydb=mydbapi)
607 - metadata["USE"] = pkgsettings["PORTAGE_USE"]
608 else:
609 if pkg_type == "binary":
610 mydbapi = bindb
611 @@ -5305,22 +5323,10 @@
612 mydbapi = vardb
613 else:
614 raise AssertionError("Package type: '%s'" % pkg_type)
615 - try:
616 - metadata.update(izip(metadata_keys,
617 - mydbapi.aux_get(pkg_key, metadata_keys)))
618 - except KeyError:
619 - if not installed:
620 - raise
621 - # A package scheduled for uninstall apparently
622 - # isn't installed anymore. Since it's already
623 - # been uninstalled, move on to the next task.
624 - continue
625 if not installed:
626 mergecount += 1
627 - pkg = Package(cpv=pkg_key, built=built,
628 - installed=installed, metadata=metadata,
629 - operation=operation, root=myroot,
630 - type_name=pkg_type)
631 + pkg = x
632 + metadata = pkg.metadata
633 if pkg.installed:
634 if not (buildpkgonly or fetchonly or pretend):
635 self._uninstall_queue.append(pkg)
636 @@ -7652,6 +7658,41 @@
637
638 def action_build(settings, trees, mtimedb,
639 myopts, myaction, myfiles, spinner):
640 +
641 + # validate the state of the resume data
642 + # so that we can make assumptions later.
643 + for k in ("resume", "resume_backup"):
644 + if k in mtimedb:
645 + if "mergelist" in mtimedb[k]:
646 + if not mtimedb[k]["mergelist"]:
647 + del mtimedb[k]
648 + else:
649 + del mtimedb[k]
650 +
651 + resume = False
652 + if "--resume" in myopts and \
653 + ("resume" in mtimedb or
654 + "resume_backup" in mtimedb):
655 + resume = True
656 + if "resume" not in mtimedb:
657 + mtimedb["resume"] = mtimedb["resume_backup"]
658 + del mtimedb["resume_backup"]
659 + mtimedb.commit()
660 + # "myopts" is a list for backward compatibility.
661 + resume_opts = mtimedb["resume"].get("myopts", [])
662 + if isinstance(resume_opts, list):
663 + resume_opts = dict((k,True) for k in resume_opts)
664 + for opt in ("--skipfirst", "--ask", "--tree"):
665 + resume_opts.pop(opt, None)
666 + myopts.update(resume_opts)
667 + # Adjust config according to options of the command being resumed.
668 + for myroot in trees:
669 + mysettings = trees[myroot]["vartree"].settings
670 + mysettings.unlock()
671 + adjust_config(myopts, mysettings)
672 + mysettings.lock()
673 + del myroot, mysettings
674 +
675 ldpath_mtimes = mtimedb["ldpath"]
676 favorites=[]
677 merge_count = 0
678 @@ -7659,7 +7700,13 @@
679 pretend = "--pretend" in myopts
680 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
681 ask = "--ask" in myopts
682 + nodeps = "--nodeps" in myopts
683 tree = "--tree" in myopts
684 + if nodeps and tree:
685 + tree = False
686 + del myopts["--tree"]
687 + portage.writemsg(colorize("WARN", " * ") + \
688 + "--tree is broken with --nodeps. Disabling...\n")
689 verbose = "--verbose" in myopts
690 quiet = "--quiet" in myopts
691 if pretend or fetchonly:
692 @@ -7696,50 +7743,23 @@
693 print darkgreen("These are the packages that would be %s, in order:") % action
694 print
695
696 - # validate the state of the resume data
697 - # so that we can make assumptions later.
698 - for k in ("resume", "resume_backup"):
699 - if k in mtimedb:
700 - if "mergelist" in mtimedb[k]:
701 - if not mtimedb[k]["mergelist"]:
702 - del mtimedb[k]
703 - else:
704 - del mtimedb[k]
705 -
706 show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
707 if not show_spinner:
708 spinner.update = spinner.update_quiet
709
710 - if "--resume" in myopts and \
711 - ("resume" in mtimedb or
712 - "resume_backup" in mtimedb):
713 - if "resume" not in mtimedb:
714 - mtimedb["resume"] = mtimedb["resume_backup"]
715 - del mtimedb["resume_backup"]
716 - mtimedb.commit()
717 + if resume:
718 + favorites = mtimedb["resume"].get("favorites")
719 + if not isinstance(favorites, list):
720 + favorites = []
721
722 - # Adjust config according to options of the command being resumed.
723 - for myroot in trees:
724 - mysettings = trees[myroot]["vartree"].settings
725 - mysettings.unlock()
726 - adjust_config(myopts, mysettings)
727 - mysettings.lock()
728 - del myroot, mysettings
729 -
730 - # "myopts" is a list for backward compatibility.
731 - resume_opts = mtimedb["resume"].get("myopts", [])
732 - if isinstance(resume_opts, list):
733 - resume_opts = dict((k,True) for k in resume_opts)
734 - for opt in ("--skipfirst", "--ask", "--tree"):
735 - resume_opts.pop(opt, None)
736 - myopts.update(resume_opts)
737 if show_spinner:
738 print "Calculating dependencies ",
739 myparams = create_depgraph_params(myopts, myaction)
740 mydepgraph = depgraph(settings, trees,
741 myopts, myparams, spinner)
742 + success = False
743 try:
744 - mydepgraph.loadResumeCommand(mtimedb["resume"])
745 + success = mydepgraph.loadResumeCommand(mtimedb["resume"])
746 except portage.exception.PackageNotFound:
747 if show_spinner:
748 print
749 @@ -7748,15 +7768,37 @@
750 out.eerror("Error: The resume list contains packages that are no longer")
751 out.eerror(" available to be emerged. Please restart/continue")
752 out.eerror(" the merge operation manually.")
753 + else:
754 + if show_spinner:
755 + print "\b\b... done!"
756
757 + unsatisfied_block = False
758 + if success:
759 + mymergelist = mydepgraph.altlist()
760 + if mymergelist and \
761 + (isinstance(mymergelist[-1], Blocker) and \
762 + not mymergelist[-1].satisfied):
763 + if not fetchonly and not pretend:
764 + unsatisfied_block = True
765 + mydepgraph.display(
766 + mydepgraph.altlist(reversed=tree),
767 + favorites=favorites)
768 + print "\n!!! Error: The above package list contains packages which cannot be installed"
769 + print "!!! at the same time on the same system."
770 + if not quiet:
771 + show_blocker_docs_link()
772 +
773 + if not success:
774 + mydepgraph.display_problems()
775 +
776 + if unsatisfied_block or not success:
777 # delete the current list and also the backup
778 # since it's probably stale too.
779 for k in ("resume", "resume_backup"):
780 mtimedb.pop(k, None)
781 mtimedb.commit()
782 +
783 return 1
784 - if show_spinner:
785 - print "\b\b... done!"
786 else:
787 if ("--resume" in myopts):
788 print darkgreen("emerge: It seems we have nothing to resume...")
789 @@ -7787,14 +7829,14 @@
790 "--verbose" in myopts) and \
791 not ("--quiet" in myopts and "--ask" not in myopts):
792 if "--resume" in myopts:
793 - mymergelist = mtimedb["resume"]["mergelist"]
794 - if "--skipfirst" in myopts:
795 - mymergelist = mymergelist[1:]
796 + mymergelist = mydepgraph.altlist()
797 if len(mymergelist) == 0:
798 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
799 return os.EX_OK
800 favorites = mtimedb["resume"]["favorites"]
801 - retval = mydepgraph.display(mymergelist, favorites=favorites)
802 + retval = mydepgraph.display(
803 + mydepgraph.altlist(reversed=tree),
804 + favorites=favorites)
805 if retval != os.EX_OK:
806 return retval
807 prompt="Would you like to resume merging these packages?"
808 @@ -7845,14 +7887,14 @@
809
810 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
811 if ("--resume" in myopts):
812 - mymergelist = mtimedb["resume"]["mergelist"]
813 - if "--skipfirst" in myopts:
814 - mymergelist = mymergelist[1:]
815 + mymergelist = mydepgraph.altlist()
816 if len(mymergelist) == 0:
817 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
818 return os.EX_OK
819 favorites = mtimedb["resume"]["favorites"]
820 - retval = mydepgraph.display(mymergelist, favorites=favorites)
821 + retval = mydepgraph.display(
822 + mydepgraph.altlist(reversed=tree),
823 + favorites=favorites)
824 if retval != os.EX_OK:
825 return retval
826 else:
827 @@ -7889,9 +7931,9 @@
828 it to write the mtimedb"""
829 mtimedb.filename = None
830 time.sleep(3) # allow the parent to have first fetch
831 + mymergelist = mydepgraph.altlist()
832 del mydepgraph
833 - retval = mergetask.merge(
834 - mtimedb["resume"]["mergelist"], favorites, mtimedb)
835 + retval = mergetask.merge(mymergelist, favorites, mtimedb)
836 merge_count = mergetask.curval
837 else:
838 if "resume" in mtimedb and \
839 @@ -8433,11 +8475,6 @@
840 import portage.debug
841 portage.debug.set_trace(True)
842
843 - if ("--resume" in myopts):
844 - if "--tree" in myopts:
845 - print "* --tree is currently broken with --resume. Disabling..."
846 - del myopts["--tree"]
847 -
848 if not ("--quiet" in myopts):
849 if not sys.stdout.isatty() or ("--nospinner" in myopts):
850 spinner.update = spinner.update_basic
851
852 --
853 gentoo-commits@l.g.o mailing list