Gentoo Archives: gentoo-commits

From: "Zac Medico (zmedico)" <zmedico@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] portage r9957 - in main/branches/2.1.2: bin doc/dependency_resolution man pym
Date: Thu, 24 Apr 2008 03:34:12
Message-Id: E1JosDp-0005dI-3W@stork.gentoo.org
1 Author: zmedico
2 Date: 2008-04-24 03:34:07 +0000 (Thu, 24 Apr 2008)
3 New Revision: 9957
4
5 Modified:
6 main/branches/2.1.2/bin/emerge
7 main/branches/2.1.2/doc/dependency_resolution/task_scheduling.docbook
8 main/branches/2.1.2/man/color.map.5
9 main/branches/2.1.2/pym/output.py
10 Log:
11 Bug #172812 - Automatically uninstall packages to avoid blocker conflicts.
12 (trunk r9944:9956)
13
14
15 Modified: main/branches/2.1.2/bin/emerge
16 ===================================================================
17 --- main/branches/2.1.2/bin/emerge 2008-04-24 03:17:26 UTC (rev 9956)
18 +++ main/branches/2.1.2/bin/emerge 2008-04-24 03:34:07 UTC (rev 9957)
19 @@ -743,7 +743,7 @@
20
21 return mynewlines
22
23 -def clean_world(vardb, cpv):
24 +def world_clean_package(vardb, cpv):
25 """Remove a package from the world file when unmerged."""
26 world_set = WorldSet(vardb.settings)
27 world_set.lock()
28 @@ -1088,7 +1088,13 @@
29 return "medium-soft"
30 return "soft"
31
32 +class BlockerDepPriority(DepPriority):
33 + __slots__ = ()
34 + def __int__(self):
35 + return 0
36 +
37 class UnmergeDepPriority(AbstractDepPriority):
38 + __slots__ = ()
39 """
40 Combination of properties Priority Category
41
42 @@ -1340,10 +1346,9 @@
43 shown_comments.add(comment)
44 return have_eapi_mask
45
46 -class Package(object):
47 - __slots__ = ("__weakref__", "built", "cpv", "depth",
48 - "installed", "metadata", "root", "onlydeps", "type_name",
49 - "cp", "cpv_slot", "slot_atom", "_digraph_node")
50 +class Task(object):
51 + __slots__ = ("__weakref__", "_hash_key",)
52 +
53 def __init__(self, **kwargs):
54 for myattr in self.__slots__:
55 if myattr == "__weakref__":
56 @@ -1351,14 +1356,56 @@
57 myvalue = kwargs.get(myattr, None)
58 setattr(self, myattr, myvalue)
59
60 + def _get_hash_key(self):
61 + try:
62 + return self._hash_key
63 + except AttributeError:
64 + raise NotImplementedError(self)
65 +
66 + def __eq__(self, other):
67 + return self._get_hash_key() == other
68 +
69 + def __ne__(self, other):
70 + return self._get_hash_key() != other
71 +
72 + def __hash__(self):
73 + return hash(self._get_hash_key())
74 +
75 + def __len__(self):
76 + return len(self._get_hash_key())
77 +
78 + def __getitem__(self, key):
79 + return self._get_hash_key()[key]
80 +
81 + def __iter__(self):
82 + return iter(self._get_hash_key())
83 +
84 + def __contains__(self, key):
85 + return key in self._get_hash_key()
86 +
87 + def __str__(self):
88 + return str(self._get_hash_key())
89 +
90 +class Package(Task):
91 + __slots__ = ("built", "cpv", "depth",
92 + "installed", "metadata", "root", "onlydeps", "type_name",
93 + "cp", "cpv_slot", "slot_atom")
94 + def __init__(self, **kwargs):
95 + Task.__init__(self, **kwargs)
96 self.cp = portage.cpv_getkey(self.cpv)
97 self.slot_atom = "%s:%s" % (self.cp, self.metadata["SLOT"])
98 self.cpv_slot = "%s:%s" % (self.cpv, self.metadata["SLOT"])
99
100 - status = "merge"
101 - if self.onlydeps or self.installed:
102 - status = "nomerge"
103 - self._digraph_node = (self.type_name, self.root, self.cpv, status)
104 + def _get_hash_key(self):
105 + try:
106 + return self._hash_key
107 + except AttributeError:
108 + operation = "merge"
109 + if self.onlydeps or self.installed:
110 + operation = "nomerge"
111 + self._hash_key = \
112 + (self.type_name, self.root, self.cpv, operation)
113 + return self._hash_key
114
115 def __lt__(self, other):
116 other_split = portage.catpkgsplit(other.cpv)
117 @@ -1378,22 +1425,14 @@
118 return True
119 return False
120
121 - def __eq__(self, other):
122 - return self._digraph_node == other
123 - def __ne__(self, other):
124 - return self._digraph_node != other
125 - def __hash__(self):
126 - return hash(self._digraph_node)
127 - def __len__(self):
128 - return len(self._digraph_node)
129 - def __getitem__(self, key):
130 - return self._digraph_node[key]
131 - def __iter__(self):
132 - return iter(self._digraph_node)
133 - def __contains__(self, key):
134 - return key in self._digraph_node
135 - def __str__(self):
136 - return str(self._digraph_node)
137 +class Uninstall(Package):
138 + def _get_hash_key(self):
139 + try:
140 + return self._hash_key
141 + except AttributeError:
142 + self._hash_key = \
143 + (self.type_name, self.root, self.cpv, "uninstall")
144 + return self._hash_key
145
146 class DependencyArg(object):
147 def __init__(self, arg=None, root_config=None):
148 @@ -1594,6 +1633,13 @@
149 self._cp_map = {}
150 self._cpv_map = {}
151
152 + def __contains__(self, item):
153 + existing = self._cpv_map.get(item.cpv)
154 + if existing is not None and \
155 + existing == item:
156 + return True
157 + return False
158 +
159 def _clear_cache(self):
160 if self._categories is not None:
161 self._categories = None
162 @@ -3264,20 +3310,19 @@
163 # require that the old version be uninstalled at build
164 # time.
165 continue
166 - if parent_static and \
167 - slot_atom not in modified_slots[myroot]:
168 - # This blocker will be handled the next time that a
169 - # merge of either package is triggered.
170 + if parent.installed:
171 + # Two currently installed packages conflict with
172 + # each other. Ignore this case since the damage
173 + # is already done and this would be likely to
174 + # confuse users if displayed like a normal blocker.
175 continue
176 - if pstatus == "merge" and \
177 - slot_atom in modified_slots[myroot]:
178 - replacement = self._slot_pkg_map[myroot][slot_atom]
179 - if not portage.match_from_list(
180 - mydep, [replacement.cpv_slot]):
181 - # Apparently a replacement may be able to
182 - # invalidate this block.
183 - depends_on_order.add((replacement, parent))
184 - continue
185 + if pstatus == "merge":
186 + # Maybe the blocked package can be replaced or simply
187 + # unmerged to resolve this block.
188 + inst_pkg = self._pkg_cache[
189 + ("installed", myroot, cpv, "nomerge")]
190 + depends_on_order.add((inst_pkg, parent))
191 + continue
192 # None of the above blocker resolutions techniques apply,
193 # so apparently this one is unresolvable.
194 unresolved_blocks = True
195 @@ -3291,30 +3336,47 @@
196 # This blocker will be handled the next time that a
197 # merge of either package is triggered.
198 continue
199 - if not parent_static and pstatus == "nomerge" and \
200 - slot_atom in modified_slots[myroot]:
201 - replacement = self._slot_pkg_map[myroot][pslot_atom]
202 - if replacement not in \
203 - self.blocker_parents[blocker]:
204 - # Apparently a replacement may be able to
205 - # invalidate this block.
206 - blocked_node = \
207 - self._slot_pkg_map[myroot][slot_atom]
208 - depends_on_order.add(
209 - (replacement, blocked_node))
210 - continue
211 +
212 + # Maybe the blocking package can be
213 + # unmerged to resolve this block.
214 + try:
215 + blocked_pkg = self._slot_pkg_map[myroot][slot_atom]
216 + except KeyError:
217 + blocked_pkg = self._pkg_cache[
218 + ("installed", myroot, cpv, "nomerge")]
219 + if pstatus == "merge" and blocked_pkg.installed:
220 + depends_on_order.add((blocked_pkg, parent))
221 + continue
222 + elif pstatus == "nomerge":
223 + depends_on_order.add((parent, blocked_pkg))
224 + continue
225 # None of the above blocker resolutions techniques apply,
226 # so apparently this one is unresolvable.
227 unresolved_blocks = True
228 +
229 + # Make sure we don't unmerge any packages that have been pulled
230 + # into the graph.
231 if not unresolved_blocks and depends_on_order:
232 - for node, pnode in depends_on_order:
233 + for inst_pkg, inst_task in depends_on_order:
234 + if self.digraph.contains(inst_pkg) and \
235 + self.digraph.parent_nodes(inst_pkg):
236 + unresolved_blocks = True
237 + break
238 +
239 + if not unresolved_blocks and depends_on_order:
240 + for inst_pkg, inst_task in depends_on_order:
241 + uninst_task = Uninstall(built=inst_pkg.built,
242 + cpv=inst_pkg.cpv, installed=inst_pkg.installed,
243 + metadata=inst_pkg.metadata, root=inst_pkg.root,
244 + type_name=inst_pkg.type_name)
245 + self._pkg_cache[uninst_task] = uninst_task
246 # Enforce correct merge order with a hard dep.
247 - self.digraph.addnode(node, pnode,
248 - priority=DepPriority(buildtime=True))
249 + self.digraph.addnode(uninst_task, inst_task,
250 + priority=BlockerDepPriority())
251 # Count references to this blocker so that it can be
252 # invalidated after nodes referencing it have been
253 # merged.
254 - self.blocker_digraph.addnode(node, blocker)
255 + self.blocker_digraph.addnode(uninst_task, blocker)
256 if not unresolved_blocks and not depends_on_order:
257 self.blocker_parents[blocker].remove(parent)
258 if unresolved_blocks:
259 @@ -3395,14 +3457,22 @@
260 return -1
261 myblockers = self.blocker_digraph.copy()
262 retlist=[]
263 - circular_blocks = False
264 + # Contains any Uninstall tasks that have been ignored
265 + # in order to avoid the circular deps code path. These
266 + # correspond to blocker conflicts that could not be
267 + # resolved.
268 + ignored_uninstall_tasks = set()
269 blocker_deps = None
270 asap_nodes = []
271 portage_node = None
272 - if reversed:
273 - get_nodes = mygraph.root_nodes
274 - else:
275 - get_nodes = mygraph.leaf_nodes
276 + def get_nodes(**kwargs):
277 + """
278 + Returns leaf nodes excluding Uninstall instances
279 + since those should be executed as late as possible.
280 + """
281 + return [node for node in mygraph.leaf_nodes(**kwargs) \
282 + if not isinstance(node, Uninstall)]
283 + if True:
284 for node in mygraph.order:
285 if node.root == "/" and \
286 "sys-apps/portage" == portage.cpv_getkey(node.cpv):
287 @@ -3564,27 +3634,102 @@
288 selected_nodes = list(selected_nodes)
289 selected_nodes.sort(cmp_circular_bias)
290
291 - if not selected_nodes:
292 - if not myblockers.is_empty():
293 - """A blocker couldn't be circumnavigated while keeping all
294 - dependencies satisfied. The user will have to resolve this
295 - manually. This is a panic condition and thus the order
296 - doesn't really matter, so just pop a random node in order
297 - to avoid a circular dependency panic if possible."""
298 - if not circular_blocks:
299 - circular_blocks = True
300 - blocker_deps = myblockers.leaf_nodes()
301 - while blocker_deps:
302 - # Some of these nodes might have already been selected
303 - # by the normal node selection process after the
304 - # circular_blocks flag has been set. Therefore, we
305 - # have to verify that they're still in the graph so
306 - # that they're not selected more than once.
307 - node = blocker_deps.pop()
308 - if mygraph.contains(node):
309 - selected_nodes = [node]
310 + if not selected_nodes and not myblockers.is_empty():
311 + # An Uninstall task needs to be executed in order to
312 + # avoid conflict if possible.
313 +
314 + min_parent_deps = None
315 + uninst_task = None
316 + for task in myblockers.leaf_nodes():
317 + # Do some sanity checks so that system or world packages
318 + # don't get uninstalled inappropriately here (only really
319 + # necessary when --complete-graph has not been enabled).
320 +
321 + if task in ignored_uninstall_tasks:
322 + continue
323 +
324 + root_config = self.roots[task.root]
325 + inst_pkg = self._pkg_cache[
326 + ("installed", task.root, task.cpv, "nomerge")]
327 +
328 + # For packages in the system set, don't take
329 + # any chances. If the conflict can't be resolved
330 + # by a normal upgrade operation then require
331 + # user intervention.
332 + skip = False
333 + try:
334 + for atom in root_config.sets[
335 + "system"].iterAtomsForPackage(task):
336 + skip = True
337 break
338 + except portage_exception.InvalidDependString:
339 + skip = True
340 + if skip:
341 + continue
342
343 + # For packages in the world set, go ahead and uninstall
344 + # when necessary, as long as the atom will be satisfied
345 + # in the final state.
346 + graph_db = self.mydbapi[task.root]
347 + try:
348 + for atom in root_config.sets[
349 + "world"].iterAtomsForPackage(task):
350 + satisfied = False
351 + for cpv in graph_db.match(atom):
352 + if cpv == inst_pkg.cpv and \
353 + inst_pkg in graph_db:
354 + continue
355 + satisfied = True
356 + break
357 + if not satisfied:
358 + skip = True
359 + break
360 + except portage_exception.InvalidDependString:
361 + skip = True
362 + if skip:
363 + continue
364 +
365 + # Check the deps of parent nodes to ensure that
366 + # the chosen task produces a leaf node. Maybe
367 + # this can be optimized some more to make the
368 + # best possible choice, but the current algorithm
369 + # is simple and should be near optimal for most
370 + # common cases.
371 + parent_deps = set()
372 + for parent in mygraph.parent_nodes(task):
373 + parent_deps.update(mygraph.child_nodes(parent,
374 + ignore_priority=DepPriority.MEDIUM_SOFT))
375 + parent_deps.remove(task)
376 + if min_parent_deps is None or \
377 + len(parent_deps) < min_parent_deps:
378 + min_parent_deps = len(parent_deps)
379 + uninst_task = task
380 +
381 + if uninst_task is not None:
382 + selected_nodes = [uninst_task]
383 + else:
384 + # None of the Uninstall tasks are acceptable, so
385 + # the corresponding blockers are unresolvable.
386 + # We need to drop an Uninstall task here in order
387 + # to avoid the circular deps code path, but the
388 + # blocker will still be counted as an unresolved
389 + # conflict.
390 + for node in myblockers.leaf_nodes():
391 + try:
392 + mygraph.remove(node)
393 + except KeyError:
394 + pass
395 + else:
396 + ignored_uninstall_tasks.add(node)
397 + break
398 +
399 + # After dropping an Uninstall task, reset
400 + # the state variables for leaf node selection and
401 + # continue trying to select leaf nodes.
402 + prefer_asap = True
403 + accept_root_node = False
404 + continue
405 +
406 if not selected_nodes:
407 # No leaf nodes are available, so we have a circular
408 # dependency panic situation. Reduce the noise level to a
409 @@ -3630,21 +3775,43 @@
410 accept_root_node = False
411
412 for node in selected_nodes:
413 +
414 + # Handle interactions between blockers
415 + # and uninstallation tasks.
416 + uninst_task = None
417 + if isinstance(node, Uninstall):
418 + uninst_task = node
419 + else:
420 + vardb = self.trees[node.root]["vartree"].dbapi
421 + previous_cpv = vardb.match(node.slot_atom)
422 + if previous_cpv:
423 + # The package will be replaced by this one, so remove
424 + # the corresponding Uninstall task if necessary.
425 + previous_cpv = previous_cpv[0]
426 + uninst_task = \
427 + ("installed", node.root, previous_cpv, "uninstall")
428 + try:
429 + mygraph.remove(uninst_task)
430 + except KeyError:
431 + pass
432 + if uninst_task is not None and \
433 + uninst_task not in ignored_uninstall_tasks and \
434 + myblockers.contains(uninst_task):
435 + myblockers.remove(uninst_task)
436 + for blocker in myblockers.root_nodes():
437 + if myblockers.child_nodes(blocker):
438 + continue
439 + myblockers.remove(blocker)
440 + unresolved = \
441 + self._unresolved_blocker_parents.get(blocker)
442 + if unresolved:
443 + self.blocker_parents[blocker] = unresolved
444 + else:
445 + del self.blocker_parents[blocker]
446 +
447 if node[-1] != "nomerge":
448 retlist.append(list(node))
449 mygraph.remove(node)
450 - if not reversed and not circular_blocks and myblockers.contains(node):
451 - """This node may have invalidated one or more blockers."""
452 - myblockers.remove(node)
453 - for blocker in myblockers.root_nodes():
454 - if not myblockers.child_nodes(blocker):
455 - myblockers.remove(blocker)
456 - unresolved = \
457 - self._unresolved_blocker_parents.get(blocker)
458 - if unresolved:
459 - self.blocker_parents[blocker] = unresolved
460 - else:
461 - del self.blocker_parents[blocker]
462
463 if not reversed:
464 """Blocker validation does not work with reverse mode,
465 @@ -3873,8 +4040,12 @@
466 pkg = self._pkg_cache[tuple(x)]
467 metadata = pkg.metadata
468 pkg_status = x[3]
469 - pkg_merge = ordered and pkg_status != "nomerge"
470 - if pkg in self._slot_collision_nodes or pkg.onlydeps:
471 + pkg_merge = ordered and pkg_status == "merge"
472 + if not pkg_merge and pkg_status == "merge":
473 + pkg_status = "nomerge"
474 + if pkg_status == "uninstall":
475 + mydbapi = vardb
476 + elif pkg in self._slot_collision_nodes or pkg.onlydeps:
477 # The metadata isn't cached due to a slot collision or
478 # --onlydeps.
479 mydbapi = self.trees[myroot][self.pkg_tree_map[pkg_type]].dbapi
480 @@ -3898,7 +4069,7 @@
481 mydbapi.aux_get(pkg_key, ["RESTRICT"])[0]),
482 uselist=pkg_use))
483 except portage_exception.InvalidDependString, e:
484 - if pkg_status != "nomerge":
485 + if not pkg.installed:
486 restrict = mydbapi.aux_get(pkg_key, ["RESTRICT"])[0]
487 show_invalid_depstring_notice(x, restrict, str(e))
488 del e
489 @@ -3921,9 +4092,10 @@
490 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
491 if vardb.cpv_exists(pkg_key):
492 addl=" "+yellow("R")+fetch+" "
493 - if x[3] != "nomerge":
494 - if ordered:
495 - counters.reinst += 1
496 + if pkg_merge:
497 + counters.reinst += 1
498 + elif pkg_status == "uninstall":
499 + counters.uninst += 1
500 # filter out old-style virtual matches
501 elif installed_versions and \
502 portage.cpv_getkey(installed_versions[0]) == \
503 @@ -4112,7 +4284,7 @@
504
505 # now use the data to generate output
506 repoadd = None
507 - if pkg_status == "nomerge" or not has_previous:
508 + if pkg.installed or not has_previous:
509 repoadd = repo_display.repoStr(repo_path_real)
510 else:
511 repo_path_prev = None
512 @@ -4197,6 +4369,8 @@
513 return colorize("PKG_MERGE_WORLD", pkg_str)
514 else:
515 return colorize("PKG_MERGE", pkg_str)
516 + elif pkg_status == "uninstall":
517 + return colorize("PKG_UNINSTALL", pkg_str)
518 else:
519 if pkg_system:
520 return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
521 @@ -4215,7 +4389,14 @@
522 myprint=myprint+myoldbest
523 myprint=myprint+darkgreen("to "+x[1])
524 else:
525 - myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_cp)
526 + if not pkg_merge:
527 + myprint = "[%s] %s%s" % \
528 + (pkgprint(pkg_status.ljust(13)),
529 + indent, pkgprint(pkg.cp))
530 + else:
531 + myprint = "[%s %s] %s%s" % \
532 + (pkgprint(pkg.type_name), addl,
533 + indent, pkgprint(pkg.cp))
534 if (newlp-nc_len(myprint)) > 0:
535 myprint=myprint+(" "*(newlp-nc_len(myprint)))
536 myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
537 @@ -4225,7 +4406,7 @@
538 myprint=myprint+darkgreen("to "+x[1])+" "+verboseadd
539 else:
540 if not pkg_merge:
541 - myprint = "[%s ] " % pkgprint("nomerge")
542 + myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
543 else:
544 myprint = "[" + pkg_type + " " + addl + "] "
545 myprint += indent + pkgprint(pkg_key) + " " + \
546 @@ -4238,7 +4419,14 @@
547 myprint=myprint+" "+green(xs[1]+xs[2])+" "
548 myprint=myprint+myoldbest
549 else:
550 - myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_cp)
551 + if not pkg_merge:
552 + myprint = "[%s] %s%s" % \
553 + (pkgprint(pkg_status.ljust(13)),
554 + indent, pkgprint(pkg.cp))
555 + else:
556 + myprint = "[%s %s] %s%s" % \
557 + (pkgprint(pkg.type_name), addl,
558 + indent, pkgprint(pkg.cp))
559 if (newlp-nc_len(myprint)) > 0:
560 myprint=myprint+(" "*(newlp-nc_len(myprint)))
561 myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
562 @@ -4247,7 +4435,10 @@
563 myprint=myprint+myoldbest+" "+verboseadd
564 else:
565 if not pkg_merge:
566 - myprint="["+pkgprint("nomerge")+" ] "+indent+pkgprint(pkg_key)+" "+myoldbest+" "+verboseadd
567 + myprint = "[%s] %s%s %s %s" % \
568 + (pkgprint(pkg_status.ljust(13)),
569 + indent, pkgprint(pkg.cpv),
570 + myoldbest, verboseadd)
571 else:
572 myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_key)+" "+myoldbest+" "+verboseadd
573 p.append(myprint)
574 @@ -4501,7 +4692,7 @@
575 pkg_type, myroot, pkg_key, action = x
576 if pkg_type not in self.pkg_tree_map:
577 continue
578 - if action != "merge":
579 + if action not in ("merge", "uninstall"):
580 continue
581 mydb = trees[myroot][self.pkg_tree_map[pkg_type]].dbapi
582 try:
583 @@ -4509,15 +4700,22 @@
584 mydb.aux_get(pkg_key, self._mydbapi_keys)))
585 except KeyError:
586 # It does not exist or it is corrupt.
587 + if action == "uninstall":
588 + continue
589 raise portage_exception.PackageNotFound(pkg_key)
590 if pkg_type == "ebuild":
591 pkgsettings = self.pkgsettings[myroot]
592 pkgsettings.setcpv(pkg_key, mydb=metadata)
593 metadata["USE"] = pkgsettings["PORTAGE_USE"]
594 - installed = False
595 + installed = action == "uninstall"
596 built = pkg_type != "ebuild"
597 - pkg = Package(built=built, cpv=pkg_key, installed=installed,
598 - metadata=metadata, root=myroot, type_name=pkg_type)
599 + if installed:
600 + pkg_constructor = Uninstall
601 + else:
602 + pkg_constructor = Package
603 + pkg = pkg_constructor(built=built, cpv=pkg_key,
604 + installed=installed, metadata=metadata,
605 + root=myroot, type_name=pkg_type)
606 self._pkg_cache[pkg] = pkg
607 fakedb[myroot].cpv_inject(pkg)
608 self.spinner.update()
609 @@ -4713,6 +4911,7 @@
610 self.new = 0
611 self.newslot = 0
612 self.reinst = 0
613 + self.uninst = 0
614 self.blocks = 0
615 self.totalsize = 0
616 self.restrict_fetch = 0
617 @@ -4745,6 +4944,10 @@
618 details.append("%s reinstall" % self.reinst)
619 if self.reinst > 1:
620 details[-1] += "s"
621 + if self.uninst > 0:
622 + details.append("%s uninstall" % self.uninst)
623 + if self.uninst > 1:
624 + details[-1] += "s"
625 if self.blocks > 0:
626 details.append("%s block" % self.blocks)
627 if self.blocks > 1:
628 @@ -4780,6 +4983,7 @@
629 portage.config(clone=trees["/"]["vartree"].settings)
630 self.curval = 0
631 self._spawned_pids = []
632 + self._uninstall_queue = []
633
634 def merge(self, mylist, favorites, mtimedb):
635 try:
636 @@ -4808,8 +5012,21 @@
637 pass
638 spawned_pids.remove(pid)
639
640 + def _dequeue_uninstall_tasks(self, mtimedb):
641 + if not self._uninstall_queue:
642 + return
643 + for uninst_task in self._uninstall_queue:
644 + root_config = self.trees[uninst_task.root]["root_config"]
645 + unmerge(root_config.settings, self.myopts,
646 + root_config.trees["vartree"], "unmerge",
647 + [uninst_task.cpv], mtimedb["ldpath"], clean_world=0)
648 + del mtimedb["resume"]["mergelist"][0]
649 + mtimedb.commit()
650 + del self._uninstall_queue[:]
651 +
652 def _merge(self, mylist, favorites, mtimedb):
653 failed_fetches = []
654 + buildpkgonly = "--buildpkgonly" in self.myopts
655 fetchonly = "--fetchonly" in self.myopts or \
656 "--fetch-all-uri" in self.myopts
657 pretend = "--pretend" in self.myopts
658 @@ -4913,18 +5130,23 @@
659 metadata_keys = [k for k in portage.auxdbkeys \
660 if not k.startswith("UNUSED_")] + ["USE"]
661
662 + task_list = mymergelist
663 + # Filter mymergelist so that all the len(mymergelist) calls
664 + # below (for display) do not count Uninstall instances.
665 + mymergelist = [x for x in mymergelist if x[-1] == "merge"]
666 mergecount=0
667 - for x in mymergelist:
668 + for x in task_list:
669 pkg_type = x[0]
670 if pkg_type == "blocks":
671 continue
672 - mergecount+=1
673 myroot=x[1]
674 pkg_key = x[2]
675 pkgindex=2
676 portdb = self.trees[myroot]["porttree"].dbapi
677 bindb = self.trees[myroot]["bintree"].dbapi
678 vartree = self.trees[myroot]["vartree"]
679 + vardb = vartree.dbapi
680 + root_config = self.trees[myroot]["root_config"]
681 pkgsettings = self.pkgsettings[myroot]
682 metadata = {}
683 if pkg_type == "blocks":
684 @@ -4938,15 +5160,27 @@
685 else:
686 if pkg_type == "binary":
687 mydbapi = bindb
688 + elif pkg_type == "installed":
689 + mydbapi = vardb
690 else:
691 raise AssertionError("Package type: '%s'" % pkg_type)
692 metadata.update(izip(metadata_keys,
693 mydbapi.aux_get(pkg_key, metadata_keys)))
694 built = pkg_type != "ebuild"
695 installed = pkg_type == "installed"
696 - pkg = Package(type_name=pkg_type, root=myroot,
697 + if installed:
698 + pkg_constructor = Uninstall
699 + else:
700 + pkg_constructor = Package
701 + mergecount += 1
702 + pkg = pkg_constructor(type_name=pkg_type, root=myroot,
703 cpv=pkg_key, built=built, installed=installed,
704 metadata=metadata)
705 + if pkg.installed:
706 + if not (buildpkgonly or fetchonly or pretend):
707 + self._uninstall_queue.append(pkg)
708 + continue
709 +
710 if x[0]=="blocks":
711 pkgindex=3
712 y = portdb.findname(pkg_key)
713 @@ -5041,6 +5275,7 @@
714 bintree = self.trees[myroot]["bintree"]
715 if bintree.populated:
716 bintree.inject(pkg_key)
717 + self._dequeue_uninstall_tasks(mtimedb)
718 if "--buildpkgonly" not in self.myopts:
719 msg = " === (%s of %s) Merging (%s::%s)" % \
720 (mergecount, len(mymergelist), pkg_key, y)
721 @@ -5066,12 +5301,22 @@
722 short_msg = "emerge: (%s of %s) %s Compile" % \
723 (mergecount, len(mymergelist), pkg_key)
724 emergelog(xterm_titles, msg, short_msg=short_msg)
725 - retval = portage.doebuild(y, "merge", myroot,
726 + retval = portage.doebuild(y, "install", myroot,
727 pkgsettings, self.edebug, vartree=vartree,
728 mydbapi=portdb, tree="porttree",
729 prev_mtimes=ldpath_mtimes)
730 if retval != os.EX_OK:
731 return retval
732 + self._dequeue_uninstall_tasks(mtimedb)
733 + retval = portage.merge(pkgsettings["CATEGORY"],
734 + pkgsettings["PF"], pkgsettings["D"],
735 + os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
736 + "build-info"), myroot, pkgsettings,
737 + myebuild=pkgsettings["EBUILD"],
738 + mytree="porttree", mydbapi=portdb,
739 + vartree=vartree, prev_mtimes=ldpath_mtimes)
740 + if retval != os.EX_OK:
741 + return retval
742 finally:
743 if builddir_lock:
744 portage_locks.unlockdir(builddir_lock)
745 @@ -5091,6 +5336,7 @@
746 portage_locks.unlockdir(catdir_lock)
747
748 elif x[0]=="binary":
749 + self._dequeue_uninstall_tasks(mtimedb)
750 #merge the tbz2
751 mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key)
752 if "--getbinpkg" in self.myopts:
753 @@ -5252,7 +5498,7 @@
754 return os.EX_OK
755
756 def unmerge(settings, myopts, vartree, unmerge_action, unmerge_files,
757 - ldpath_mtimes, autoclean=0):
758 + ldpath_mtimes, autoclean=0, clean_world=1):
759 candidate_catpkgs=[]
760 global_unmerge=0
761 xterm_titles = "notitles" not in settings.features
762 @@ -5566,7 +5812,8 @@
763 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
764 sys.exit(retval)
765 else:
766 - clean_world(vartree.dbapi, y)
767 + if clean_world:
768 + world_clean_package(vartree.dbapi, y)
769 emergelog(xterm_titles, " >>> unmerge success: "+y)
770 return 1
771
772
773 Modified: main/branches/2.1.2/doc/dependency_resolution/task_scheduling.docbook
774 ===================================================================
775 --- main/branches/2.1.2/doc/dependency_resolution/task_scheduling.docbook 2008-04-24 03:17:26 UTC (rev 9956)
776 +++ main/branches/2.1.2/doc/dependency_resolution/task_scheduling.docbook 2008-04-24 03:34:07 UTC (rev 9957)
777 @@ -12,11 +12,38 @@
778 <sect1 id='dependency-resolution-task-scheduling-conflict-avoidance'>
779 <title>Conflict Avoidance</title>
780 <para>
781 - In some cases it is possible to adjust package installation order
782 - to avoid having two conflicting packages installed simultaneously.
783 + Sometimes a package installation order exists such that it is
784 + possible to avoid having two conflicting packages installed
785 + simultaneously. If a currently installed package conflicts with a
786 + new package that is planned to be installed, it may be possible to
787 + solve the conflict by replacing the installed package with a
788 + different package that occupies the same slot.
789 </para>
790 <para>
791 - TODO: Automatically uninstall packages when necessary to avoid conflicts.
792 + In order to avoid a conflict, a package may need to be uninstalled
793 + in advance, rather than through replacement. The following constraints
794 + protect inappropriate packages from being chosen for automatic
795 + uninstallation:
796 + <itemizedlist>
797 + <listitem>
798 + Installed packages that have been pulled into the current dependency
799 + graph will not be uninstalled. Due to
800 + <link linkend='dependency-resolution-package-modeling-dependency-neglection'>
801 + dependency neglection</link>, other checks may be necessary in order
802 + to protect inappropriate packages from being uninstalled.
803 + </listitem>
804 + <listitem>
805 + An installed package that is matched by a dependency atom from the
806 + "system" set will not be uninstalled in advance since it might not
807 + be safe. Such a package will be uninstalled through replacement.
808 + </listitem>
809 + <listitem>
810 + An installed package that is matched by a dependency atom from the
811 + "world" set will not be uninstalled if the dependency graph does not
812 + contain a replacement package that is matched by the same dependency
813 + atom.
814 + </listitem>
815 + </itemizedlist>
816 </para>
817 </sect1>
818 <sect1 id='dependency-resolution-task-scheduling-circular-dependencies'>
819
820 Modified: main/branches/2.1.2/man/color.map.5
821 ===================================================================
822 --- main/branches/2.1.2/man/color.map.5 2008-04-24 03:17:26 UTC (rev 9956)
823 +++ main/branches/2.1.2/man/color.map.5 2008-04-24 03:34:07 UTC (rev 9957)
824 @@ -48,6 +48,10 @@
825 \fBPKG_NOMERGE_WORLD\fR = \fI"blue"\fR
826 Defines color used for world packages not planned to be merged.
827 .TP
828 +\fBPKG_UNINSTALL\fR = \fI"red"\fR
829 +Defines color used for packages planned to be uninstalled in order
830 +to resolve conflicts.
831 +.TP
832 \fBPROMPT_CHOICE_DEFAULT\fR = \fI"green"\fR
833 Defines color used for the default choice at a prompt.
834 .TP
835
836 Modified: main/branches/2.1.2/pym/output.py
837 ===================================================================
838 --- main/branches/2.1.2/pym/output.py 2008-04-24 03:17:26 UTC (rev 9956)
839 +++ main/branches/2.1.2/pym/output.py 2008-04-24 03:34:07 UTC (rev 9957)
840 @@ -145,6 +145,7 @@
841 codes["PKG_MERGE"] = codes["darkgreen"]
842 codes["PKG_MERGE_SYSTEM"] = codes["darkgreen"]
843 codes["PKG_MERGE_WORLD"] = codes["green"]
844 +codes["PKG_UNINSTALL"] = codes["red"]
845 codes["PKG_NOMERGE"] = codes["darkblue"]
846 codes["PKG_NOMERGE_SYSTEM"] = codes["darkblue"]
847 codes["PKG_NOMERGE_WORLD"] = codes["blue"]
848
849 --
850 gentoo-commits@l.g.o mailing list