Gentoo Archives: gentoo-commits

From: "Fabian Groffen (grobian)" <grobian@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] portage r13710 - main/branches/prefix/pym/_emerge
Date: Sat, 27 Jun 2009 14:07:49
Message-Id: E1MKYYr-0001xf-LA@stork.gentoo.org
1 Author: grobian
2 Date: 2009-06-27 14:07:14 +0000 (Sat, 27 Jun 2009)
3 New Revision: 13710
4
5 Added:
6 main/branches/prefix/pym/_emerge/Scheduler.py
7 main/branches/prefix/pym/_emerge/_find_deep_system_runtime_deps.py
8 main/branches/prefix/pym/_emerge/_flush_elog_mod_echo.py
9 main/branches/prefix/pym/_emerge/clear_caches.py
10 main/branches/prefix/pym/_emerge/create_depgraph_params.py
11 main/branches/prefix/pym/_emerge/create_world_atom.py
12 main/branches/prefix/pym/_emerge/depgraph.py
13 main/branches/prefix/pym/_emerge/is_valid_package_atom.py
14 Modified:
15 main/branches/prefix/pym/_emerge/__init__.py
16 Log:
17 Merged from trunk -r13671:13672
18
19 | 13672 | Bug #275047 - Split _emerge/__init__.py into smaller pieces |
20 | zmedico | (part 5). Thanks to Sebastian Mingramm (few) |
21 | | <s.mingramm@×××.de> for this patch. |
22
23
24 Copied: main/branches/prefix/pym/_emerge/Scheduler.py (from rev 13672, main/trunk/pym/_emerge/Scheduler.py)
25 ===================================================================
26 --- main/branches/prefix/pym/_emerge/Scheduler.py (rev 0)
27 +++ main/branches/prefix/pym/_emerge/Scheduler.py 2009-06-27 14:07:14 UTC (rev 13710)
28 @@ -0,0 +1,1643 @@
29 +import logging
30 +import os
31 +import sys
32 +import textwrap
33 +import time
34 +import weakref
35 +from itertools import izip
36 +
37 +# for an explanation on this logic, see pym/_emerge/__init__.py
38 +import os
39 +import sys
40 +if os.environ.__contains__("PORTAGE_PYTHONPATH"):
41 + sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
42 +else:
43 + sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
44 +import portage
45 +
46 +from portage.cache.mappings import slot_dict_class
47 +from portage.elog.messages import eerror
48 +from portage.output import colorize, create_color_func, darkgreen, red
49 +bad = create_color_func("BAD")
50 +from portage.sets.base import InternalPackageSet
51 +from portage.util import writemsg, writemsg_level
52 +
53 +from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
54 +from _emerge.Blocker import Blocker
55 +from _emerge.BlockerDB import BlockerDB
56 +from _emerge.clear_caches import clear_caches
57 +from _emerge.create_depgraph_params import create_depgraph_params
58 +from _emerge.create_world_atom import create_world_atom
59 +from _emerge.DepPriority import DepPriority
60 +from _emerge.EbuildFetcher import EbuildFetcher
61 +from _emerge.EbuildPhase import EbuildPhase
62 +from _emerge.emergelog import emergelog, _emerge_log_dir
63 +from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
64 +from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
65 +from _emerge.JobStatusDisplay import JobStatusDisplay
66 +from _emerge.MergeListItem import MergeListItem
67 +from _emerge.Package import Package
68 +from _emerge.PackageMerge import PackageMerge
69 +from _emerge.PollScheduler import PollScheduler
70 +from _emerge.RootConfig import RootConfig
71 +from _emerge.SlotObject import SlotObject
72 +from _emerge.SequentialTaskQueue import SequentialTaskQueue
73 +from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
74 +
75 +import portage.proxy.lazyimport
76 +import portage.proxy as proxy
77 +proxy.lazyimport.lazyimport(globals(),
78 + '_emerge.depgraph:depgraph',
79 +)
80 +
81 +class Scheduler(PollScheduler):
82 +
83 + _opts_ignore_blockers = \
84 + frozenset(["--buildpkgonly",
85 + "--fetchonly", "--fetch-all-uri",
86 + "--nodeps", "--pretend"])
87 +
88 + _opts_no_background = \
89 + frozenset(["--pretend",
90 + "--fetchonly", "--fetch-all-uri"])
91 +
92 + _opts_no_restart = frozenset(["--buildpkgonly",
93 + "--fetchonly", "--fetch-all-uri", "--pretend"])
94 +
95 + _bad_resume_opts = set(["--ask", "--changelog",
96 + "--resume", "--skipfirst"])
97 +
98 + _fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')
99 +
100 + class _iface_class(SlotObject):
101 + __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
102 + "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
103 + "scheduleSetup", "scheduleUnpack", "scheduleYield",
104 + "unregister")
105 +
106 + class _fetch_iface_class(SlotObject):
107 + __slots__ = ("log_file", "schedule")
108 +
109 + _task_queues_class = slot_dict_class(
110 + ("merge", "jobs", "fetch", "unpack"), prefix="")
111 +
112 + class _build_opts_class(SlotObject):
113 + __slots__ = ("buildpkg", "buildpkgonly",
114 + "fetch_all_uri", "fetchonly", "pretend")
115 +
116 + class _binpkg_opts_class(SlotObject):
117 + __slots__ = ("fetchonly", "getbinpkg", "pretend")
118 +
119 + class _pkg_count_class(SlotObject):
120 + __slots__ = ("curval", "maxval")
121 +
122 + class _emerge_log_class(SlotObject):
123 + __slots__ = ("xterm_titles",)
124 +
125 + def log(self, *pargs, **kwargs):
126 + if not self.xterm_titles:
127 + # Avoid interference with the scheduler's status display.
128 + kwargs.pop("short_msg", None)
129 + emergelog(self.xterm_titles, *pargs, **kwargs)
130 +
131 + class _failed_pkg(SlotObject):
132 + __slots__ = ("build_dir", "build_log", "pkg", "returncode")
133 +
134 + class _ConfigPool(object):
135 + """Interface for a task to temporarily allocate a config
136 + instance from a pool. This allows a task to be constructed
137 + long before the config instance actually becomes needed, like
138 + when prefetchers are constructed for the whole merge list."""
139 + __slots__ = ("_root", "_allocate", "_deallocate")
140 + def __init__(self, root, allocate, deallocate):
141 + self._root = root
142 + self._allocate = allocate
143 + self._deallocate = deallocate
144 + def allocate(self):
145 + return self._allocate(self._root)
146 + def deallocate(self, settings):
147 + self._deallocate(settings)
148 +
149 + class _unknown_internal_error(portage.exception.PortageException):
150 + """
151 + Used internally to terminate scheduling. The specific reason for
152 + the failure should have been dumped to stderr.
153 + """
154 + def __init__(self, value=""):
155 + portage.exception.PortageException.__init__(self, value)
156 +
157 + def __init__(self, settings, trees, mtimedb, myopts,
158 + spinner, mergelist, favorites, digraph):
159 + PollScheduler.__init__(self)
160 + self.settings = settings
161 + self.target_root = settings["ROOT"]
162 + self.trees = trees
163 + self.myopts = myopts
164 + self._spinner = spinner
165 + self._mtimedb = mtimedb
166 + self._mergelist = mergelist
167 + self._favorites = favorites
168 + self._args_set = InternalPackageSet(favorites)
169 + self._build_opts = self._build_opts_class()
170 + for k in self._build_opts.__slots__:
171 + setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
172 + self._binpkg_opts = self._binpkg_opts_class()
173 + for k in self._binpkg_opts.__slots__:
174 + setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
175 +
176 + self.curval = 0
177 + self._logger = self._emerge_log_class()
178 + self._task_queues = self._task_queues_class()
179 + for k in self._task_queues.allowed_keys:
180 + setattr(self._task_queues, k,
181 + SequentialTaskQueue())
182 +
183 + # Holds merges that will wait to be executed when no builds are
184 + # executing. This is useful for system packages since dependencies
185 + # on system packages are frequently unspecified.
186 + self._merge_wait_queue = []
187 + # Holds merges that have been transferred from the merge_wait_queue to
188 + # the actual merge queue. They are removed from this list upon
189 + # completion. Other packages can start building only when this list is
190 + # empty.
191 + self._merge_wait_scheduled = []
192 +
193 + # Holds system packages and their deep runtime dependencies. Before
194 + # being merged, these packages go to merge_wait_queue, to be merged
195 + # when no other packages are building.
196 + self._deep_system_deps = set()
197 +
198 + # Holds packages to merge which will satisfy currently unsatisfied
199 + # deep runtime dependencies of system packages. If this is not empty
200 + # then no parallel builds will be spawned until it is empty. This
201 + # minimizes the possibility that a build will fail due to the system
202 + # being in a fragile state. For example, see bug #259954.
203 + self._unsatisfied_system_deps = set()
204 +
205 + self._status_display = JobStatusDisplay(
206 + xterm_titles=('notitles' not in settings.features))
207 + self._max_load = myopts.get("--load-average")
208 + max_jobs = myopts.get("--jobs")
209 + if max_jobs is None:
210 + max_jobs = 1
211 + self._set_max_jobs(max_jobs)
212 +
213 + # The root where the currently running
214 + # portage instance is installed.
215 + self._running_root = trees["/"]["root_config"]
216 + self.edebug = 0
217 + if settings.get("PORTAGE_DEBUG", "") == "1":
218 + self.edebug = 1
219 + self.pkgsettings = {}
220 + self._config_pool = {}
221 + self._blocker_db = {}
222 + for root in trees:
223 + self._config_pool[root] = []
224 + self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
225 +
226 + fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
227 + schedule=self._schedule_fetch)
228 + self._sched_iface = self._iface_class(
229 + dblinkEbuildPhase=self._dblink_ebuild_phase,
230 + dblinkDisplayMerge=self._dblink_display_merge,
231 + dblinkElog=self._dblink_elog,
232 + dblinkEmergeLog=self._dblink_emerge_log,
233 + fetch=fetch_iface, register=self._register,
234 + schedule=self._schedule_wait,
235 + scheduleSetup=self._schedule_setup,
236 + scheduleUnpack=self._schedule_unpack,
237 + scheduleYield=self._schedule_yield,
238 + unregister=self._unregister)
239 +
240 + self._prefetchers = weakref.WeakValueDictionary()
241 + self._pkg_queue = []
242 + self._completed_tasks = set()
243 +
244 + self._failed_pkgs = []
245 + self._failed_pkgs_all = []
246 + self._failed_pkgs_die_msgs = []
247 + self._post_mod_echo_msgs = []
248 + self._parallel_fetch = False
249 + merge_count = len([x for x in mergelist \
250 + if isinstance(x, Package) and x.operation == "merge"])
251 + self._pkg_count = self._pkg_count_class(
252 + curval=0, maxval=merge_count)
253 + self._status_display.maxval = self._pkg_count.maxval
254 +
255 + # The load average takes some time to respond when new
256 + # jobs are added, so we need to limit the rate of adding
257 + # new jobs.
258 + self._job_delay_max = 10
259 + self._job_delay_factor = 1.0
260 + self._job_delay_exp = 1.5
261 + self._previous_job_start_time = None
262 +
263 + self._set_digraph(digraph)
264 +
265 + # This is used to memoize the _choose_pkg() result when
266 + # no packages can be chosen until one of the existing
267 + # jobs completes.
268 + self._choose_pkg_return_early = False
269 +
270 + features = self.settings.features
271 + if "parallel-fetch" in features and \
272 + not ("--pretend" in self.myopts or \
273 + "--fetch-all-uri" in self.myopts or \
274 + "--fetchonly" in self.myopts):
275 + if "distlocks" not in features:
276 + portage.writemsg(red("!!!")+"\n", noiselevel=-1)
277 + portage.writemsg(red("!!!")+" parallel-fetching " + \
278 + "requires the distlocks feature enabled"+"\n",
279 + noiselevel=-1)
280 + portage.writemsg(red("!!!")+" you have it disabled, " + \
281 + "thus parallel-fetching is being disabled"+"\n",
282 + noiselevel=-1)
283 + portage.writemsg(red("!!!")+"\n", noiselevel=-1)
284 + elif len(mergelist) > 1:
285 + self._parallel_fetch = True
286 +
287 + if self._parallel_fetch:
288 + # clear out existing fetch log if it exists
289 + try:
290 + open(self._fetch_log, 'w')
291 + except EnvironmentError:
292 + pass
293 +
294 + self._running_portage = None
295 + portage_match = self._running_root.trees["vartree"].dbapi.match(
296 + portage.const.PORTAGE_PACKAGE_ATOM)
297 + if portage_match:
298 + cpv = portage_match.pop()
299 + self._running_portage = self._pkg(cpv, "installed",
300 + self._running_root, installed=True)
301 +
302 + def _poll(self, timeout=None):
303 + self._schedule()
304 + PollScheduler._poll(self, timeout=timeout)
305 +
306 + def _set_max_jobs(self, max_jobs):
307 + self._max_jobs = max_jobs
308 + self._task_queues.jobs.max_jobs = max_jobs
309 +
310 + def _background_mode(self):
311 + """
312 + Check if background mode is enabled and adjust states as necessary.
313 +
314 + @rtype: bool
315 + @returns: True if background mode is enabled, False otherwise.
316 + """
317 + background = (self._max_jobs is True or \
318 + self._max_jobs > 1 or "--quiet" in self.myopts) and \
319 + not bool(self._opts_no_background.intersection(self.myopts))
320 +
321 + if background:
322 + interactive_tasks = self._get_interactive_tasks()
323 + if interactive_tasks:
324 + background = False
325 + writemsg_level(">>> Sending package output to stdio due " + \
326 + "to interactive package(s):\n",
327 + level=logging.INFO, noiselevel=-1)
328 + msg = [""]
329 + for pkg in interactive_tasks:
330 + pkg_str = " " + colorize("INFORM", str(pkg.cpv))
331 + if pkg.root != "/":
332 + pkg_str += " for " + pkg.root
333 + msg.append(pkg_str)
334 + msg.append("")
335 + writemsg_level("".join("%s\n" % (l,) for l in msg),
336 + level=logging.INFO, noiselevel=-1)
337 + if self._max_jobs is True or self._max_jobs > 1:
338 + self._set_max_jobs(1)
339 + writemsg_level(">>> Setting --jobs=1 due " + \
340 + "to the above interactive package(s)\n",
341 + level=logging.INFO, noiselevel=-1)
342 +
343 + self._status_display.quiet = \
344 + not background or \
345 + ("--quiet" in self.myopts and \
346 + "--verbose" not in self.myopts)
347 +
348 + self._logger.xterm_titles = \
349 + "notitles" not in self.settings.features and \
350 + self._status_display.quiet
351 +
352 + return background
353 +
354 + def _get_interactive_tasks(self):
355 + from portage import flatten
356 + from portage.dep import use_reduce, paren_reduce
357 + interactive_tasks = []
358 + for task in self._mergelist:
359 + if not (isinstance(task, Package) and \
360 + task.operation == "merge"):
361 + continue
362 + try:
363 + properties = flatten(use_reduce(paren_reduce(
364 + task.metadata["PROPERTIES"]), uselist=task.use.enabled))
365 + except portage.exception.InvalidDependString, e:
366 + show_invalid_depstring_notice(task,
367 + task.metadata["PROPERTIES"], str(e))
368 + raise self._unknown_internal_error()
369 + if "interactive" in properties:
370 + interactive_tasks.append(task)
371 + return interactive_tasks
372 +
373 + def _set_digraph(self, digraph):
374 + if "--nodeps" in self.myopts or \
375 + (self._max_jobs is not True and self._max_jobs < 2):
376 + # save some memory
377 + self._digraph = None
378 + return
379 +
380 + self._digraph = digraph
381 + self._find_system_deps()
382 + self._prune_digraph()
383 + self._prevent_builddir_collisions()
384 +
385 + def _find_system_deps(self):
386 + """
387 + Find system packages and their deep runtime dependencies. Before being
388 + merged, these packages go to merge_wait_queue, to be merged when no
389 + other packages are building.
390 + """
391 + deep_system_deps = self._deep_system_deps
392 + deep_system_deps.clear()
393 + deep_system_deps.update(
394 + _find_deep_system_runtime_deps(self._digraph))
395 + deep_system_deps.difference_update([pkg for pkg in \
396 + deep_system_deps if pkg.operation != "merge"])
397 +
398 + def _prune_digraph(self):
399 + """
400 + Prune any root nodes that are irrelevant.
401 + """
402 +
403 + graph = self._digraph
404 + completed_tasks = self._completed_tasks
405 + removed_nodes = set()
406 + while True:
407 + for node in graph.root_nodes():
408 + if not isinstance(node, Package) or \
409 + (node.installed and node.operation == "nomerge") or \
410 + node.onlydeps or \
411 + node in completed_tasks:
412 + removed_nodes.add(node)
413 + if removed_nodes:
414 + graph.difference_update(removed_nodes)
415 + if not removed_nodes:
416 + break
417 + removed_nodes.clear()
418 +
419 + def _prevent_builddir_collisions(self):
420 + """
421 + When building stages, sometimes the same exact cpv needs to be merged
422 + to both $ROOTs. Add edges to the digraph in order to avoid collisions
423 + in the builddir. Currently, normal file locks would be inappropriate
424 + for this purpose since emerge holds all of its build dir locks from
425 + the main process.
426 + """
427 + cpv_map = {}
428 + for pkg in self._mergelist:
429 + if not isinstance(pkg, Package):
430 + # a satisfied blocker
431 + continue
432 + if pkg.installed:
433 + continue
434 + if pkg.cpv not in cpv_map:
435 + cpv_map[pkg.cpv] = [pkg]
436 + continue
437 + for earlier_pkg in cpv_map[pkg.cpv]:
438 + self._digraph.add(earlier_pkg, pkg,
439 + priority=DepPriority(buildtime=True))
440 + cpv_map[pkg.cpv].append(pkg)
441 +
442 + class _pkg_failure(portage.exception.PortageException):
443 + """
444 + An instance of this class is raised by unmerge() when
445 + an uninstallation fails.
446 + """
447 + status = 1
448 + def __init__(self, *pargs):
449 + portage.exception.PortageException.__init__(self, pargs)
450 + if pargs:
451 + self.status = pargs[0]
452 +
453 + def _schedule_fetch(self, fetcher):
454 + """
455 + Schedule a fetcher on the fetch queue, in order to
456 + serialize access to the fetch log.
457 + """
458 + self._task_queues.fetch.addFront(fetcher)
459 +
460 + def _schedule_setup(self, setup_phase):
461 + """
462 + Schedule a setup phase on the merge queue, in order to
463 + serialize unsandboxed access to the live filesystem.
464 + """
465 + self._task_queues.merge.addFront(setup_phase)
466 + self._schedule()
467 +
468 + def _schedule_unpack(self, unpack_phase):
469 + """
470 + Schedule an unpack phase on the unpack queue, in order
471 + to serialize $DISTDIR access for live ebuilds.
472 + """
473 + self._task_queues.unpack.add(unpack_phase)
474 +
475 + def _find_blockers(self, new_pkg):
476 + """
477 + Returns a callable which should be called only when
478 + the vdb lock has been acquired.
479 + """
480 + def get_blockers():
481 + return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
482 + return get_blockers
483 +
484 + def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
485 + if self._opts_ignore_blockers.intersection(self.myopts):
486 + return None
487 +
488 + # Call gc.collect() here to avoid heap overflow that
489 + # triggers 'Cannot allocate memory' errors (reported
490 + # with python-2.5).
491 + import gc
492 + gc.collect()
493 +
494 + blocker_db = self._blocker_db[new_pkg.root]
495 +
496 + blocker_dblinks = []
497 + for blocking_pkg in blocker_db.findInstalledBlockers(
498 + new_pkg, acquire_lock=acquire_lock):
499 + if new_pkg.slot_atom == blocking_pkg.slot_atom:
500 + continue
501 + if new_pkg.cpv == blocking_pkg.cpv:
502 + continue
503 + blocker_dblinks.append(portage.dblink(
504 + blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
505 + self.pkgsettings[blocking_pkg.root], treetype="vartree",
506 + vartree=self.trees[blocking_pkg.root]["vartree"]))
507 +
508 + gc.collect()
509 +
510 + return blocker_dblinks
511 +
512 + def _dblink_pkg(self, pkg_dblink):
513 + cpv = pkg_dblink.mycpv
514 + type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
515 + root_config = self.trees[pkg_dblink.myroot]["root_config"]
516 + installed = type_name == "installed"
517 + return self._pkg(cpv, type_name, root_config, installed=installed)
518 +
519 + def _append_to_log_path(self, log_path, msg):
520 + f = open(log_path, 'a')
521 + try:
522 + f.write(msg)
523 + finally:
524 + f.close()
525 +
526 + def _dblink_elog(self, pkg_dblink, phase, func, msgs):
527 +
528 + log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
529 + log_file = None
530 + out = sys.stdout
531 + background = self._background
532 +
533 + if background and log_path is not None:
534 + log_file = open(log_path, 'a')
535 + out = log_file
536 +
537 + try:
538 + for msg in msgs:
539 + func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
540 + finally:
541 + if log_file is not None:
542 + log_file.close()
543 +
544 + def _dblink_emerge_log(self, msg):
545 + self._logger.log(msg)
546 +
547 + def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
548 + log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
549 + background = self._background
550 +
551 + if log_path is None:
552 + if not (background and level < logging.WARN):
553 + portage.util.writemsg_level(msg,
554 + level=level, noiselevel=noiselevel)
555 + else:
556 + if not background:
557 + portage.util.writemsg_level(msg,
558 + level=level, noiselevel=noiselevel)
559 + self._append_to_log_path(log_path, msg)
560 +
561 + def _dblink_ebuild_phase(self,
562 + pkg_dblink, pkg_dbapi, ebuild_path, phase):
563 + """
564 + Using this callback for merge phases allows the scheduler
565 + to run while these phases execute asynchronously, and allows
566 + the scheduler control output handling.
567 + """
568 +
569 + scheduler = self._sched_iface
570 + settings = pkg_dblink.settings
571 + pkg = self._dblink_pkg(pkg_dblink)
572 + background = self._background
573 + log_path = settings.get("PORTAGE_LOG_FILE")
574 +
575 + ebuild_phase = EbuildPhase(background=background,
576 + pkg=pkg, phase=phase, scheduler=scheduler,
577 + settings=settings, tree=pkg_dblink.treetype)
578 + ebuild_phase.start()
579 + ebuild_phase.wait()
580 +
581 + return ebuild_phase.returncode
582 +
583 + def _generate_digests(self):
584 + """
585 + Generate digests if necessary for --digests or FEATURES=digest.
586 + In order to avoid interference, this must be done before parallel
587 + tasks are started.
588 + """
589 +
590 + if '--fetchonly' in self.myopts:
591 + return os.EX_OK
592 +
593 + digest = '--digest' in self.myopts
594 + if not digest:
595 + for pkgsettings in self.pkgsettings.itervalues():
596 + if 'digest' in pkgsettings.features:
597 + digest = True
598 + break
599 +
600 + if not digest:
601 + return os.EX_OK
602 +
603 + for x in self._mergelist:
604 + if not isinstance(x, Package) or \
605 + x.type_name != 'ebuild' or \
606 + x.operation != 'merge':
607 + continue
608 + pkgsettings = self.pkgsettings[x.root]
609 + if '--digest' not in self.myopts and \
610 + 'digest' not in pkgsettings.features:
611 + continue
612 + portdb = x.root_config.trees['porttree'].dbapi
613 + ebuild_path = portdb.findname(x.cpv)
614 + if not ebuild_path:
615 + writemsg_level(
616 + "!!! Could not locate ebuild for '%s'.\n" \
617 + % x.cpv, level=logging.ERROR, noiselevel=-1)
618 + return 1
619 + pkgsettings['O'] = os.path.dirname(ebuild_path)
620 + if not portage.digestgen([], pkgsettings, myportdb=portdb):
621 + writemsg_level(
622 + "!!! Unable to generate manifest for '%s'.\n" \
623 + % x.cpv, level=logging.ERROR, noiselevel=-1)
624 + return 1
625 +
626 + return os.EX_OK
627 +
628 + def _check_manifests(self):
629 + # Verify all the manifests now so that the user is notified of failure
630 + # as soon as possible.
631 + if "strict" not in self.settings.features or \
632 + "--fetchonly" in self.myopts or \
633 + "--fetch-all-uri" in self.myopts:
634 + return os.EX_OK
635 +
636 + shown_verifying_msg = False
637 + quiet_settings = {}
638 + for myroot, pkgsettings in self.pkgsettings.iteritems():
639 + quiet_config = portage.config(clone=pkgsettings)
640 + quiet_config["PORTAGE_QUIET"] = "1"
641 + quiet_config.backup_changes("PORTAGE_QUIET")
642 + quiet_settings[myroot] = quiet_config
643 + del quiet_config
644 +
645 + for x in self._mergelist:
646 + if not isinstance(x, Package) or \
647 + x.type_name != "ebuild":
648 + continue
649 +
650 + if not shown_verifying_msg:
651 + shown_verifying_msg = True
652 + self._status_msg("Verifying ebuild manifests")
653 +
654 + root_config = x.root_config
655 + portdb = root_config.trees["porttree"].dbapi
656 + quiet_config = quiet_settings[root_config.root]
657 + quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
658 + if not portage.digestcheck([], quiet_config, strict=True):
659 + return 1
660 +
661 + return os.EX_OK
662 +
663 + def _add_prefetchers(self):
664 +
665 + if not self._parallel_fetch:
666 + return
667 +
668 + if self._parallel_fetch:
669 + self._status_msg("Starting parallel fetch")
670 +
671 + prefetchers = self._prefetchers
672 + getbinpkg = "--getbinpkg" in self.myopts
673 +
674 + # In order to avoid "waiting for lock" messages
675 + # at the beginning, which annoy users, never
676 + # spawn a prefetcher for the first package.
677 + for pkg in self._mergelist[1:]:
678 + prefetcher = self._create_prefetcher(pkg)
679 + if prefetcher is not None:
680 + self._task_queues.fetch.add(prefetcher)
681 + prefetchers[pkg] = prefetcher
682 +
683 + def _create_prefetcher(self, pkg):
684 + """
685 + @return: a prefetcher, or None if not applicable
686 + """
687 + prefetcher = None
688 +
689 + if not isinstance(pkg, Package):
690 + pass
691 +
692 + elif pkg.type_name == "ebuild":
693 +
694 + prefetcher = EbuildFetcher(background=True,
695 + config_pool=self._ConfigPool(pkg.root,
696 + self._allocate_config, self._deallocate_config),
697 + fetchonly=1, logfile=self._fetch_log,
698 + pkg=pkg, prefetch=True, scheduler=self._sched_iface)
699 +
700 + elif pkg.type_name == "binary" and \
701 + "--getbinpkg" in self.myopts and \
702 + pkg.root_config.trees["bintree"].isremote(pkg.cpv):
703 +
704 + prefetcher = BinpkgPrefetcher(background=True,
705 + pkg=pkg, scheduler=self._sched_iface)
706 +
707 + return prefetcher
708 +
709 + def _is_restart_scheduled(self):
710 + """
711 + Check if the merge list contains a replacement
712 + for the current running instance, that will result
713 + in restart after merge.
714 + @rtype: bool
715 + @returns: True if a restart is scheduled, False otherwise.
716 + """
717 + if self._opts_no_restart.intersection(self.myopts):
718 + return False
719 +
720 + mergelist = self._mergelist
721 +
722 + for i, pkg in enumerate(mergelist):
723 + if self._is_restart_necessary(pkg) and \
724 + i != len(mergelist) - 1:
725 + return True
726 +
727 + return False
728 +
729 + def _is_restart_necessary(self, pkg):
730 + """
731 + @return: True if merging the given package
732 + requires restart, False otherwise.
733 + """
734 +
735 + # Figure out if we need a restart.
736 + if pkg.root == self._running_root.root and \
737 + portage.match_from_list(
738 + portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
739 + if self._running_portage:
740 + return pkg.cpv != self._running_portage.cpv
741 + return True
742 + return False
743 +
744 + def _restart_if_necessary(self, pkg):
745 + """
746 + Use execv() to restart emerge. This happens
747 + if portage upgrades itself and there are
748 + remaining packages in the list.
749 + """
750 +
751 + if self._opts_no_restart.intersection(self.myopts):
752 + return
753 +
754 + if not self._is_restart_necessary(pkg):
755 + return
756 +
757 + if pkg == self._mergelist[-1]:
758 + return
759 +
760 + self._main_loop_cleanup()
761 +
762 + logger = self._logger
763 + pkg_count = self._pkg_count
764 + mtimedb = self._mtimedb
765 + bad_resume_opts = self._bad_resume_opts
766 +
767 + logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
768 + (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
769 +
770 + logger.log(" *** RESTARTING " + \
771 + "emerge via exec() after change of " + \
772 + "portage version.")
773 +
774 + mtimedb["resume"]["mergelist"].remove(list(pkg))
775 + mtimedb.commit()
776 + portage.run_exitfuncs()
777 + mynewargv = [sys.argv[0], "--resume"]
778 + resume_opts = self.myopts.copy()
779 + # For automatic resume, we need to prevent
780 + # any of bad_resume_opts from leaking in
781 + # via EMERGE_DEFAULT_OPTS.
782 + resume_opts["--ignore-default-opts"] = True
783 + for myopt, myarg in resume_opts.iteritems():
784 + if myopt not in bad_resume_opts:
785 + if myarg is True:
786 + mynewargv.append(myopt)
787 + else:
788 + mynewargv.append(myopt +"="+ str(myarg))
789 + # priority only needs to be adjusted on the first run
790 + os.environ["PORTAGE_NICENESS"] = "0"
791 + os.execv(mynewargv[0], mynewargv)
792 +
793 + def merge(self):
794 +
795 + if "--resume" in self.myopts:
796 + # We're resuming.
797 + portage.writemsg_stdout(
798 + colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
799 + self._logger.log(" *** Resuming merge...")
800 +
801 + self._save_resume_list()
802 +
803 + try:
804 + self._background = self._background_mode()
805 + except self._unknown_internal_error:
806 + return 1
807 +
808 + for root in self.trees:
809 + root_config = self.trees[root]["root_config"]
810 +
811 + # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
812 + # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
813 + # for ensuring sane $PWD (bug #239560) and storing elog messages.
814 + tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
815 + if not tmpdir or not os.path.isdir(tmpdir):
816 + msg = "The directory specified in your " + \
817 + "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
818 + "does not exist. Please create this " + \
819 + "directory or correct your PORTAGE_TMPDIR setting."
820 + msg = textwrap.wrap(msg, 70)
821 + out = portage.output.EOutput()
822 + for l in msg:
823 + out.eerror(l)
824 + return 1
825 +
826 + if self._background:
827 + root_config.settings.unlock()
828 + root_config.settings["PORTAGE_BACKGROUND"] = "1"
829 + root_config.settings.backup_changes("PORTAGE_BACKGROUND")
830 + root_config.settings.lock()
831 +
832 + self.pkgsettings[root] = portage.config(
833 + clone=root_config.settings)
834 +
835 + rval = self._generate_digests()
836 + if rval != os.EX_OK:
837 + return rval
838 +
839 + rval = self._check_manifests()
840 + if rval != os.EX_OK:
841 + return rval
842 +
843 + keep_going = "--keep-going" in self.myopts
844 + fetchonly = self._build_opts.fetchonly
845 + mtimedb = self._mtimedb
846 + failed_pkgs = self._failed_pkgs
847 +
848 + while True:
849 + rval = self._merge()
850 + if rval == os.EX_OK or fetchonly or not keep_going:
851 + break
852 + if "resume" not in mtimedb:
853 + break
854 + mergelist = self._mtimedb["resume"].get("mergelist")
855 + if not mergelist:
856 + break
857 +
858 + if not failed_pkgs:
859 + break
860 +
861 + for failed_pkg in failed_pkgs:
862 + mergelist.remove(list(failed_pkg.pkg))
863 +
864 + self._failed_pkgs_all.extend(failed_pkgs)
865 + del failed_pkgs[:]
866 +
867 + if not mergelist:
868 + break
869 +
870 + if not self._calc_resume_list():
871 + break
872 +
873 + clear_caches(self.trees)
874 + if not self._mergelist:
875 + break
876 +
877 + self._save_resume_list()
878 + self._pkg_count.curval = 0
879 + self._pkg_count.maxval = len([x for x in self._mergelist \
880 + if isinstance(x, Package) and x.operation == "merge"])
881 + self._status_display.maxval = self._pkg_count.maxval
882 +
883 + self._logger.log(" *** Finished. Cleaning up...")
884 +
885 + if failed_pkgs:
886 + self._failed_pkgs_all.extend(failed_pkgs)
887 + del failed_pkgs[:]
888 +
889 + background = self._background
890 + failure_log_shown = False
891 + if background and len(self._failed_pkgs_all) == 1:
892 + # If only one package failed then just show its
893 + # whole log for easy viewing.
894 + failed_pkg = self._failed_pkgs_all[-1]
895 + build_dir = failed_pkg.build_dir
896 + log_file = None
897 +
898 + log_paths = [failed_pkg.build_log]
899 +
900 + log_path = self._locate_failure_log(failed_pkg)
901 + if log_path is not None:
902 + try:
903 + log_file = open(log_path)
904 + except IOError:
905 + pass
906 +
907 + if log_file is not None:
908 + try:
909 + for line in log_file:
910 + writemsg_level(line, noiselevel=-1)
911 + finally:
912 + log_file.close()
913 + failure_log_shown = True
914 +
915 + # Dump mod_echo output now since it tends to flood the terminal.
916 + # This allows us to avoid having more important output, generated
917 + # later, from being swept away by the mod_echo output.
918 + mod_echo_output = _flush_elog_mod_echo()
919 +
920 + if background and not failure_log_shown and \
921 + self._failed_pkgs_all and \
922 + self._failed_pkgs_die_msgs and \
923 + not mod_echo_output:
924 +
925 + printer = portage.output.EOutput()
926 + for mysettings, key, logentries in self._failed_pkgs_die_msgs:
927 + root_msg = ""
928 + if mysettings["ROOT"] != "/":
929 + root_msg = " merged to %s" % mysettings["ROOT"]
930 + print
931 + printer.einfo("Error messages for package %s%s:" % \
932 + (colorize("INFORM", key), root_msg))
933 + print
934 + for phase in portage.const.EBUILD_PHASES:
935 + if phase not in logentries:
936 + continue
937 + for msgtype, msgcontent in logentries[phase]:
938 + if isinstance(msgcontent, basestring):
939 + msgcontent = [msgcontent]
940 + for line in msgcontent:
941 + printer.eerror(line.strip("\n"))
942 +
943 + if self._post_mod_echo_msgs:
944 + for msg in self._post_mod_echo_msgs:
945 + msg()
946 +
947 + if len(self._failed_pkgs_all) > 1 or \
948 + (self._failed_pkgs_all and "--keep-going" in self.myopts):
949 + if len(self._failed_pkgs_all) > 1:
950 + msg = "The following %d packages have " % \
951 + len(self._failed_pkgs_all) + \
952 + "failed to build or install:"
953 + else:
954 + msg = "The following package has " + \
955 + "failed to build or install:"
956 + prefix = bad(" * ")
957 + writemsg(prefix + "\n", noiselevel=-1)
958 + from textwrap import wrap
959 + for line in wrap(msg, 72):
960 + writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
961 + writemsg(prefix + "\n", noiselevel=-1)
962 + for failed_pkg in self._failed_pkgs_all:
963 + writemsg("%s\t%s\n" % (prefix,
964 + colorize("INFORM", str(failed_pkg.pkg))),
965 + noiselevel=-1)
966 + writemsg(prefix + "\n", noiselevel=-1)
967 +
968 + return rval
969 +
def _elog_listener(self, mysettings, key, logentries, fulltext):
    """Collect ERROR-level elog entries so they can be shown after the run."""
    error_entries = portage.elog.filter_loglevels(logentries, ["ERROR"])
    if not error_entries:
        return
    self._failed_pkgs_die_msgs.append((mysettings, key, error_entries))
975 +
976 + def _locate_failure_log(self, failed_pkg):
977 +
978 + build_dir = failed_pkg.build_dir
979 + log_file = None
980 +
981 + log_paths = [failed_pkg.build_log]
982 +
983 + for log_path in log_paths:
984 + if not log_path:
985 + continue
986 +
987 + try:
988 + log_size = os.stat(log_path).st_size
989 + except OSError:
990 + continue
991 +
992 + if log_size == 0:
993 + continue
994 +
995 + return log_path
996 +
997 + return None
998 +
999 + def _add_packages(self):
1000 + pkg_queue = self._pkg_queue
1001 + for pkg in self._mergelist:
1002 + if isinstance(pkg, Package):
1003 + pkg_queue.append(pkg)
1004 + elif isinstance(pkg, Blocker):
1005 + pass
1006 +
1007 + def _system_merge_started(self, merge):
1008 + """
1009 + Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
1010 + """
1011 + graph = self._digraph
1012 + if graph is None:
1013 + return
1014 + pkg = merge.merge.pkg
1015 +
1016 + # Skip this if $ROOT != / since it shouldn't matter if there
1017 + # are unsatisfied system runtime deps in this case.
1018 + if pkg.root != '/':
1019 + return
1020 +
1021 + completed_tasks = self._completed_tasks
1022 + unsatisfied = self._unsatisfied_system_deps
1023 +
1024 + def ignore_non_runtime_or_satisfied(priority):
1025 + """
1026 + Ignore non-runtime and satisfied runtime priorities.
1027 + """
1028 + if isinstance(priority, DepPriority) and \
1029 + not priority.satisfied and \
1030 + (priority.runtime or priority.runtime_post):
1031 + return False
1032 + return True
1033 +
1034 + # When checking for unsatisfied runtime deps, only check
1035 + # direct deps since indirect deps are checked when the
1036 + # corresponding parent is merged.
1037 + for child in graph.child_nodes(pkg,
1038 + ignore_priority=ignore_non_runtime_or_satisfied):
1039 + if not isinstance(child, Package) or \
1040 + child.operation == 'uninstall':
1041 + continue
1042 + if child is pkg:
1043 + continue
1044 + if child.operation == 'merge' and \
1045 + child not in completed_tasks:
1046 + unsatisfied.add(child)
1047 +
1048 + def _merge_wait_exit_handler(self, task):
1049 + self._merge_wait_scheduled.remove(task)
1050 + self._merge_exit(task)
1051 +
1052 + def _merge_exit(self, merge):
1053 + self._do_merge_exit(merge)
1054 + self._deallocate_config(merge.merge.settings)
1055 + if merge.returncode == os.EX_OK and \
1056 + not merge.merge.pkg.installed:
1057 + self._status_display.curval += 1
1058 + self._status_display.merges = len(self._task_queues.merge)
1059 + self._schedule()
1060 +
def _do_merge_exit(self, merge):
    """
    Handle the outcome of a completed merge task.

    On failure: record a _failed_pkg entry (with build dir/log for
    diagnostics), emit a failure message, and update the failure count
    in the status display.

    On success: mark the task (and any replaced package's uninstall
    task) complete, trigger a restart check for non-installed packages,
    and prune the resume mergelist so --resume works after an
    interruption.
    """
    pkg = merge.merge.pkg
    if merge.returncode != os.EX_OK:
        settings = merge.merge.settings
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            pkg=pkg,
            returncode=merge.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")

        self._status_display.failed = len(self._failed_pkgs)
        return

    self._task_complete(pkg)
    pkg_to_replace = merge.merge.pkg_to_replace
    if pkg_to_replace is not None:
        # When a package is replaced, mark its uninstall
        # task complete (if any).
        uninst_hash_key = \
            ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
        self._task_complete(uninst_hash_key)

    if pkg.installed:
        # Re-merges of installed packages don't affect the resume list.
        return

    self._restart_if_necessary(pkg)

    # Call mtimedb.commit() after each merge so that
    # --resume still works after being interrupted
    # by reboot, sigkill or similar.
    mtimedb = self._mtimedb
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    if not mtimedb["resume"]["mergelist"]:
        del mtimedb["resume"]
    mtimedb.commit()
1099 +
def _build_exit(self, build):
    """
    Handle a finished build job.

    On success, wrap the build in a PackageMerge and either queue it
    behind the running builds (deep system deps merge only when idle)
    or add it directly to the merge queue. On failure, record a
    _failed_pkg entry. In both cases the build's config instance is
    released, the job count drops, and a new scheduling pass runs.
    """
    if build.returncode == os.EX_OK:
        self.curval += 1
        merge = PackageMerge(merge=build)
        if not build.build_opts.buildpkgonly and \
            build.pkg in self._deep_system_deps:
            # Since dependencies on system packages are frequently
            # unspecified, merge them only when no builds are executing.
            self._merge_wait_queue.append(merge)
            merge.addStartListener(self._system_merge_started)
        else:
            merge.addExitListener(self._merge_exit)
            self._task_queues.merge.add(merge)
            self._status_display.merges = len(self._task_queues.merge)
    else:
        settings = build.settings
        # Preserve build dir/log paths for failure diagnostics.
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            pkg=build.pkg,
            returncode=build.returncode))
        self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")

        self._status_display.failed = len(self._failed_pkgs)
    self._deallocate_config(build.settings)
    self._jobs -= 1
    self._status_display.running = self._jobs
    self._schedule()
1130 +
1131 + def _extract_exit(self, build):
1132 + self._build_exit(build)
1133 +
1134 + def _task_complete(self, pkg):
1135 + self._completed_tasks.add(pkg)
1136 + self._unsatisfied_system_deps.discard(pkg)
1137 + self._choose_pkg_return_early = False
1138 +
def _merge(self):
    """
    Run the main merge loop and return the exit status: os.EX_OK, or
    the returncode of the last failed package.

    (The unused ``pkg_queue`` local from the original has been removed
    — it was dead code.)
    """
    self._add_prefetchers()
    self._add_packages()
    failed_pkgs = self._failed_pkgs
    # Silence lock messages and capture elog die messages while merging.
    portage.locks._quiet = self._background
    portage.elog._emerge_elog_listener = self._elog_listener
    rval = os.EX_OK

    try:
        self._main_loop()
    finally:
        # Always restore global portage state, even on interruption.
        self._main_loop_cleanup()
        portage.locks._quiet = False
        portage.elog._emerge_elog_listener = None
        if failed_pkgs:
            rval = failed_pkgs[-1].returncode

    return rval
1159 +
1160 + def _main_loop_cleanup(self):
1161 + del self._pkg_queue[:]
1162 + self._completed_tasks.clear()
1163 + self._deep_system_deps.clear()
1164 + self._unsatisfied_system_deps.clear()
1165 + self._choose_pkg_return_early = False
1166 + self._status_display.reset()
1167 + self._digraph = None
1168 + self._task_queues.fetch.clear()
1169 +
1170 + def _choose_pkg(self):
1171 + """
1172 + Choose a task that has all it's dependencies satisfied.
1173 + """
1174 +
1175 + if self._choose_pkg_return_early:
1176 + return None
1177 +
1178 + if self._digraph is None:
1179 + if (self._jobs or self._task_queues.merge) and \
1180 + not ("--nodeps" in self.myopts and \
1181 + (self._max_jobs is True or self._max_jobs > 1)):
1182 + self._choose_pkg_return_early = True
1183 + return None
1184 + return self._pkg_queue.pop(0)
1185 +
1186 + if not (self._jobs or self._task_queues.merge):
1187 + return self._pkg_queue.pop(0)
1188 +
1189 + self._prune_digraph()
1190 +
1191 + chosen_pkg = None
1192 + later = set(self._pkg_queue)
1193 + for pkg in self._pkg_queue:
1194 + later.remove(pkg)
1195 + if not self._dependent_on_scheduled_merges(pkg, later):
1196 + chosen_pkg = pkg
1197 + break
1198 +
1199 + if chosen_pkg is not None:
1200 + self._pkg_queue.remove(chosen_pkg)
1201 +
1202 + if chosen_pkg is None:
1203 + # There's no point in searching for a package to
1204 + # choose until at least one of the existing jobs
1205 + # completes.
1206 + self._choose_pkg_return_early = True
1207 +
1208 + return chosen_pkg
1209 +
1210 + def _dependent_on_scheduled_merges(self, pkg, later):
1211 + """
1212 + Traverse the subgraph of the given packages deep dependencies
1213 + to see if it contains any scheduled merges.
1214 + @param pkg: a package to check dependencies for
1215 + @type pkg: Package
1216 + @param later: packages for which dependence should be ignored
1217 + since they will be merged later than pkg anyway and therefore
1218 + delaying the merge of pkg will not result in a more optimal
1219 + merge order
1220 + @type later: set
1221 + @rtype: bool
1222 + @returns: True if the package is dependent, False otherwise.
1223 + """
1224 +
1225 + graph = self._digraph
1226 + completed_tasks = self._completed_tasks
1227 +
1228 + dependent = False
1229 + traversed_nodes = set([pkg])
1230 + direct_deps = graph.child_nodes(pkg)
1231 + node_stack = direct_deps
1232 + direct_deps = frozenset(direct_deps)
1233 + while node_stack:
1234 + node = node_stack.pop()
1235 + if node in traversed_nodes:
1236 + continue
1237 + traversed_nodes.add(node)
1238 + if not ((node.installed and node.operation == "nomerge") or \
1239 + (node.operation == "uninstall" and \
1240 + node not in direct_deps) or \
1241 + node in completed_tasks or \
1242 + node in later):
1243 + dependent = True
1244 + break
1245 + node_stack.extend(graph.child_nodes(node))
1246 +
1247 + return dependent
1248 +
1249 + def _allocate_config(self, root):
1250 + """
1251 + Allocate a unique config instance for a task in order
1252 + to prevent interference between parallel tasks.
1253 + """
1254 + if self._config_pool[root]:
1255 + temp_settings = self._config_pool[root].pop()
1256 + else:
1257 + temp_settings = portage.config(clone=self.pkgsettings[root])
1258 + # Since config.setcpv() isn't guaranteed to call config.reset() due to
1259 + # performance reasons, call it here to make sure all settings from the
1260 + # previous package get flushed out (such as PORTAGE_LOG_FILE).
1261 + temp_settings.reload()
1262 + temp_settings.reset()
1263 + return temp_settings
1264 +
1265 + def _deallocate_config(self, settings):
1266 + self._config_pool[settings["ROOT"]].append(settings)
1267 +
1268 + def _main_loop(self):
1269 +
1270 + # Only allow 1 job max if a restart is scheduled
1271 + # due to portage update.
1272 + if self._is_restart_scheduled() or \
1273 + self._opts_no_background.intersection(self.myopts):
1274 + self._set_max_jobs(1)
1275 +
1276 + merge_queue = self._task_queues.merge
1277 +
1278 + while self._schedule():
1279 + if self._poll_event_handlers:
1280 + self._poll_loop()
1281 +
1282 + while True:
1283 + self._schedule()
1284 + if not (self._jobs or merge_queue):
1285 + break
1286 + if self._poll_event_handlers:
1287 + self._poll_loop()
1288 +
1289 + def _keep_scheduling(self):
1290 + return bool(self._pkg_queue and \
1291 + not (self._failed_pkgs and not self._build_opts.fetchonly))
1292 +
def _schedule_tasks(self):
    """
    Perform one scheduling pass: flush waiting merges when no builds
    are running, start new tasks, pump all task queues, and report
    whether scheduling should continue.

    @rtype: bool
    @returns: True if there is still work left to schedule.
    """
    # When the number of jobs drops to zero, process all waiting merges.
    if not self._jobs and self._merge_wait_queue:
        for task in self._merge_wait_queue:
            task.addExitListener(self._merge_wait_exit_handler)
            self._task_queues.merge.add(task)
        self._status_display.merges = len(self._task_queues.merge)
        self._merge_wait_scheduled.extend(self._merge_wait_queue)
        del self._merge_wait_queue[:]

    self._schedule_tasks_imp()
    self._status_display.display()

    state_change = 0
    for q in self._task_queues.values():
        if q.schedule():
            state_change += 1

    # Cancel prefetchers if they're the only reason
    # the main poll loop is still running.
    if self._failed_pkgs and not self._build_opts.fetchonly and \
        not (self._jobs or self._task_queues.merge) and \
        self._task_queues.fetch:
        self._task_queues.fetch.clear()
        state_change += 1

    # If queue scheduling changed state, new job slots may have opened;
    # try to fill them before returning to the poll loop.
    if state_change:
        self._schedule_tasks_imp()
        self._status_display.display()

    return self._keep_scheduling()
1325 +
1326 + def _job_delay(self):
1327 + """
1328 + @rtype: bool
1329 + @returns: True if job scheduling should be delayed, False otherwise.
1330 + """
1331 +
1332 + if self._jobs and self._max_load is not None:
1333 +
1334 + current_time = time.time()
1335 +
1336 + delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
1337 + if delay > self._job_delay_max:
1338 + delay = self._job_delay_max
1339 + if (current_time - self._previous_job_start_time) < delay:
1340 + return True
1341 +
1342 + return False
1343 +
def _schedule_tasks_imp(self):
    """
    Start as many new tasks as current scheduling constraints allow.

    @rtype: bool
    @returns: True if state changed, False otherwise.
    """
    state_change = 0

    while True:

        if not self._keep_scheduling():
            return bool(state_change)

        # Stop starting jobs when a search would be futile, a merge is
        # waiting for quiescence, system deps are unsatisfied while
        # jobs run, the job limit is reached, or the load-average
        # delay is in effect.
        if self._choose_pkg_return_early or \
            self._merge_wait_scheduled or \
            (self._jobs and self._unsatisfied_system_deps) or \
            not self._can_add_job() or \
            self._job_delay():
            return bool(state_change)

        pkg = self._choose_pkg()
        if pkg is None:
            return bool(state_change)

        state_change += 1

        if not pkg.installed:
            self._pkg_count.curval += 1

        task = self._task(pkg)

        if pkg.installed:
            # Already-installed packages go straight to the merge queue.
            merge = PackageMerge(merge=task)
            merge.addExitListener(self._merge_exit)
            self._task_queues.merge.add(merge)

        elif pkg.built:
            # Binary packages run as an extraction job first.
            self._jobs += 1
            self._previous_job_start_time = time.time()
            self._status_display.running = self._jobs
            task.addExitListener(self._extract_exit)
            self._task_queues.jobs.add(task)

        else:
            # Source packages run as a build job first.
            self._jobs += 1
            self._previous_job_start_time = time.time()
            self._status_display.running = self._jobs
            task.addExitListener(self._build_exit)
            self._task_queues.jobs.add(task)

    return bool(state_change)
1395 +
def _task(self, pkg):
    """
    Build a MergeListItem task for *pkg*, resolving the currently
    installed package in the same slot (if any) as the package it
    will replace.
    """
    pkg_to_replace = None
    if pkg.operation != "uninstall":
        vardb = pkg.root_config.trees["vartree"].dbapi
        previous_cpv = vardb.match(pkg.slot_atom)
        if previous_cpv:
            # NOTE(review): pop() takes the last match — presumably
            # match() returns versions in ascending order so this is
            # the newest installed cpv; verify against vardbapi.
            previous_cpv = previous_cpv.pop()
            pkg_to_replace = self._pkg(previous_cpv,
                "installed", pkg.root_config, installed=True)

    # Each task gets its own config instance (via _ConfigPool /
    # _allocate_config) so parallel tasks don't share settings.
    task = MergeListItem(args_set=self._args_set,
        background=self._background, binpkg_opts=self._binpkg_opts,
        build_opts=self._build_opts,
        config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
        emerge_opts=self.myopts,
        find_blockers=self._find_blockers(pkg), logger=self._logger,
        mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
        pkg_to_replace=pkg_to_replace,
        prefetcher=self._prefetchers.get(pkg),
        scheduler=self._sched_iface,
        settings=self._allocate_config(pkg.root),
        statusMessage=self._status_msg,
        world_atom=self._world_atom)

    return task
1423 +
def _failed_pkg_msg(self, failed_pkg, action, preposition):
    """Emit a one-line failure notice, plus the log path when one exists."""
    pkg = failed_pkg.pkg
    msg = "%s to %s %s" % \
        (bad("Failed"), action, colorize("INFORM", pkg.cpv))
    if pkg.root != "/":
        msg += " %s %s" % (preposition, pkg.root)

    log_path = self._locate_failure_log(failed_pkg)
    if log_path is None:
        self._status_msg(msg)
        return

    self._status_msg(msg + ", Log file:")
    self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
1438 +
1439 + def _status_msg(self, msg):
1440 + """
1441 + Display a brief status message (no newlines) in the status display.
1442 + This is called by tasks to provide feedback to the user. This
1443 + delegates the resposibility of generating \r and \n control characters,
1444 + to guarantee that lines are created or erased when necessary and
1445 + appropriate.
1446 +
1447 + @type msg: str
1448 + @param msg: a brief status message (no newlines allowed)
1449 + """
1450 + if not self._background:
1451 + writemsg_level("\n")
1452 + self._status_display.displayMessage(msg)
1453 +
1454 + def _save_resume_list(self):
1455 + """
1456 + Do this before verifying the ebuild Manifests since it might
1457 + be possible for the user to use --resume --skipfirst get past
1458 + a non-essential package with a broken digest.
1459 + """
1460 + mtimedb = self._mtimedb
1461 + mtimedb["resume"]["mergelist"] = [list(x) \
1462 + for x in self._mergelist \
1463 + if isinstance(x, Package) and x.operation == "merge"]
1464 +
1465 + mtimedb.commit()
1466 +
def _calc_resume_list(self):
    """
    Use the current resume list to calculate a new one,
    dropping any packages with unsatisfied deps.

    Failure modes: unsatisfied resume deps queue an explanatory
    message (shown after mod_echo output) and return False; dropped
    tasks are logged via elog and recorded in _failed_pkgs_all.

    @rtype: bool
    @returns: True if successful, False otherwise.
    """
    print colorize("GOOD", "*** Resuming merge...")

    if self._show_list():
        if "--tree" in self.myopts:
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be merged, in reverse order:\n\n"))

        else:
            portage.writemsg_stdout("\n" + \
                darkgreen("These are the packages that " + \
                "would be merged, in order:\n\n"))

    show_spinner = "--quiet" not in self.myopts and \
        "--nodeps" not in self.myopts

    if show_spinner:
        print "Calculating dependencies ",

    myparams = create_depgraph_params(self.myopts, None)
    success = False
    e = None
    try:
        success, mydepgraph, dropped_tasks = resume_depgraph(
            self.settings, self.trees, self._mtimedb, self.myopts,
            myparams, self._spinner)
    except depgraph.UnsatisfiedResumeDep, exc:
        # rename variable to avoid python-3.0 error:
        # SyntaxError: can not delete variable 'e' referenced in nested
        # scope
        e = exc
        mydepgraph = e.depgraph
        dropped_tasks = set()

    if show_spinner:
        print "\b\b... done!"

    if e is not None:
        # Defer this report until after mod_echo output has been
        # flushed, so it is not swept off the terminal.
        def unsatisfied_resume_dep_msg():
            mydepgraph.display_problems()
            out = portage.output.EOutput()
            out.eerror("One or more packages are either masked or " + \
                "have missing dependencies:")
            out.eerror("")
            indent = " "
            show_parents = set()
            for dep in e.value:
                if dep.parent in show_parents:
                    continue
                show_parents.add(dep.parent)
                if dep.atom is None:
                    out.eerror(indent + "Masked package:")
                    out.eerror(2 * indent + str(dep.parent))
                    out.eerror("")
                else:
                    out.eerror(indent + str(dep.atom) + " pulled in by:")
                    out.eerror(2 * indent + str(dep.parent))
                    out.eerror("")
            msg = "The resume list contains packages " + \
                "that are either masked or have " + \
                "unsatisfied dependencies. " + \
                "Please restart/continue " + \
                "the operation manually, or use --skipfirst " + \
                "to skip the first package in the list and " + \
                "any other packages that may be " + \
                "masked or have missing dependencies."
            for line in textwrap.wrap(msg, 72):
                out.eerror(line)
        self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
        return False

    if success and self._show_list():
        mylist = mydepgraph.altlist()
        if mylist:
            if "--tree" in self.myopts:
                mylist.reverse()
            mydepgraph.display(mylist, favorites=self._favorites)

    if not success:
        self._post_mod_echo_msgs.append(mydepgraph.display_problems)
        return False
    mydepgraph.display_problems()

    # Adopt the recalculated list/graph, breaking reference cycles so
    # the old graph can be garbage collected.
    mylist = mydepgraph.altlist()
    mydepgraph.break_refs(mylist)
    mydepgraph.break_refs(dropped_tasks)
    self._mergelist = mylist
    self._set_digraph(mydepgraph.schedulerGraph())

    msg_width = 75
    for task in dropped_tasks:
        if not (isinstance(task, Package) and task.operation == "merge"):
            continue
        pkg = task
        msg = "emerge --keep-going:" + \
            " %s" % (pkg.cpv,)
        if pkg.root != "/":
            msg += " for %s" % (pkg.root,)
        msg += " dropped due to unsatisfied dependency."
        for line in textwrap.wrap(msg, msg_width):
            eerror(line, phase="other", key=pkg.cpv)
        settings = self.pkgsettings[pkg.root]
        # Ensure that log collection from $T is disabled inside
        # elog_process(), since any logs that might exist are
        # not valid here.
        settings.pop("T", None)
        portage.elog.elog_process(pkg.cpv, settings)
        self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))

    return True
1584 +
1585 + def _show_list(self):
1586 + myopts = self.myopts
1587 + if "--quiet" not in myopts and \
1588 + ("--ask" in myopts or "--tree" in myopts or \
1589 + "--verbose" in myopts):
1590 + return True
1591 + return False
1592 +
def _world_atom(self, pkg):
    """
    Add the package to the world file, but only if
    it's supposed to be added. Otherwise, do nothing.
    """
    # Under these options the merge is not a permanent, user-requested
    # installation, so nothing is recorded in world.
    if set(("--buildpkgonly", "--fetchonly",
        "--fetch-all-uri",
        "--oneshot", "--onlydeps",
        "--pretend")).intersection(self.myopts):
        return

    # Only packages merged to the target root belong in its world file.
    if pkg.root != self.target_root:
        return

    args_set = self._args_set
    if not args_set.findAtomForPackage(pkg):
        # Not explicitly requested via a command-line argument.
        return

    logger = self._logger
    pkg_count = self._pkg_count
    root_config = pkg.root_config
    world_set = root_config.sets["world"]
    # Lock the world set while updating it, when the set supports
    # locking; always unlock in the finally block below.
    world_locked = False
    if hasattr(world_set, "lock"):
        world_set.lock()
        world_locked = True

    try:
        if hasattr(world_set, "load"):
            world_set.load() # maybe it's changed on disk

        atom = create_world_atom(pkg, args_set, root_config)
        if atom:
            if hasattr(world_set, "add"):
                self._status_msg(('Recording %s in "world" ' + \
                    'favorites file...') % atom)
                logger.log(" === (%s of %s) Updating world file (%s)" % \
                    (pkg_count.curval, pkg_count.maxval, pkg.cpv))
                world_set.add(atom)
            else:
                writemsg_level('\n!!! Unable to record %s in "world"\n' % \
                    (atom,), level=logging.WARN, noiselevel=-1)
    finally:
        if world_locked:
            world_set.unlock()
1639 +
def _pkg(self, cpv, type_name, root_config, installed=False):
    """
    Get a package instance from the cache, or create a new
    one if necessary. Raises KeyError from aux_get if it
    fails for some reason (package does not exist or is
    corrupt).
    """
    operation = "merge"
    if installed:
        operation = "nomerge"

    if self._digraph is not None:
        # Reuse existing instance when available.
        pkg = self._digraph.get(
            (type_name, root_config.root, cpv, operation))
        if pkg is not None:
            return pkg

    tree_type = depgraph.pkg_tree_map[type_name]
    db = root_config.trees[tree_type].dbapi
    db_keys = list(self.trees[root_config.root][
        tree_type].dbapi._aux_cache_keys)
    metadata = izip(db_keys, db.aux_get(cpv, db_keys))
    pkg = Package(cpv=cpv, metadata=metadata,
        root_config=root_config, installed=installed)
    if type_name == "ebuild":
        # For ebuilds, USE and CHOST come from this scheduler's own
        # config rather than from the tree metadata.
        settings = self.pkgsettings[root_config.root]
        settings.setcpv(pkg)
        pkg.metadata["USE"] = settings["PORTAGE_USE"]
        pkg.metadata['CHOST'] = settings.get('CHOST', '')

    return pkg
1672
1673 Modified: main/branches/prefix/pym/_emerge/__init__.py
1674 ===================================================================
1675 --- main/branches/prefix/pym/_emerge/__init__.py 2009-06-27 13:36:49 UTC (rev 13709)
1676 +++ main/branches/prefix/pym/_emerge/__init__.py 2009-06-27 14:07:14 UTC (rev 13710)
1677 @@ -9,8 +9,6 @@
1678 import signal
1679 import sys
1680 import textwrap
1681 -import weakref
1682 -import gc
1683 import os, stat
1684 import platform
1685
1686 @@ -35,13 +33,11 @@
1687
1688 from portage import digraph
1689 from portage.const import NEWS_LIB_PATH
1690 -from portage.cache.mappings import slot_dict_class
1691
1692 import _emerge.help
1693 import portage.xpak, commands, errno, re, socket, time
1694 -from portage.output import blue, bold, colorize, darkblue, darkgreen, green, \
1695 - nc_len, red, teal, turquoise, \
1696 - xtermTitleReset, yellow
1697 +from portage.output import blue, bold, colorize, darkgreen, \
1698 + red, xtermTitleReset, yellow
1699 from portage.output import create_color_func
1700 good = create_color_func("GOOD")
1701 bad = create_color_func("BAD")
1702 @@ -55,7 +51,6 @@
1703 from portage.cache.cache_errors import CacheError
1704 from portage.const import EPREFIX, BPREFIX, EPREFIX_LSTRIP
1705 from portage.data import secpass
1706 -from portage.elog.messages import eerror
1707 from portage.util import normalize_path as normpath
1708 from portage.util import cmp_sort_key, writemsg, writemsg_level
1709 from portage.sets import load_default_config, SETPREFIX
1710 @@ -63,47 +58,30 @@
1711
1712 from itertools import chain, izip
1713
1714 -from _emerge.SlotObject import SlotObject
1715 -from _emerge.DepPriority import DepPriority
1716 -from _emerge.BlockerDepPriority import BlockerDepPriority
1717 -from _emerge.UnmergeDepPriority import UnmergeDepPriority
1718 -from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
1719 +from _emerge.clear_caches import clear_caches
1720 +from _emerge.countdown import countdown
1721 +from _emerge.create_depgraph_params import create_depgraph_params
1722 +from _emerge.Dependency import Dependency
1723 +from _emerge.depgraph import depgraph, resume_depgraph
1724 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
1725 +from _emerge.emergelog import emergelog
1726 +from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
1727 +from _emerge.is_valid_package_atom import is_valid_package_atom
1728 +from _emerge.MetadataRegen import MetadataRegen
1729 from _emerge.Package import Package
1730 -from _emerge.Blocker import Blocker
1731 -from _emerge.BlockerDB import BlockerDB
1732 -from _emerge.EbuildFetcher import EbuildFetcher
1733 -from _emerge.EbuildPhase import EbuildPhase
1734 -from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
1735 -from _emerge.PackageMerge import PackageMerge
1736 -from _emerge.DependencyArg import DependencyArg
1737 -from _emerge.AtomArg import AtomArg
1738 -from _emerge.PackageArg import PackageArg
1739 -from _emerge.SetArg import SetArg
1740 -from _emerge.Dependency import Dependency
1741 -from _emerge.BlockerCache import BlockerCache
1742 -from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
1743 -from _emerge.RepoDisplay import RepoDisplay
1744 -from _emerge.UseFlagDisplay import UseFlagDisplay
1745 -from _emerge.SequentialTaskQueue import SequentialTaskQueue
1746 from _emerge.ProgressHandler import ProgressHandler
1747 -from _emerge.stdout_spinner import stdout_spinner
1748 -from _emerge.JobStatusDisplay import JobStatusDisplay
1749 -from _emerge.PollScheduler import PollScheduler
1750 -from _emerge.search import search
1751 -from _emerge.visible import visible
1752 -from _emerge.emergelog import emergelog, _emerge_log_dir
1753 -from _emerge.userquery import userquery
1754 -from _emerge.countdown import countdown
1755 -from _emerge.unmerge import unmerge
1756 -from _emerge.MergeListItem import MergeListItem
1757 -from _emerge.MetadataRegen import MetadataRegen
1758 from _emerge.RootConfig import RootConfig
1759 -from _emerge.format_size import format_size
1760 -from _emerge.PackageCounters import PackageCounters
1761 -from _emerge.FakeVartree import FakeVartree
1762 +from _emerge.Scheduler import Scheduler
1763 +from _emerge.search import search
1764 +from _emerge.SetArg import SetArg
1765 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
1766 +from _emerge.stdout_spinner import stdout_spinner
1767 +from _emerge.unmerge import unmerge
1768 +from _emerge.UnmergeDepPriority import UnmergeDepPriority
1769 +from _emerge.UseFlagDisplay import UseFlagDisplay
1770 +from _emerge.userquery import userquery
1771
1772 +
1773 actions = frozenset([
1774 "clean", "config", "depclean",
1775 "info", "list-sets", "metadata",
1776 @@ -221,6559 +199,6 @@
1777
1778 return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
1779
1780 -def create_depgraph_params(myopts, myaction):
1781 - #configure emerge engine parameters
1782 - #
1783 - # self: include _this_ package regardless of if it is merged.
1784 - # selective: exclude the package if it is merged
1785 - # recurse: go into the dependencies
1786 - # deep: go into the dependencies of already merged packages
1787 - # empty: pretend nothing is merged
1788 - # complete: completely account for all known dependencies
1789 - # remove: build graph for use in removing packages
1790 - myparams = set(["recurse"])
1791 -
1792 - if myaction == "remove":
1793 - myparams.add("remove")
1794 - myparams.add("complete")
1795 - return myparams
1796 -
1797 - if "--update" in myopts or \
1798 - "--newuse" in myopts or \
1799 - "--reinstall" in myopts or \
1800 - "--noreplace" in myopts:
1801 - myparams.add("selective")
1802 - if "--emptytree" in myopts:
1803 - myparams.add("empty")
1804 - myparams.discard("selective")
1805 - if "--nodeps" in myopts:
1806 - myparams.discard("recurse")
1807 - if "--deep" in myopts:
1808 - myparams.add("deep")
1809 - if "--complete-graph" in myopts:
1810 - myparams.add("complete")
1811 - return myparams
1812 -
1813 -def create_world_atom(pkg, args_set, root_config):
1814 - """Create a new atom for the world file if one does not exist. If the
1815 - argument atom is precise enough to identify a specific slot then a slot
1816 - atom will be returned. Atoms that are in the system set may also be stored
1817 - in world since system atoms can only match one slot while world atoms can
1818 - be greedy with respect to slots. Unslotted system packages will not be
1819 - stored in world."""
1820 -
1821 - arg_atom = args_set.findAtomForPackage(pkg)
1822 - if not arg_atom:
1823 - return None
1824 - cp = portage.dep_getkey(arg_atom)
1825 - new_world_atom = cp
1826 - sets = root_config.sets
1827 - portdb = root_config.trees["porttree"].dbapi
1828 - vardb = root_config.trees["vartree"].dbapi
1829 - available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
1830 - for cpv in portdb.match(cp))
1831 - slotted = len(available_slots) > 1 or \
1832 - (len(available_slots) == 1 and "0" not in available_slots)
1833 - if not slotted:
1834 - # check the vdb in case this is multislot
1835 - available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
1836 - for cpv in vardb.match(cp))
1837 - slotted = len(available_slots) > 1 or \
1838 - (len(available_slots) == 1 and "0" not in available_slots)
1839 - if slotted and arg_atom != cp:
1840 - # If the user gave a specific atom, store it as a
1841 - # slot atom in the world file.
1842 - slot_atom = pkg.slot_atom
1843 -
1844 - # For USE=multislot, there are a couple of cases to
1845 - # handle here:
1846 - #
1847 - # 1) SLOT="0", but the real SLOT spontaneously changed to some
1848 - # unknown value, so just record an unslotted atom.
1849 - #
1850 - # 2) SLOT comes from an installed package and there is no
1851 - # matching SLOT in the portage tree.
1852 - #
1853 - # Make sure that the slot atom is available in either the
1854 - # portdb or the vardb, since otherwise the user certainly
1855 - # doesn't want the SLOT atom recorded in the world file
1856 - # (case 1 above). If it's only available in the vardb,
1857 - # the user may be trying to prevent a USE=multislot
1858 - # package from being removed by --depclean (case 2 above).
1859 -
1860 - mydb = portdb
1861 - if not portdb.match(slot_atom):
1862 - # SLOT seems to come from an installed multislot package
1863 - mydb = vardb
1864 - # If there is no installed package matching the SLOT atom,
1865 - # it probably changed SLOT spontaneously due to USE=multislot,
1866 - # so just record an unslotted atom.
1867 - if vardb.match(slot_atom):
1868 - # Now verify that the argument is precise
1869 - # enough to identify a specific slot.
1870 - matches = mydb.match(arg_atom)
1871 - matched_slots = set()
1872 - for cpv in matches:
1873 - matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
1874 - if len(matched_slots) == 1:
1875 - new_world_atom = slot_atom
1876 -
1877 - if new_world_atom == sets["world"].findAtomForPackage(pkg):
1878 - # Both atoms would be identical, so there's nothing to add.
1879 - return None
1880 - if not slotted:
1881 - # Unlike world atoms, system atoms are not greedy for slots, so they
1882 - # can't be safely excluded from world if they are slotted.
1883 - system_atom = sets["system"].findAtomForPackage(pkg)
1884 - if system_atom:
1885 - if not portage.dep_getkey(system_atom).startswith("virtual/"):
1886 - return None
1887 - # System virtuals aren't safe to exclude from world since they can
1888 - # match multiple old-style virtuals but only one of them will be
1889 - # pulled in by update or depclean.
1890 - providers = portdb.mysettings.getvirtuals().get(
1891 - portage.dep_getkey(system_atom))
1892 - if providers and len(providers) == 1 and providers[0] == cp:
1893 - return None
1894 - return new_world_atom
1895 -
1896 -def filter_iuse_defaults(iuse):
1897 - for flag in iuse:
1898 - if flag.startswith("+") or flag.startswith("-"):
1899 - yield flag[1:]
1900 - else:
1901 - yield flag
1902 -
1903 -def _find_deep_system_runtime_deps(graph):
1904 - deep_system_deps = set()
1905 - node_stack = []
1906 - for node in graph:
1907 - if not isinstance(node, Package) or \
1908 - node.operation == 'uninstall':
1909 - continue
1910 - if node.root_config.sets['system'].findAtomForPackage(node):
1911 - node_stack.append(node)
1912 -
1913 - def ignore_priority(priority):
1914 - """
1915 - Ignore non-runtime priorities.
1916 - """
1917 - if isinstance(priority, DepPriority) and \
1918 - (priority.runtime or priority.runtime_post):
1919 - return False
1920 - return True
1921 -
1922 - while node_stack:
1923 - node = node_stack.pop()
1924 - if node in deep_system_deps:
1925 - continue
1926 - deep_system_deps.add(node)
1927 - for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1928 - if not isinstance(child, Package) or \
1929 - child.operation == 'uninstall':
1930 - continue
1931 - node_stack.append(child)
1932 -
1933 - return deep_system_deps
1934 -
1935 -def get_masking_status(pkg, pkgsettings, root_config):
1936 -
1937 - mreasons = portage.getmaskingstatus(
1938 - pkg, settings=pkgsettings,
1939 - portdb=root_config.trees["porttree"].dbapi)
1940 -
1941 - if not pkg.installed:
1942 - if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1943 - mreasons.append("CHOST: %s" % \
1944 - pkg.metadata["CHOST"])
1945 -
1946 - if pkg.built and not pkg.installed:
1947 - if not "EPREFIX" in pkg.metadata or not pkg.metadata["EPREFIX"]:
1948 - mreasons.append("missing EPREFIX")
1949 - elif len(pkg.metadata["EPREFIX"].strip()) < len(pkgsettings["EPREFIX"]):
1950 - mreasons.append("EPREFIX: '%s' too small" % pkg.metadata["EPREFIX"])
1951 -
1952 - if not pkg.metadata["SLOT"]:
1953 - mreasons.append("invalid: SLOT is undefined")
1954 -
1955 - return mreasons
1956 -
1957 -def get_mask_info(root_config, cpv, pkgsettings,
1958 - db, pkg_type, built, installed, db_keys):
1959 - eapi_masked = False
1960 - try:
1961 - metadata = dict(izip(db_keys,
1962 - db.aux_get(cpv, db_keys)))
1963 - except KeyError:
1964 - metadata = None
1965 - if metadata and not built:
1966 - pkgsettings.setcpv(cpv, mydb=metadata)
1967 - metadata["USE"] = pkgsettings["PORTAGE_USE"]
1968 - metadata['CHOST'] = pkgsettings.get('CHOST', '')
1969 - if metadata is None:
1970 - mreasons = ["corruption"]
1971 - else:
1972 - eapi = metadata['EAPI']
1973 - if eapi[:1] == '-':
1974 - eapi = eapi[1:]
1975 - if not portage.eapi_is_supported(eapi):
1976 - mreasons = ['EAPI %s' % eapi]
1977 - else:
1978 - pkg = Package(type_name=pkg_type, root_config=root_config,
1979 - cpv=cpv, built=built, installed=installed, metadata=metadata)
1980 - mreasons = get_masking_status(pkg, pkgsettings, root_config)
1981 - return metadata, mreasons
1982 -
1983 -def show_masked_packages(masked_packages):
1984 - shown_licenses = set()
1985 - shown_comments = set()
1986 - # Maybe there is both an ebuild and a binary. Only
1987 - # show one of them to avoid redundant appearance.
1988 - shown_cpvs = set()
1989 - have_eapi_mask = False
1990 - for (root_config, pkgsettings, cpv,
1991 - metadata, mreasons) in masked_packages:
1992 - if cpv in shown_cpvs:
1993 - continue
1994 - shown_cpvs.add(cpv)
1995 - comment, filename = None, None
1996 - if "package.mask" in mreasons:
1997 - comment, filename = \
1998 - portage.getmaskingreason(
1999 - cpv, metadata=metadata,
2000 - settings=pkgsettings,
2001 - portdb=root_config.trees["porttree"].dbapi,
2002 - return_location=True)
2003 - missing_licenses = []
2004 - if metadata:
2005 - if not portage.eapi_is_supported(metadata["EAPI"]):
2006 - have_eapi_mask = True
2007 - try:
2008 - missing_licenses = \
2009 - pkgsettings._getMissingLicenses(
2010 - cpv, metadata)
2011 - except portage.exception.InvalidDependString:
2012 - # This will have already been reported
2013 - # above via mreasons.
2014 - pass
2015 -
2016 - print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
2017 - if comment and comment not in shown_comments:
2018 - print filename+":"
2019 - print comment
2020 - shown_comments.add(comment)
2021 - portdb = root_config.trees["porttree"].dbapi
2022 - for l in missing_licenses:
2023 - l_path = portdb.findLicensePath(l)
2024 - if l in shown_licenses:
2025 - continue
2026 - msg = ("A copy of the '%s' license" + \
2027 - " is located at '%s'.") % (l, l_path)
2028 - print msg
2029 - print
2030 - shown_licenses.add(l)
2031 - return have_eapi_mask
2032 -
2033 -class depgraph(object):
2034 -
2035 - pkg_tree_map = RootConfig.pkg_tree_map
2036 -
2037 - _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
2038 -
2039 - def __init__(self, settings, trees, myopts, myparams, spinner):
2040 - self.settings = settings
2041 - self.target_root = settings["ROOT"]
2042 - self.myopts = myopts
2043 - self.myparams = myparams
2044 - self.edebug = 0
2045 - if settings.get("PORTAGE_DEBUG", "") == "1":
2046 - self.edebug = 1
2047 - self.spinner = spinner
2048 - self._running_root = trees["/"]["root_config"]
2049 - self._opts_no_restart = Scheduler._opts_no_restart
2050 - self.pkgsettings = {}
2051 - # Maps slot atom to package for each Package added to the graph.
2052 - self._slot_pkg_map = {}
2053 - # Maps nodes to the reasons they were selected for reinstallation.
2054 - self._reinstall_nodes = {}
2055 - self.mydbapi = {}
2056 - self.trees = {}
2057 - self._trees_orig = trees
2058 - self.roots = {}
2059 - # Contains a filtered view of preferred packages that are selected
2060 - # from available repositories.
2061 - self._filtered_trees = {}
2062 - # Contains installed packages and new packages that have been added
2063 - # to the graph.
2064 - self._graph_trees = {}
2065 - # All Package instances
2066 - self._pkg_cache = {}
2067 - for myroot in trees:
2068 - self.trees[myroot] = {}
2069 - # Create a RootConfig instance that references
2070 - # the FakeVartree instead of the real one.
2071 - self.roots[myroot] = RootConfig(
2072 - trees[myroot]["vartree"].settings,
2073 - self.trees[myroot],
2074 - trees[myroot]["root_config"].setconfig)
2075 - for tree in ("porttree", "bintree"):
2076 - self.trees[myroot][tree] = trees[myroot][tree]
2077 - self.trees[myroot]["vartree"] = \
2078 - FakeVartree(trees[myroot]["root_config"],
2079 - pkg_cache=self._pkg_cache)
2080 - self.pkgsettings[myroot] = portage.config(
2081 - clone=self.trees[myroot]["vartree"].settings)
2082 - self._slot_pkg_map[myroot] = {}
2083 - vardb = self.trees[myroot]["vartree"].dbapi
2084 - preload_installed_pkgs = "--nodeps" not in self.myopts and \
2085 - "--buildpkgonly" not in self.myopts
2086 - # This fakedbapi instance will model the state that the vdb will
2087 - # have after new packages have been installed.
2088 - fakedb = PackageVirtualDbapi(vardb.settings)
2089 - if preload_installed_pkgs:
2090 - for pkg in vardb:
2091 - self.spinner.update()
2092 - # This triggers metadata updates via FakeVartree.
2093 - vardb.aux_get(pkg.cpv, [])
2094 - fakedb.cpv_inject(pkg)
2095 -
2096 - # Now that the vardb state is cached in our FakeVartree,
2097 - # we won't be needing the real vartree cache for awhile.
2098 - # To make some room on the heap, clear the vardbapi
2099 - # caches.
2100 - trees[myroot]["vartree"].dbapi._clear_cache()
2101 - gc.collect()
2102 -
2103 - self.mydbapi[myroot] = fakedb
2104 - def graph_tree():
2105 - pass
2106 - graph_tree.dbapi = fakedb
2107 - self._graph_trees[myroot] = {}
2108 - self._filtered_trees[myroot] = {}
2109 - # Substitute the graph tree for the vartree in dep_check() since we
2110 - # want atom selections to be consistent with package selections
2111 - # have already been made.
2112 - self._graph_trees[myroot]["porttree"] = graph_tree
2113 - self._graph_trees[myroot]["vartree"] = graph_tree
2114 - def filtered_tree():
2115 - pass
2116 - filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
2117 - self._filtered_trees[myroot]["porttree"] = filtered_tree
2118 -
2119 - # Passing in graph_tree as the vartree here could lead to better
2120 - # atom selections in some cases by causing atoms for packages that
2121 - # have been added to the graph to be preferred over other choices.
2122 - # However, it can trigger atom selections that result in
2123 - # unresolvable direct circular dependencies. For example, this
2124 - # happens with gwydion-dylan which depends on either itself or
2125 - # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
2126 - # gwydion-dylan-bin needs to be selected in order to avoid a
2127 - # an unresolvable direct circular dependency.
2128 - #
2129 - # To solve the problem described above, pass in "graph_db" so that
2130 - # packages that have been added to the graph are distinguishable
2131 - # from other available packages and installed packages. Also, pass
2132 - # the parent package into self._select_atoms() calls so that
2133 - # unresolvable direct circular dependencies can be detected and
2134 - # avoided when possible.
2135 - self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
2136 - self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
2137 -
2138 - dbs = []
2139 - portdb = self.trees[myroot]["porttree"].dbapi
2140 - bindb = self.trees[myroot]["bintree"].dbapi
2141 - vardb = self.trees[myroot]["vartree"].dbapi
2142 - # (db, pkg_type, built, installed, db_keys)
2143 - if "--usepkgonly" not in self.myopts:
2144 - db_keys = list(portdb._aux_cache_keys)
2145 - dbs.append((portdb, "ebuild", False, False, db_keys))
2146 - if "--usepkg" in self.myopts:
2147 - db_keys = list(bindb._aux_cache_keys)
2148 - dbs.append((bindb, "binary", True, False, db_keys))
2149 - db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
2150 - dbs.append((vardb, "installed", True, True, db_keys))
2151 - self._filtered_trees[myroot]["dbs"] = dbs
2152 - if "--usepkg" in self.myopts:
2153 - self.trees[myroot]["bintree"].populate(
2154 - "--getbinpkg" in self.myopts,
2155 - "--getbinpkgonly" in self.myopts)
2156 - del trees
2157 -
2158 - self.digraph=portage.digraph()
2159 - # contains all sets added to the graph
2160 - self._sets = {}
2161 - # contains atoms given as arguments
2162 - self._sets["args"] = InternalPackageSet()
2163 - # contains all atoms from all sets added to the graph, including
2164 - # atoms given as arguments
2165 - self._set_atoms = InternalPackageSet()
2166 - self._atom_arg_map = {}
2167 - # contains all nodes pulled in by self._set_atoms
2168 - self._set_nodes = set()
2169 - # Contains only Blocker -> Uninstall edges
2170 - self._blocker_uninstalls = digraph()
2171 - # Contains only Package -> Blocker edges
2172 - self._blocker_parents = digraph()
2173 - # Contains only irrelevant Package -> Blocker edges
2174 - self._irrelevant_blockers = digraph()
2175 - # Contains only unsolvable Package -> Blocker edges
2176 - self._unsolvable_blockers = digraph()
2177 - # Contains all Blocker -> Blocked Package edges
2178 - self._blocked_pkgs = digraph()
2179 - # Contains world packages that have been protected from
2180 - # uninstallation but may not have been added to the graph
2181 - # if the graph is not complete yet.
2182 - self._blocked_world_pkgs = {}
2183 - self._slot_collision_info = {}
2184 - # Slot collision nodes are not allowed to block other packages since
2185 - # blocker validation is only able to account for one package per slot.
2186 - self._slot_collision_nodes = set()
2187 - self._parent_atoms = {}
2188 - self._slot_conflict_parent_atoms = set()
2189 - self._serialized_tasks_cache = None
2190 - self._scheduler_graph = None
2191 - self._displayed_list = None
2192 - self._pprovided_args = []
2193 - self._missing_args = []
2194 - self._masked_installed = set()
2195 - self._unsatisfied_deps_for_display = []
2196 - self._unsatisfied_blockers_for_display = None
2197 - self._circular_deps_for_display = None
2198 - self._dep_stack = []
2199 - self._dep_disjunctive_stack = []
2200 - self._unsatisfied_deps = []
2201 - self._initially_unsatisfied_deps = []
2202 - self._ignored_deps = []
2203 - self._required_set_names = set(["system", "world"])
2204 - self._select_atoms = self._select_atoms_highest_available
2205 - self._select_package = self._select_pkg_highest_available
2206 - self._highest_pkg_cache = {}
2207 -
2208 - def _show_slot_collision_notice(self):
2209 - """Show an informational message advising the user to mask one of the
2210 - the packages. In some cases it may be possible to resolve this
2211 - automatically, but support for backtracking (removal nodes that have
2212 - already been selected) will be required in order to handle all possible
2213 - cases.
2214 - """
2215 -
2216 - if not self._slot_collision_info:
2217 - return
2218 -
2219 - self._show_merge_list()
2220 -
2221 - msg = []
2222 - msg.append("\n!!! Multiple package instances within a single " + \
2223 - "package slot have been pulled\n")
2224 - msg.append("!!! into the dependency graph, resulting" + \
2225 - " in a slot conflict:\n\n")
2226 - indent = " "
2227 - # Max number of parents shown, to avoid flooding the display.
2228 - max_parents = 3
2229 - explanation_columns = 70
2230 - explanations = 0
2231 - for (slot_atom, root), slot_nodes \
2232 - in self._slot_collision_info.iteritems():
2233 - msg.append(str(slot_atom))
2234 - msg.append("\n\n")
2235 -
2236 - for node in slot_nodes:
2237 - msg.append(indent)
2238 - msg.append(str(node))
2239 - parent_atoms = self._parent_atoms.get(node)
2240 - if parent_atoms:
2241 - pruned_list = set()
2242 - # Prefer conflict atoms over others.
2243 - for parent_atom in parent_atoms:
2244 - if len(pruned_list) >= max_parents:
2245 - break
2246 - if parent_atom in self._slot_conflict_parent_atoms:
2247 - pruned_list.add(parent_atom)
2248 -
2249 - # If this package was pulled in by conflict atoms then
2250 - # show those alone since those are the most interesting.
2251 - if not pruned_list:
2252 - # When generating the pruned list, prefer instances
2253 - # of DependencyArg over instances of Package.
2254 - for parent_atom in parent_atoms:
2255 - if len(pruned_list) >= max_parents:
2256 - break
2257 - parent, atom = parent_atom
2258 - if isinstance(parent, DependencyArg):
2259 - pruned_list.add(parent_atom)
2260 - # Prefer Packages instances that themselves have been
2261 - # pulled into collision slots.
2262 - for parent_atom in parent_atoms:
2263 - if len(pruned_list) >= max_parents:
2264 - break
2265 - parent, atom = parent_atom
2266 - if isinstance(parent, Package) and \
2267 - (parent.slot_atom, parent.root) \
2268 - in self._slot_collision_info:
2269 - pruned_list.add(parent_atom)
2270 - for parent_atom in parent_atoms:
2271 - if len(pruned_list) >= max_parents:
2272 - break
2273 - pruned_list.add(parent_atom)
2274 - omitted_parents = len(parent_atoms) - len(pruned_list)
2275 - parent_atoms = pruned_list
2276 - msg.append(" pulled in by\n")
2277 - for parent_atom in parent_atoms:
2278 - parent, atom = parent_atom
2279 - msg.append(2*indent)
2280 - if isinstance(parent,
2281 - (PackageArg, AtomArg)):
2282 - # For PackageArg and AtomArg types, it's
2283 - # redundant to display the atom attribute.
2284 - msg.append(str(parent))
2285 - else:
2286 - # Display the specific atom from SetArg or
2287 - # Package types.
2288 - msg.append("%s required by %s" % (atom, parent))
2289 - msg.append("\n")
2290 - if omitted_parents:
2291 - msg.append(2*indent)
2292 - msg.append("(and %d more)\n" % omitted_parents)
2293 - else:
2294 - msg.append(" (no parents)\n")
2295 - msg.append("\n")
2296 - explanation = self._slot_conflict_explanation(slot_nodes)
2297 - if explanation:
2298 - explanations += 1
2299 - msg.append(indent + "Explanation:\n\n")
2300 - for line in textwrap.wrap(explanation, explanation_columns):
2301 - msg.append(2*indent + line + "\n")
2302 - msg.append("\n")
2303 - msg.append("\n")
2304 - sys.stderr.write("".join(msg))
2305 - sys.stderr.flush()
2306 -
2307 - explanations_for_all = explanations == len(self._slot_collision_info)
2308 -
2309 - if explanations_for_all or "--quiet" in self.myopts:
2310 - return
2311 -
2312 - msg = []
2313 - msg.append("It may be possible to solve this problem ")
2314 - msg.append("by using package.mask to prevent one of ")
2315 - msg.append("those packages from being selected. ")
2316 - msg.append("However, it is also possible that conflicting ")
2317 - msg.append("dependencies exist such that they are impossible to ")
2318 - msg.append("satisfy simultaneously. If such a conflict exists in ")
2319 - msg.append("the dependencies of two different packages, then those ")
2320 - msg.append("packages can not be installed simultaneously.")
2321 -
2322 - from formatter import AbstractFormatter, DumbWriter
2323 - f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
2324 - for x in msg:
2325 - f.add_flowing_data(x)
2326 - f.end_paragraph(1)
2327 -
2328 - msg = []
2329 - msg.append("For more information, see MASKED PACKAGES ")
2330 - msg.append("section in the emerge man page or refer ")
2331 - msg.append("to the Gentoo Handbook.")
2332 - for x in msg:
2333 - f.add_flowing_data(x)
2334 - f.end_paragraph(1)
2335 - f.writer.flush()
2336 -
2337 - def _slot_conflict_explanation(self, slot_nodes):
2338 - """
2339 - When a slot conflict occurs due to USE deps, there are a few
2340 - different cases to consider:
2341 -
2342 - 1) New USE are correctly set but --newuse wasn't requested so an
2343 - installed package with incorrect USE happened to get pulled
2344 - into graph before the new one.
2345 -
2346 - 2) New USE are incorrectly set but an installed package has correct
2347 - USE so it got pulled into the graph, and a new instance also got
2348 - pulled in due to --newuse or an upgrade.
2349 -
2350 - 3) Multiple USE deps exist that can't be satisfied simultaneously,
2351 - and multiple package instances got pulled into the same slot to
2352 - satisfy the conflicting deps.
2353 -
2354 - Currently, explanations and suggested courses of action are generated
2355 - for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
2356 - """
2357 -
2358 - if len(slot_nodes) != 2:
2359 - # Suggestions are only implemented for
2360 - # conflicts between two packages.
2361 - return None
2362 -
2363 - all_conflict_atoms = self._slot_conflict_parent_atoms
2364 - matched_node = None
2365 - matched_atoms = None
2366 - unmatched_node = None
2367 - for node in slot_nodes:
2368 - parent_atoms = self._parent_atoms.get(node)
2369 - if not parent_atoms:
2370 - # Normally, there are always parent atoms. If there are
2371 - # none then something unexpected is happening and there's
2372 - # currently no suggestion for this case.
2373 - return None
2374 - conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
2375 - for parent_atom in conflict_atoms:
2376 - parent, atom = parent_atom
2377 - if not atom.use:
2378 - # Suggestions are currently only implemented for cases
2379 - # in which all conflict atoms have USE deps.
2380 - return None
2381 - if conflict_atoms:
2382 - if matched_node is not None:
2383 - # If conflict atoms match multiple nodes
2384 - # then there's no suggestion.
2385 - return None
2386 - matched_node = node
2387 - matched_atoms = conflict_atoms
2388 - else:
2389 - if unmatched_node is not None:
2390 - # Neither node is matched by conflict atoms, and
2391 - # there is no suggestion for this case.
2392 - return None
2393 - unmatched_node = node
2394 -
2395 - if matched_node is None or unmatched_node is None:
2396 - # This shouldn't happen.
2397 - return None
2398 -
2399 - if unmatched_node.installed and not matched_node.installed and \
2400 - unmatched_node.cpv == matched_node.cpv:
2401 - # If the conflicting packages are the same version then
2402 - # --newuse should be all that's needed. If they are different
2403 - # versions then there's some other problem.
2404 - return "New USE are correctly set, but --newuse wasn't" + \
2405 - " requested, so an installed package with incorrect USE " + \
2406 - "happened to get pulled into the dependency graph. " + \
2407 - "In order to solve " + \
2408 - "this, either specify the --newuse option or explicitly " + \
2409 - " reinstall '%s'." % matched_node.slot_atom
2410 -
2411 - if matched_node.installed and not unmatched_node.installed:
2412 - atoms = sorted(set(atom for parent, atom in matched_atoms))
2413 - explanation = ("New USE for '%s' are incorrectly set. " + \
2414 - "In order to solve this, adjust USE to satisfy '%s'") % \
2415 - (matched_node.slot_atom, atoms[0])
2416 - if len(atoms) > 1:
2417 - for atom in atoms[1:-1]:
2418 - explanation += ", '%s'" % (atom,)
2419 - if len(atoms) > 2:
2420 - explanation += ","
2421 - explanation += " and '%s'" % (atoms[-1],)
2422 - explanation += "."
2423 - return explanation
2424 -
2425 - return None
2426 -
2427 - def _process_slot_conflicts(self):
2428 - """
2429 - Process slot conflict data to identify specific atoms which
2430 - lead to conflict. These atoms only match a subset of the
2431 - packages that have been pulled into a given slot.
2432 - """
2433 - for (slot_atom, root), slot_nodes \
2434 - in self._slot_collision_info.iteritems():
2435 -
2436 - all_parent_atoms = set()
2437 - for pkg in slot_nodes:
2438 - parent_atoms = self._parent_atoms.get(pkg)
2439 - if not parent_atoms:
2440 - continue
2441 - all_parent_atoms.update(parent_atoms)
2442 -
2443 - for pkg in slot_nodes:
2444 - parent_atoms = self._parent_atoms.get(pkg)
2445 - if parent_atoms is None:
2446 - parent_atoms = set()
2447 - self._parent_atoms[pkg] = parent_atoms
2448 - for parent_atom in all_parent_atoms:
2449 - if parent_atom in parent_atoms:
2450 - continue
2451 - # Use package set for matching since it will match via
2452 - # PROVIDE when necessary, while match_from_list does not.
2453 - parent, atom = parent_atom
2454 - atom_set = InternalPackageSet(
2455 - initial_atoms=(atom,))
2456 - if atom_set.findAtomForPackage(pkg):
2457 - parent_atoms.add(parent_atom)
2458 - else:
2459 - self._slot_conflict_parent_atoms.add(parent_atom)
2460 -
2461 - def _reinstall_for_flags(self, forced_flags,
2462 - orig_use, orig_iuse, cur_use, cur_iuse):
2463 - """Return a set of flags that trigger reinstallation, or None if there
2464 - are no such flags."""
2465 - if "--newuse" in self.myopts:
2466 - flags = set(orig_iuse.symmetric_difference(
2467 - cur_iuse).difference(forced_flags))
2468 - flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
2469 - cur_iuse.intersection(cur_use)))
2470 - if flags:
2471 - return flags
2472 - elif "changed-use" == self.myopts.get("--reinstall"):
2473 - flags = orig_iuse.intersection(orig_use).symmetric_difference(
2474 - cur_iuse.intersection(cur_use))
2475 - if flags:
2476 - return flags
2477 - return None
2478 -
2479 - def _create_graph(self, allow_unsatisfied=False):
2480 - dep_stack = self._dep_stack
2481 - dep_disjunctive_stack = self._dep_disjunctive_stack
2482 - while dep_stack or dep_disjunctive_stack:
2483 - self.spinner.update()
2484 - while dep_stack:
2485 - dep = dep_stack.pop()
2486 - if isinstance(dep, Package):
2487 - if not self._add_pkg_deps(dep,
2488 - allow_unsatisfied=allow_unsatisfied):
2489 - return 0
2490 - continue
2491 - if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
2492 - return 0
2493 - if dep_disjunctive_stack:
2494 - if not self._pop_disjunction(allow_unsatisfied):
2495 - return 0
2496 - return 1
2497 -
2498 - def _add_dep(self, dep, allow_unsatisfied=False):
2499 - debug = "--debug" in self.myopts
2500 - buildpkgonly = "--buildpkgonly" in self.myopts
2501 - nodeps = "--nodeps" in self.myopts
2502 - empty = "empty" in self.myparams
2503 - deep = "deep" in self.myparams
2504 - update = "--update" in self.myopts and dep.depth <= 1
2505 - if dep.blocker:
2506 - if not buildpkgonly and \
2507 - not nodeps and \
2508 - dep.parent not in self._slot_collision_nodes:
2509 - if dep.parent.onlydeps:
2510 - # It's safe to ignore blockers if the
2511 - # parent is an --onlydeps node.
2512 - return 1
2513 - # The blocker applies to the root where
2514 - # the parent is or will be installed.
2515 - blocker = Blocker(atom=dep.atom,
2516 - eapi=dep.parent.metadata["EAPI"],
2517 - root=dep.parent.root)
2518 - self._blocker_parents.add(blocker, dep.parent)
2519 - return 1
2520 - dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
2521 - onlydeps=dep.onlydeps)
2522 - if not dep_pkg:
2523 - if dep.priority.optional:
2524 - # This could be an unecessary build-time dep
2525 - # pulled in by --with-bdeps=y.
2526 - return 1
2527 - if allow_unsatisfied:
2528 - self._unsatisfied_deps.append(dep)
2529 - return 1
2530 - self._unsatisfied_deps_for_display.append(
2531 - ((dep.root, dep.atom), {"myparent":dep.parent}))
2532 - return 0
2533 - # In some cases, dep_check will return deps that shouldn't
2534 - # be proccessed any further, so they are identified and
2535 - # discarded here. Try to discard as few as possible since
2536 - # discarded dependencies reduce the amount of information
2537 - # available for optimization of merge order.
2538 - if dep.priority.satisfied and \
2539 - not dep_pkg.installed and \
2540 - not (existing_node or empty or deep or update):
2541 - myarg = None
2542 - if dep.root == self.target_root:
2543 - try:
2544 - myarg = self._iter_atoms_for_pkg(dep_pkg).next()
2545 - except StopIteration:
2546 - pass
2547 - except portage.exception.InvalidDependString:
2548 - if not dep_pkg.installed:
2549 - # This shouldn't happen since the package
2550 - # should have been masked.
2551 - raise
2552 - if not myarg:
2553 - self._ignored_deps.append(dep)
2554 - return 1
2555 -
2556 - if not self._add_pkg(dep_pkg, dep):
2557 - return 0
2558 - return 1
2559 -
2560 - def _add_pkg(self, pkg, dep):
2561 - myparent = None
2562 - priority = None
2563 - depth = 0
2564 - if dep is None:
2565 - dep = Dependency()
2566 - else:
2567 - myparent = dep.parent
2568 - priority = dep.priority
2569 - depth = dep.depth
2570 - if priority is None:
2571 - priority = DepPriority()
2572 - """
2573 - Fills the digraph with nodes comprised of packages to merge.
2574 - mybigkey is the package spec of the package to merge.
2575 - myparent is the package depending on mybigkey ( or None )
2576 - addme = Should we add this package to the digraph or are we just looking at it's deps?
2577 - Think --onlydeps, we need to ignore packages in that case.
2578 - #stuff to add:
2579 - #SLOT-aware emerge
2580 - #IUSE-aware emerge -> USE DEP aware depgraph
2581 - #"no downgrade" emerge
2582 - """
2583 - # Ensure that the dependencies of the same package
2584 - # are never processed more than once.
2585 - previously_added = pkg in self.digraph
2586 -
2587 - # select the correct /var database that we'll be checking against
2588 - vardbapi = self.trees[pkg.root]["vartree"].dbapi
2589 - pkgsettings = self.pkgsettings[pkg.root]
2590 -
2591 - arg_atoms = None
2592 - if True:
2593 - try:
2594 - arg_atoms = list(self._iter_atoms_for_pkg(pkg))
2595 - except portage.exception.InvalidDependString, e:
2596 - if not pkg.installed:
2597 - show_invalid_depstring_notice(
2598 - pkg, pkg.metadata["PROVIDE"], str(e))
2599 - return 0
2600 - del e
2601 -
2602 - if not pkg.onlydeps:
2603 - if not pkg.installed and \
2604 - "empty" not in self.myparams and \
2605 - vardbapi.match(pkg.slot_atom):
2606 - # Increase the priority of dependencies on packages that
2607 - # are being rebuilt. This optimizes merge order so that
2608 - # dependencies are rebuilt/updated as soon as possible,
2609 - # which is needed especially when emerge is called by
2610 - # revdep-rebuild since dependencies may be affected by ABI
2611 - # breakage that has rendered them useless. Don't adjust
2612 - # priority here when in "empty" mode since all packages
2613 - # are being merged in that case.
2614 - priority.rebuild = True
2615 -
2616 - existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
2617 - slot_collision = False
2618 - if existing_node:
2619 - existing_node_matches = pkg.cpv == existing_node.cpv
2620 - if existing_node_matches and \
2621 - pkg != existing_node and \
2622 - dep.atom is not None:
2623 - # Use package set for matching since it will match via
2624 - # PROVIDE when necessary, while match_from_list does not.
2625 - atom_set = InternalPackageSet(initial_atoms=[dep.atom])
2626 - if not atom_set.findAtomForPackage(existing_node):
2627 - existing_node_matches = False
2628 - if existing_node_matches:
2629 - # The existing node can be reused.
2630 - if arg_atoms:
2631 - for parent_atom in arg_atoms:
2632 - parent, atom = parent_atom
2633 - self.digraph.add(existing_node, parent,
2634 - priority=priority)
2635 - self._add_parent_atom(existing_node, parent_atom)
2636 - # If a direct circular dependency is not an unsatisfied
2637 - # buildtime dependency then drop it here since otherwise
2638 - # it can skew the merge order calculation in an unwanted
2639 - # way.
2640 - if existing_node != myparent or \
2641 - (priority.buildtime and not priority.satisfied):
2642 - self.digraph.addnode(existing_node, myparent,
2643 - priority=priority)
2644 - if dep.atom is not None and dep.parent is not None:
2645 - self._add_parent_atom(existing_node,
2646 - (dep.parent, dep.atom))
2647 - return 1
2648 - else:
2649 -
2650 - # A slot collision has occurred. Sometimes this coincides
2651 - # with unresolvable blockers, so the slot collision will be
2652 - # shown later if there are no unresolvable blockers.
2653 - self._add_slot_conflict(pkg)
2654 - slot_collision = True
2655 -
2656 - if slot_collision:
2657 - # Now add this node to the graph so that self.display()
2658 - # can show use flags and --tree portage.output. This node is
2659 - # only being partially added to the graph. It must not be
2660 - # allowed to interfere with the other nodes that have been
2661 - # added. Do not overwrite data for existing nodes in
2662 - # self.mydbapi since that data will be used for blocker
2663 - # validation.
2664 - # Even though the graph is now invalid, continue to process
2665 - # dependencies so that things like --fetchonly can still
2666 - # function despite collisions.
2667 - pass
2668 - elif not previously_added:
2669 - self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
2670 - self.mydbapi[pkg.root].cpv_inject(pkg)
2671 - self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
2672 -
2673 - if not pkg.installed:
2674 - # Allow this package to satisfy old-style virtuals in case it
2675 - # doesn't already. Any pre-existing providers will be preferred
2676 - # over this one.
2677 - try:
2678 - pkgsettings.setinst(pkg.cpv, pkg.metadata)
2679 - # For consistency, also update the global virtuals.
2680 - settings = self.roots[pkg.root].settings
2681 - settings.unlock()
2682 - settings.setinst(pkg.cpv, pkg.metadata)
2683 - settings.lock()
2684 - except portage.exception.InvalidDependString, e:
2685 - show_invalid_depstring_notice(
2686 - pkg, pkg.metadata["PROVIDE"], str(e))
2687 - del e
2688 - return 0
2689 -
2690 - if arg_atoms:
2691 - self._set_nodes.add(pkg)
2692 -
2693 - # Do this even when addme is False (--onlydeps) so that the
2694 - # parent/child relationship is always known in case
2695 - # self._show_slot_collision_notice() needs to be called later.
2696 - self.digraph.add(pkg, myparent, priority=priority)
2697 - if dep.atom is not None and dep.parent is not None:
2698 - self._add_parent_atom(pkg, (dep.parent, dep.atom))
2699 -
2700 - if arg_atoms:
2701 - for parent_atom in arg_atoms:
2702 - parent, atom = parent_atom
2703 - self.digraph.add(pkg, parent, priority=priority)
2704 - self._add_parent_atom(pkg, parent_atom)
2705 -
2706 - """ This section determines whether we go deeper into dependencies or not.
2707 - We want to go deeper on a few occasions:
2708 - Installing package A, we need to make sure package A's deps are met.
2709 - emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
2710 - If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
2711 - """
2712 - dep_stack = self._dep_stack
2713 - if "recurse" not in self.myparams:
2714 - return 1
2715 - elif pkg.installed and \
2716 - "deep" not in self.myparams:
2717 - dep_stack = self._ignored_deps
2718 -
2719 - self.spinner.update()
2720 -
2721 - if arg_atoms:
2722 - depth = 0
2723 - pkg.depth = depth
2724 - if not previously_added:
2725 - dep_stack.append(pkg)
2726 - return 1
2727 -
2728 - def _add_parent_atom(self, pkg, parent_atom):
2729 - parent_atoms = self._parent_atoms.get(pkg)
2730 - if parent_atoms is None:
2731 - parent_atoms = set()
2732 - self._parent_atoms[pkg] = parent_atoms
2733 - parent_atoms.add(parent_atom)
2734 -
2735 - def _add_slot_conflict(self, pkg):
2736 - self._slot_collision_nodes.add(pkg)
2737 - slot_key = (pkg.slot_atom, pkg.root)
2738 - slot_nodes = self._slot_collision_info.get(slot_key)
2739 - if slot_nodes is None:
2740 - slot_nodes = set()
2741 - slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
2742 - self._slot_collision_info[slot_key] = slot_nodes
2743 - slot_nodes.add(pkg)
2744 -
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    """
    Expand pkg's DEPEND/RDEPEND/PDEPEND strings and queue each resulting
    dependency via _add_pkg_dep_string().

    @param pkg: the package whose dependencies are being processed
    @param allow_unsatisfied: passed through to _add_pkg_dep_string()
    @rtype: int
    @returns: 1 on success, 0 on fatal error (invalid dependency string
        for a non-installed package, or an ambiguous unqualified atom)
    """

    mytype = pkg.type_name
    myroot = pkg.root
    mykey = pkg.cpv
    metadata = pkg.metadata
    myuse = pkg.use.enabled
    jbigkey = pkg
    depth = pkg.depth + 1
    removal_action = "remove" in self.myparams

    # Collect the raw dependency strings from the package metadata.
    edepend={}
    depkeys = ["DEPEND","RDEPEND","PDEPEND"]
    for k in depkeys:
        edepend[k] = metadata[k]

    # With --buildpkgonly (and neither deep nor empty), runtime deps of
    # a not-yet-built package are irrelevant since nothing gets merged.
    if not pkg.built and \
        "--buildpkgonly" in self.myopts and \
        "deep" not in self.myparams and \
        "empty" not in self.myparams:
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""
    bdeps_optional = False

    if pkg.built and not removal_action:
        if self.myopts.get("--with-bdeps", "n") == "y":
            # Pull in build time deps as requested, but marked them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            # failing.
            bdeps_optional = True
        else:
            # built packages do not have build time dependencies.
            edepend["DEPEND"] = ""

    # NOTE: for removal actions the --with-bdeps default flips to "y".
    if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
        edepend["DEPEND"] = ""

    # Build-time deps normally resolve against "/"; --root-deps changes that.
    bdeps_root = "/"
    root_deps = self.myopts.get("--root-deps")
    if root_deps is not None:
        if root_deps is True:
            bdeps_root = myroot
        elif root_deps == "rdeps":
            edepend["DEPEND"] = ""

    # (root, dep string, priority) triples to process in order.
    deps = (
        (bdeps_root, edepend["DEPEND"],
            self._priority(buildtime=(not bdeps_optional),
            optional=bdeps_optional)),
        (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
        (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
    )

    debug = "--debug" in self.myopts
    # Installed packages get lenient dep parsing (their metadata may be old).
    strict = mytype != "installed"
    try:
        if not strict:
            portage.dep._dep_check_strict = False

        for dep_root, dep_string, dep_priority in deps:
            if not dep_string:
                continue
            if debug:
                print
                print "Parent: ", jbigkey
                print "Depstring:", dep_string
                print "Priority:", dep_priority

            try:

                # Normalize the dep string and strip out disjunctive
                # (|| and virtual) parts, which are queued for deferred
                # evaluation by _queue_disjunctive_deps().
                dep_string = portage.dep.paren_normalize(
                    portage.dep.use_reduce(
                        portage.dep.paren_reduce(dep_string),
                        uselist=pkg.use.enabled))

                dep_string = list(self._queue_disjunctive_deps(
                    pkg, dep_root, dep_priority, dep_string))

            except portage.exception.InvalidDependString, e:
                if pkg.installed:
                    # Tolerate broken metadata of installed packages.
                    del e
                    continue
                show_invalid_depstring_notice(pkg, dep_string, str(e))
                return 0

            if not dep_string:
                continue

            dep_string = portage.dep.paren_enclose(dep_string)

            if not self._add_pkg_dep_string(
                pkg, dep_root, dep_priority, dep_string,
                allow_unsatisfied):
                return 0

    except portage.exception.AmbiguousPackageName, e:
        pkgs = e.args[0]
        portage.writemsg("\n\n!!! An atom in the dependencies " + \
            "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
        for cpv in pkgs:
            portage.writemsg(" %s\n" % cpv, noiselevel=-1)
        portage.writemsg("\n", noiselevel=-1)
        if mytype == "binary":
            portage.writemsg(
                "!!! This binary package cannot be installed: '%s'\n" % \
                mykey, noiselevel=-1)
        elif mytype == "ebuild":
            portdb = self.roots[myroot].trees["porttree"].dbapi
            myebuild, mylocation = portdb.findname2(mykey)
            portage.writemsg("!!! This ebuild cannot be installed: " + \
                "'%s'\n" % myebuild, noiselevel=-1)
        portage.writemsg("!!! Please notify the package maintainer " + \
            "that atoms must be fully-qualified.\n", noiselevel=-1)
        return 0
    finally:
        # Always restore the module-global strict flag.
        portage.dep._dep_check_strict = True
    return 1
2866 -
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
    allow_unsatisfied):
    """
    Run atom selection on a single dependency string and add a
    Dependency node to the graph for each selected atom.

    @rtype: int
    @returns: 1 on success, 0 on fatal error (invalid dep string or atom
        of a non-installed package, or a failed _add_dep() call)
    """
    depth = pkg.depth + 1
    debug = "--debug" in self.myopts
    # Installed packages get lenient parsing of possibly-stale metadata.
    strict = pkg.type_name != "installed"

    if debug:
        print
        print "Parent: ", pkg
        print "Depstring:", dep_string
        print "Priority:", dep_priority

    try:
        selected_atoms = self._select_atoms(dep_root,
            dep_string, myuse=pkg.use.enabled, parent=pkg,
            strict=strict, priority=dep_priority)
    except portage.exception.InvalidDependString, e:
        show_invalid_depstring_notice(pkg, dep_string, str(e))
        del e
        if pkg.installed:
            # Broken metadata of an installed package is non-fatal.
            return 1
        return 0

    if debug:
        print "Candidates:", selected_atoms

    vardb = self.roots[dep_root].trees["vartree"].dbapi

    for atom in selected_atoms:
        try:

            atom = portage.dep.Atom(atom)

            mypriority = dep_priority.copy()
            # Mark deps already satisfied by an installed package.
            if not atom.blocker and vardb.match(atom):
                mypriority.satisfied = True

            if not self._add_dep(Dependency(atom=atom,
                blocker=atom.blocker, depth=depth, parent=pkg,
                priority=mypriority, root=dep_root),
                allow_unsatisfied=allow_unsatisfied):
                return 0

        except portage.exception.InvalidAtom, e:
            show_invalid_depstring_notice(
                pkg, dep_string, str(e))
            del e
            # Invalid atoms from installed packages are tolerated.
            if not pkg.installed:
                return 0

    if debug:
        print "Exiting...", pkg

    return 1
2921 -
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
    """
    Queue disjunctive (virtual and ||) deps in self._dep_disjunctive_stack.
    Yields non-disjunctive deps. Raises InvalidDependString when
    necessary.
    """
    idx = 0
    while idx < len(dep_struct):
        token = dep_struct[idx]
        if isinstance(token, list):
            # Recurse into nested groups, forwarding their plain deps.
            for plain_dep in self._queue_disjunctive_deps(
                pkg, dep_root, dep_priority, token):
                yield plain_dep
        elif token == "||":
            # Defer the whole disjunction (operator plus its choice list)
            # and consume the following element together with it.
            self._queue_disjunction(pkg, dep_root, dep_priority,
                [ token, dep_struct[ idx + 1 ] ] )
            idx += 1
        else:
            try:
                token = portage.dep.Atom(token)
            except portage.exception.InvalidAtom:
                # Installed packages may carry stale/broken atoms; only
                # non-installed packages make this fatal.
                if not pkg.installed:
                    raise portage.exception.InvalidDependString(
                        "invalid atom: '%s'" % token)
            else:
                # Note: Eventually this will check for PROPERTIES=virtual
                # or whatever other metadata gets implemented for this
                # purpose.
                if token.cp.startswith('virtual/'):
                    self._queue_disjunction( pkg, dep_root,
                        dep_priority, [ str(token) ] )
                else:
                    yield str(token)
        idx += 1
2956 -
2957 - def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
2958 - self._dep_disjunctive_stack.append(
2959 - (pkg, dep_root, dep_priority, dep_struct))
2960 -
def _pop_disjunction(self, allow_unsatisfied):
    """
    Pop one disjunctive dep from self._dep_disjunctive_stack, and use it to
    populate self._dep_stack.
    """
    pkg, dep_root, dep_priority, dep_struct = \
        self._dep_disjunctive_stack.pop()
    # Re-serialize the structure and feed it through the normal path.
    dep_string = portage.dep.paren_enclose(dep_struct)
    if self._add_pkg_dep_string(
        pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
        return 1
    return 0
2973 -
def _priority(self, **kwargs):
    """Build a dep priority object appropriate for the current action."""
    # Removal ("remove" in myparams) uses its own priority class.
    if "remove" in self.myparams:
        return UnmergeDepPriority(**kwargs)
    return DepPriority(**kwargs)
2980 -
def _dep_expand(self, root_config, atom_without_category):
    """
    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @rtype: list
    @returns: a list of atoms containing categories (possibly empty)
    """
    # Extract the bare package name via a throwaway "null" category.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    atom_pn = portage.catsplit(null_cp)[1]

    # Collect every category, across all configured databases, that
    # contains a package with this name.
    matched_categories = set()
    for db, pkg_type, built, installed, db_keys in \
        self._filtered_trees[root_config.root]["dbs"]:
        for category in db.categories:
            if db.cp_list("%s/%s" % (category, atom_pn)):
                matched_categories.add(category)

    return [insert_category_into_atom(atom_without_category, category)
        for category in matched_categories]
3006 -
3007 - def _have_new_virt(self, root, atom_cp):
3008 - ret = False
3009 - for db, pkg_type, built, installed, db_keys in \
3010 - self._filtered_trees[root]["dbs"]:
3011 - if db.cp_list(atom_cp):
3012 - ret = True
3013 - break
3014 - return ret
3015 -
def _iter_atoms_for_pkg(self, pkg):
    """
    Yield (arg, atom) pairs for each command-line argument atom that is
    satisfied by pkg, skipping matches that are superseded by a new-style
    virtual or by a visible package in a higher slot.
    """
    # TODO: add multiple $ROOT support
    if pkg.root != self.target_root:
        return
    atom_arg_map = self._atom_arg_map
    root_config = self.roots[pkg.root]
    for atom in self._set_atoms.iterAtomsForPackage(pkg):
        atom_cp = portage.dep_getkey(atom)
        # Skip old-style virtual matches when a new-style virtual
        # package exists for the same cp.
        if atom_cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom_cp):
            continue
        visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        higher_slot = None
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom_cp:
                continue
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
                break
            if pkg.slot_atom != visible_pkg.slot_atom:
                higher_slot = visible_pkg
                break
        if higher_slot is not None:
            # A newer visible package in a different slot wins this atom.
            continue
        for arg in atom_arg_map[(atom, pkg.root)]:
            # PackageArgs bind to one exact package instance.
            if isinstance(arg, PackageArg) and \
                arg.package != pkg:
                continue
            yield arg, atom
3047 -
def select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds sets, and deps, create the
    appropriate depgraph and return a favorite list.

    @param myfiles: command-line arguments: .tbz2 paths, .ebuild paths,
        absolute file paths (owner lookup), set names, or package atoms
    @rtype: tuple
    @returns: (success, favorites) where success is falsy on failure
    """
    debug = "--debug" in self.myopts
    root_config = self.roots[self.target_root]
    sets = root_config.sets
    getSetAtoms = root_config.setconfig.getSetAtoms
    myfavorites=[]
    myroot = self.target_root
    dbs = self._filtered_trees[myroot]["dbs"]
    vardb = self.trees[myroot]["vartree"].dbapi
    real_vardb = self._trees_orig[myroot]["vartree"].dbapi
    portdb = self.trees[myroot]["porttree"].dbapi
    bindb = self.trees[myroot]["bintree"].dbapi
    pkgsettings = self.pkgsettings[myroot]
    args = []
    onlydeps = "--onlydeps" in self.myopts
    lookup_owners = []
    # Translate each argument into an Arg object (PackageArg / SetArg /
    # AtomArg), queuing absolute paths for a batched owner lookup.
    for x in myfiles:
        ext = os.path.splitext(x)[1]
        if ext==".tbz2":
            if not os.path.exists(x):
                # Try the conventional PKGDIR locations before failing.
                if os.path.exists(
                    os.path.join(pkgsettings["PKGDIR"], "All", x)):
                    x = os.path.join(pkgsettings["PKGDIR"], "All", x)
                elif os.path.exists(
                    os.path.join(pkgsettings["PKGDIR"], x)):
                    x = os.path.join(pkgsettings["PKGDIR"], x)
                else:
                    print "\n\n!!! Binary package '"+str(x)+"' does not exist."
                    print "!!! Please ensure the tbz2 exists as specified.\n"
                    return 0, myfavorites
            mytbz2=portage.xpak.tbz2(x)
            mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
            # The tbz2 must live at the location bintree expects for mykey.
            if os.path.realpath(x) != \
                os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
                print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
                return 0, myfavorites
            db_keys = list(bindb._aux_cache_keys)
            metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
            pkg = Package(type_name="binary", root_config=root_config,
                cpv=mykey, built=True, metadata=metadata,
                onlydeps=onlydeps)
            self._pkg_cache[pkg] = pkg
            args.append(PackageArg(arg=x, package=pkg,
                root_config=root_config))
        elif ext==".ebuild":
            ebuild_path = portage.util.normalize_path(os.path.abspath(x))
            pkgdir = os.path.dirname(ebuild_path)
            tree_root = os.path.dirname(os.path.dirname(pkgdir))
            cp = pkgdir[len(tree_root)+1:]
            e = portage.exception.PackageNotFound(
                ("%s is not in a valid portage tree " + \
                "hierarchy or does not exist") % x)
            if not portage.isvalidatom(cp):
                raise e
            cat = portage.catsplit(cp)[0]
            # Strip the ".ebuild" suffix (7 chars) to build the cpv key.
            mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
            if not portage.isvalidatom("="+mykey):
                raise e
            ebuild_path = portdb.findname(mykey)
            if ebuild_path:
                if ebuild_path != os.path.join(os.path.realpath(tree_root),
                    cp, os.path.basename(ebuild_path)):
                    print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
                    return 0, myfavorites
                # Warn (but continue after a delay) for masked packages.
                if mykey not in portdb.xmatch(
                    "match-visible", portage.dep_getkey(mykey)):
                    print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
                    print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
                    print colorize("BAD", "*** page for details.")
                    countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
                        "Continuing...")
            else:
                raise portage.exception.PackageNotFound(
                    "%s is not in a valid portage tree hierarchy or does not exist" % x)
            db_keys = list(portdb._aux_cache_keys)
            metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
            pkg = Package(type_name="ebuild", root_config=root_config,
                cpv=mykey, metadata=metadata, onlydeps=onlydeps)
            pkgsettings.setcpv(pkg)
            pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
            pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
            self._pkg_cache[pkg] = pkg
            args.append(PackageArg(arg=x, package=pkg,
                root_config=root_config))
        elif x.startswith(os.path.sep):
            if not x.startswith(myroot):
                portage.writemsg(("\n\n!!! '%s' does not start with" + \
                    " $ROOT.\n") % x, noiselevel=-1)
                return 0, []
            # Queue these up since it's most efficient to handle
            # multiple files in a single iter_owners() call.
            lookup_owners.append(x)
        else:
            if x in ("system", "world"):
                x = SETPREFIX + x
            if x.startswith(SETPREFIX):
                s = x[len(SETPREFIX):]
                if s not in sets:
                    raise portage.exception.PackageSetNotFound(s)
                if s in self._sets:
                    continue
                # Recursively expand sets so that containment tests in
                # self._get_parent_sets() properly match atoms in nested
                # sets (like if world contains system).
                expanded_set = InternalPackageSet(
                    initial_atoms=getSetAtoms(s))
                self._sets[s] = expanded_set
                args.append(SetArg(arg=x, set=expanded_set,
                    root_config=root_config))
                continue
            if not is_valid_package_atom(x):
                portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
                    noiselevel=-1)
                portage.writemsg("!!! Please check ebuild(5) for full details.\n")
                portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
                return (0,[])
            # Don't expand categories or old-style virtuals here unless
            # necessary. Expansion of old-style virtuals here causes at
            # least the following problems:
            #   1) It's more difficult to determine which set(s) an atom
            #      came from, if any.
            #   2) It takes away freedom from the resolver to choose other
            #      possible expansions when necessary.
            if "/" in x:
                args.append(AtomArg(arg=x, atom=x,
                    root_config=root_config))
                continue
            expanded_atoms = self._dep_expand(root_config, x)
            installed_cp_set = set()
            for atom in expanded_atoms:
                atom_cp = portage.dep_getkey(atom)
                if vardb.cp_list(atom_cp):
                    installed_cp_set.add(atom_cp)

            if len(installed_cp_set) > 1:
                # Prefer the single non-virtual match, if there is one.
                non_virtual_cps = set()
                for atom_cp in installed_cp_set:
                    if not atom_cp.startswith("virtual/"):
                        non_virtual_cps.add(atom_cp)
                if len(non_virtual_cps) == 1:
                    installed_cp_set = non_virtual_cps

            if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
                # Disambiguate via the single installed candidate.
                installed_cp = iter(installed_cp_set).next()
                expanded_atoms = [atom for atom in expanded_atoms \
                    if portage.dep_getkey(atom) == installed_cp]

            if len(expanded_atoms) > 1:
                print
                print
                ambiguous_package_name(x, expanded_atoms, root_config,
                    self.spinner, self.myopts)
                return False, myfavorites
            if expanded_atoms:
                atom = expanded_atoms[0]
            else:
                null_atom = insert_category_into_atom(x, "null")
                null_cp = portage.dep_getkey(null_atom)
                cat, atom_pn = portage.catsplit(null_cp)
                virts_p = root_config.settings.get_virts_p().get(atom_pn)
                if virts_p:
                    # Allow the depgraph to choose which virtual.
                    atom = insert_category_into_atom(x, "virtual")
                else:
                    atom = insert_category_into_atom(x, "null")

            args.append(AtomArg(arg=x, atom=atom,
                root_config=root_config))

    if lookup_owners:
        relative_paths = []
        search_for_multiple = False
        if len(lookup_owners) > 1:
            search_for_multiple = True

        for x in lookup_owners:
            # A directory may be owned by several packages.
            if not search_for_multiple and os.path.isdir(x):
                search_for_multiple = True
            relative_paths.append(x[len(myroot):])

        owners = set()
        for pkg, relative_path in \
            real_vardb._owners.iter_owners(relative_paths):
            owners.add(pkg.mycpv)
            if not search_for_multiple:
                break

        if not owners:
            portage.writemsg(("\n\n!!! '%s' is not claimed " + \
                "by any package.\n") % lookup_owners[0], noiselevel=-1)
            return 0, []

        for cpv in owners:
            slot = vardb.aux_get(cpv, ["SLOT"])[0]
            if not slot:
                # portage now masks packages with missing slot, but it's
                # possible that one was installed by an older version
                atom = portage.cpv_getkey(cpv)
            else:
                atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
            args.append(AtomArg(arg=atom, atom=atom,
                root_config=root_config))

    if "--update" in self.myopts:
        # In some cases, the greedy slots behavior can pull in a slot that
        # the user would want to uninstall due to it being blocked by a
        # newer version in a different slot. Therefore, it's necessary to
        # detect and discard any that should be uninstalled. Each time
        # that arguments are updated, package selections are repeated in
        # order to ensure consistency with the current arguments:
        #
        #  1) Initialize args
        #  2) Select packages and generate initial greedy atoms
        #  3) Update args with greedy atoms
        #  4) Select packages and generate greedy atoms again, while
        #     accounting for any blockers between selected packages
        #  5) Update args with revised greedy atoms

        self._set_args(args)
        greedy_args = []
        for arg in args:
            greedy_args.append(arg)
            if not isinstance(arg, AtomArg):
                continue
            for atom in self._greedy_slots(arg.root_config, arg.atom):
                greedy_args.append(
                    AtomArg(arg=arg.arg, atom=atom,
                        root_config=arg.root_config))

        self._set_args(greedy_args)
        del greedy_args

        # Revise greedy atoms, accounting for any blockers
        # between selected packages.
        revised_greedy_args = []
        for arg in args:
            revised_greedy_args.append(arg)
            if not isinstance(arg, AtomArg):
                continue
            for atom in self._greedy_slots(arg.root_config, arg.atom,
                blocker_lookahead=True):
                revised_greedy_args.append(
                    AtomArg(arg=arg.arg, atom=atom,
                        root_config=arg.root_config))
        args = revised_greedy_args
        del revised_greedy_args

    self._set_args(args)

    # Deduplicate favorites while collecting them from the Arg objects.
    myfavorites = set(myfavorites)
    for arg in args:
        if isinstance(arg, (AtomArg, PackageArg)):
            myfavorites.add(arg.atom)
        elif isinstance(arg, SetArg):
            myfavorites.add(arg.arg)
    myfavorites = list(myfavorites)

    pprovideddict = pkgsettings.pprovideddict
    if debug:
        portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
    args.reverse()
    while args:
        arg = args.pop()
        for atom in arg.set:
            self.spinner.update()
            dep = Dependency(atom=atom, onlydeps=onlydeps,
                root=myroot, parent=arg)
            atom_cp = portage.dep_getkey(atom)
            try:
                pprovided = pprovideddict.get(portage.dep_getkey(atom))
                if pprovided and portage.match_from_list(atom, pprovided):
                    # A provided package has been specified on the command line.
                    self._pprovided_args.append((arg, atom))
                    continue
                if isinstance(arg, PackageArg):
                    if not self._add_pkg(arg.package, dep) or \
                        not self._create_graph():
                        sys.stderr.write(("\n\n!!! Problem resolving " + \
                            "dependencies for %s\n") % arg.arg)
                        return 0, myfavorites
                    continue
                if debug:
                    portage.writemsg(" Arg: %s\n Atom: %s\n" % \
                        (arg, atom), noiselevel=-1)
                pkg, existing_node = self._select_package(
                    myroot, atom, onlydeps=onlydeps)
                if not pkg:
                    # Missing system/world atoms are recorded and skipped;
                    # anything else is fatal.
                    if not (isinstance(arg, SetArg) and \
                        arg.name in ("system", "world")):
                        self._unsatisfied_deps_for_display.append(
                            ((myroot, atom), {}))
                        return 0, myfavorites
                    self._missing_args.append((arg, atom))
                    continue
                if atom_cp != pkg.cp:
                    # For old-style virtuals, we need to repeat the
                    # package.provided check against the selected package.
                    expanded_atom = atom.replace(atom_cp, pkg.cp)
                    pprovided = pprovideddict.get(pkg.cp)
                    if pprovided and \
                        portage.match_from_list(expanded_atom, pprovided):
                        # A provided package has been
                        # specified on the command line.
                        self._pprovided_args.append((arg, atom))
                        continue
                if pkg.installed and "selective" not in self.myparams:
                    self._unsatisfied_deps_for_display.append(
                        ((myroot, atom), {}))
                    # Previous behavior was to bail out in this case, but
                    # since the dep is satisfied by the installed package,
                    # it's more friendly to continue building the graph
                    # and just show a warning message. Therefore, only bail
                    # out here if the atom is not from either the system or
                    # world set.
                    if not (isinstance(arg, SetArg) and \
                        arg.name in ("system", "world")):
                        return 0, myfavorites

                # Add the selected package to the graph as soon as possible
                # so that later dep_check() calls can use it as feedback
                # for making more consistent atom selections.
                if not self._add_pkg(pkg, dep):
                    if isinstance(arg, SetArg):
                        sys.stderr.write(("\n\n!!! Problem resolving " + \
                            "dependencies for %s from %s\n") % \
                            (atom, arg.arg))
                    else:
                        sys.stderr.write(("\n\n!!! Problem resolving " + \
                            "dependencies for %s\n") % atom)
                    return 0, myfavorites

            except portage.exception.MissingSignature, e:
                portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
                portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
                portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
                portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
                portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
                return 0, myfavorites
            except portage.exception.InvalidSignature, e:
                portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
                portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
                portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
                portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
                portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
                return 0, myfavorites
            except SystemExit, e:
                raise # Needed else can't exit
            except Exception, e:
                print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
                print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
                raise

    # Now that the root packages have been added to the graph,
    # process the dependencies.
    if not self._create_graph():
        return 0, myfavorites

    missing=0
    if "--usepkgonly" in self.myopts:
        for xs in self.digraph.all_nodes():
            if not isinstance(xs, Package):
                continue
            if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
                if missing == 0:
                    print
                missing += 1
                print "Missing binary for:",xs[2]

    try:
        self.altlist()
    except self._unknown_internal_error:
        return False, myfavorites

    # We're true here unless we are missing binaries.
    return (not missing,myfavorites)
3427 -
def _set_args(self, args):
    """
    Create the "args" package set from atoms and packages given as
    arguments. This method can be called multiple times if necessary.
    The package selection cache is automatically invalidated, since
    arguments influence package selections.
    """
    # Rebuild the "args" set from scratch.
    args_set = self._sets["args"]
    args_set.clear()
    for arg in args:
        if isinstance(arg, (AtomArg, PackageArg)):
            args_set.add(arg.atom)

    # Refresh the flattened union of all set atoms.
    self._set_atoms.clear()
    self._set_atoms.update(chain(*self._sets.itervalues()))

    # Rebuild the (atom, root) -> [args] reverse map.
    atom_arg_map = self._atom_arg_map
    atom_arg_map.clear()
    for arg in args:
        for atom in arg.set:
            refs = atom_arg_map.setdefault(
                (atom, arg.root_config.root), [])
            if arg not in refs:
                refs.append(arg)

    # Invalidate the package selection cache, since
    # arguments influence package selections.
    self._highest_pkg_cache.clear()
    for trees in self._filtered_trees.itervalues():
        trees["porttree"].dbapi._clear_cache()
3464 -
def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
    """
    Return a list of slot atoms corresponding to installed slots that
    differ from the slot of the highest visible match. When
    blocker_lookahead is True, slot atoms that would trigger a blocker
    conflict are automatically discarded, potentially allowing automatic
    uninstallation of older slots when appropriate.
    """
    highest_pkg, in_graph = self._select_package(root_config.root, atom)
    if highest_pkg is None:
        return []
    vardb = root_config.trees["vartree"].dbapi
    # Collect the SLOTs of installed packages matching the atom.
    slots = set()
    for cpv in vardb.match(atom):
        # don't mix new virtuals with old virtuals
        if portage.cpv_getkey(cpv) == highest_pkg.cp:
            slots.add(vardb.aux_get(cpv, ["SLOT"])[0])

    slots.add(highest_pkg.metadata["SLOT"])
    if len(slots) == 1:
        # Only the highest match's slot exists; nothing to add greedily.
        return []
    greedy_pkgs = []
    slots.remove(highest_pkg.metadata["SLOT"])
    # For every other slot, select its best package if it's older than
    # the highest match.
    while slots:
        slot = slots.pop()
        slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
        pkg, in_graph = self._select_package(root_config.root, slot_atom)
        if pkg is not None and \
            pkg.cp == highest_pkg.cp and pkg < highest_pkg:
            greedy_pkgs.append(pkg)
    if not greedy_pkgs:
        return []
    if not blocker_lookahead:
        return [pkg.slot_atom for pkg in greedy_pkgs]

    # With blocker lookahead, parse each candidate's blocker atoms so
    # mutually-blocking slot candidates can be discarded.
    blockers = {}
    blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
    for pkg in greedy_pkgs + [highest_pkg]:
        dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
        try:
            atoms = self._select_atoms(
                pkg.root, dep_str, pkg.use.enabled,
                parent=pkg, strict=True)
        except portage.exception.InvalidDependString:
            continue
        blocker_atoms = (x for x in atoms if x.blocker)
        blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)

    if highest_pkg not in blockers:
        return []

    # filter packages with invalid deps
    greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]

    # filter packages that conflict with highest_pkg
    greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
        (blockers[highest_pkg].findAtomForPackage(pkg) or \
        blockers[pkg].findAtomForPackage(highest_pkg))]

    if not greedy_pkgs:
        return []

    # If two packages conflict, discard the lower version.
    discard_pkgs = set()
    greedy_pkgs.sort(reverse=True)
    for i in xrange(len(greedy_pkgs) - 1):
        pkg1 = greedy_pkgs[i]
        if pkg1 in discard_pkgs:
            continue
        for j in xrange(i + 1, len(greedy_pkgs)):
            pkg2 = greedy_pkgs[j]
            if pkg2 in discard_pkgs:
                continue
            if blockers[pkg1].findAtomForPackage(pkg2) or \
                blockers[pkg2].findAtomForPackage(pkg1):
                # pkg1 > pkg2
                discard_pkgs.add(pkg2)

    return [pkg.slot_atom for pkg in greedy_pkgs \
        if pkg not in discard_pkgs]
3545 -
3546 - def _select_atoms_from_graph(self, *pargs, **kwargs):
3547 - """
3548 - Prefer atoms matching packages that have already been
3549 - added to the graph or those that are installed and have
3550 - not been scheduled for replacement.
3551 - """
3552 - kwargs["trees"] = self._graph_trees
3553 - return self._select_atoms_highest_available(*pargs, **kwargs)
3554 -
def _select_atoms_highest_available(self, root, depstring,
	myuse=None, parent=None, strict=True, trees=None, priority=None):
	"""
	Evaluate a dependency string with dep_check() and return the
	selected atoms. This will raise InvalidDependString if necessary.
	If trees is None then self._filtered_trees is used.

	@param root: root whose pkgsettings drive the evaluation
	@param depstring: raw dependency string to evaluate
	@param myuse: USE flags passed through to dep_check()
	@param parent: requesting package; only forwarded for buildtime
		deps (see below)
	@param strict: when False, portage.dep._dep_check_strict is
		temporarily disabled for the dep_check() call
	@param trees: tree dict for dep_check(); defaults to
		self._filtered_trees
	@param priority: dependency priority; only its "buildtime"
		attribute is consulted
	@rtype: list
	@returns: the atoms selected by dep_check()
	"""
	pkgsettings = self.pkgsettings[root]
	if trees is None:
		trees = self._filtered_trees
	if not getattr(priority, "buildtime", False):
		# The parent should only be passed to dep_check() for buildtime
		# dependencies since that's the only case when it's appropriate
		# to trigger the circular dependency avoidance code which uses it.
		# It's important not to trigger the same circular dependency
		# avoidance code for runtime dependencies since it's not needed
		# and it can promote an incorrect package choice.
		parent = None
	# NOTE: the original code wrapped this region in a vestigial
	# "if True:" block; it has been removed (pure dead scaffolding,
	# no behavior change).
	try:
		if parent is not None:
			trees[root]["parent"] = parent
		if not strict:
			portage.dep._dep_check_strict = False
		mycheck = portage.dep_check(depstring, None,
			pkgsettings, myuse=myuse,
			myroot=root, trees=trees)
	finally:
		# Always undo the temporary "parent" injection and restore
		# strict mode, even when dep_check() raises.
		if parent is not None:
			trees[root].pop("parent")
		portage.dep._dep_check_strict = True
	if not mycheck[0]:
		raise portage.exception.InvalidDependString(mycheck[1])
	selected_atoms = mycheck[1]
	return selected_atoms
3587 -
def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
	"""
	Print a human-readable explanation of why `atom` could not be
	satisfied: missing or mis-set USE flags, missing IUSE, masked
	packages, or no ebuilds at all — followed by the chain of parent
	packages and the argument that pulled the dependency in.
	"""
	atom = portage.dep.Atom(atom)
	atom_set = InternalPackageSet(initial_atoms=(atom,))
	# Strip USE deps (but keep the slot) so that candidates which fail
	# only on USE conditions are still matched and can be reported.
	atom_without_use = atom
	if atom.use:
		atom_without_use = portage.dep.remove_slot(atom)
		if atom.slot:
			atom_without_use += ":" + atom.slot
		atom_without_use = portage.dep.Atom(atom_without_use)
	xinfo = '"%s"' % atom
	if arg:
		xinfo='"%s"' % arg
	# Discard null/ from failed cpv_expand category expansion.
	xinfo = xinfo.replace("null/", "")
	masked_packages = []
	missing_use = []
	masked_pkg_instances = set()
	missing_licenses = []  # NOTE(review): unused in this method
	have_eapi_mask = False
	pkgsettings = self.pkgsettings[root]
	implicit_iuse = pkgsettings._get_implicit_iuse()
	root_config = self.roots[root]
	portdb = self.roots[root].trees["porttree"].dbapi  # NOTE(review): unused here
	dbs = self._filtered_trees[root]["dbs"]
	# Scan every non-installed db for candidates and classify each one
	# as masked and/or failing only on USE conditions.
	for db, pkg_type, built, installed, db_keys in dbs:
		if installed:
			continue
		match = db.match  # NOTE(review): unused local
		if hasattr(db, "xmatch"):
			cpv_list = db.xmatch("match-all", atom_without_use)
		else:
			cpv_list = db.match(atom_without_use)
		# descending order
		cpv_list.reverse()
		for cpv in cpv_list:
			metadata, mreasons = get_mask_info(root_config, cpv,
				pkgsettings, db, pkg_type, built, installed, db_keys)
			if metadata is not None:
				pkg = Package(built=built, cpv=cpv,
					installed=installed, metadata=metadata,
					root_config=root_config)
				if pkg.cp != atom.cp:
					# A cpv can be returned from dbapi.match() as an
					# old-style virtual match even in cases when the
					# package does not actually PROVIDE the virtual.
					# Filter out any such false matches here.
					if not atom_set.findAtomForPackage(pkg):
						continue
				if mreasons:
					masked_pkg_instances.add(pkg)
				if atom.use:
					missing_use.append(pkg)
					if not mreasons:
						continue
			masked_packages.append(
				(root_config, pkgsettings, cpv, metadata, mreasons))

	# For USE-dep failures, work out whether the flags are merely set
	# wrong (changeable) or absent from IUSE entirely (unsatisfiable).
	missing_use_reasons = []
	missing_iuse_reasons = []
	for pkg in missing_use:
		use = pkg.use.enabled
		iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
		iuse_re = re.compile("^(%s)$" % "|".join(iuse))
		missing_iuse = []
		for x in atom.use.required:
			if iuse_re.match(x) is None:
				missing_iuse.append(x)
		mreasons = []
		if missing_iuse:
			mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
			missing_iuse_reasons.append((pkg, mreasons))
		else:
			need_enable = sorted(atom.use.enabled.difference(use))
			need_disable = sorted(atom.use.disabled.intersection(use))
			if need_enable or need_disable:
				changes = []
				changes.extend(colorize("red", "+" + x) \
					for x in need_enable)
				changes.extend(colorize("blue", "-" + x) \
					for x in need_disable)
				mreasons.append("Change USE: %s" % " ".join(changes))
				missing_use_reasons.append((pkg, mreasons))

	# Masked instances are reported via the masking message instead.
	unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
		in missing_use_reasons if pkg not in masked_pkg_instances]

	unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
		in missing_iuse_reasons if pkg not in masked_pkg_instances]

	show_missing_use = False
	if unmasked_use_reasons:
		# Only show the latest version.
		show_missing_use = unmasked_use_reasons[:1]
	elif unmasked_iuse_reasons:
		if missing_use_reasons:
			# All packages with required IUSE are masked,
			# so display a normal masking message.
			pass
		else:
			show_missing_use = unmasked_iuse_reasons

	if show_missing_use:
		print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
		print "!!! One of the following packages is required to complete your request:"
		for pkg, mreasons in show_missing_use:
			print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"

	elif masked_packages:
		print "\n!!! " + \
			colorize("BAD", "All ebuilds that could satisfy ") + \
			colorize("INFORM", xinfo) + \
			colorize("BAD", " have been masked.")
		print "!!! One of the following masked packages is required to complete your request:"
		have_eapi_mask = show_masked_packages(masked_packages)
		if have_eapi_mask:
			print
			msg = ("The current version of portage supports " + \
				"EAPI '%s'. You must upgrade to a newer version" + \
				" of portage before EAPI masked packages can" + \
				" be installed.") % portage.const.EAPI
			from textwrap import wrap
			for line in wrap(msg, 75):
				print line
		print
		show_mask_docs()
	else:
		print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."

	# Show parent nodes and the argument that pulled them in.
	traversed_nodes = set()
	node = myparent
	msg = []
	while node is not None:
		traversed_nodes.add(node)
		msg.append('(dependency required by "%s" [%s])' % \
			(colorize('INFORM', str(node.cpv)), node.type_name))
		# When traversing to parents, prefer arguments over packages
		# since arguments are root nodes. Never traverse the same
		# package twice, in order to prevent an infinite loop.
		selected_parent = None
		for parent in self.digraph.parent_nodes(node):
			if isinstance(parent, DependencyArg):
				msg.append('(dependency required by "%s" [argument])' % \
					(colorize('INFORM', str(parent))))
				selected_parent = None
				break
			if parent not in traversed_nodes:
				selected_parent = parent
		node = selected_parent
	for line in msg:
		print line

	print
3741 -
3742 - def _select_pkg_highest_available(self, root, atom, onlydeps=False):
3743 - cache_key = (root, atom, onlydeps)
3744 - ret = self._highest_pkg_cache.get(cache_key)
3745 - if ret is not None:
3746 - pkg, existing = ret
3747 - if pkg and not existing:
3748 - existing = self._slot_pkg_map[root].get(pkg.slot_atom)
3749 - if existing and existing == pkg:
3750 - # Update the cache to reflect that the
3751 - # package has been added to the graph.
3752 - ret = pkg, pkg
3753 - self._highest_pkg_cache[cache_key] = ret
3754 - return ret
3755 - ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
3756 - self._highest_pkg_cache[cache_key] = ret
3757 - pkg, existing = ret
3758 - if pkg is not None:
3759 - settings = pkg.root_config.settings
3760 - if visible(settings, pkg) and not (pkg.installed and \
3761 - settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
3762 - pkg.root_config.visible_pkgs.cpv_inject(pkg)
3763 - return ret
3764 -
def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
	"""
	Uncached implementation behind _select_pkg_highest_available().
	Scan the filtered dbs for the given root, collect acceptable
	matches for `atom` (one best match per db, ordered by type
	preference), and return a (pkg, existing_node) pair where
	existing_node is a matching package already in the graph, if any.
	Returns (None, None) when nothing matches.
	"""
	root_config = self.roots[root]
	pkgsettings = self.pkgsettings[root]
	dbs = self._filtered_trees[root]["dbs"]
	vardb = self.roots[root].trees["vartree"].dbapi
	portdb = self.roots[root].trees["porttree"].dbapi
	# List of acceptable packages, ordered by type preference.
	matched_packages = []
	highest_version = None
	if not isinstance(atom, portage.dep.Atom):
		atom = portage.dep.Atom(atom)
	atom_cp = atom.cp
	atom_set = InternalPackageSet(initial_atoms=(atom,))
	existing_node = None
	myeb = None
	usepkgonly = "--usepkgonly" in self.myopts
	empty = "empty" in self.myparams
	selective = "selective" in self.myparams
	reinstall = False
	noreplace = "--noreplace" in self.myopts
	# Behavior of the "selective" parameter depends on
	# whether or not a package matches an argument atom.
	# If an installed package provides an old-style
	# virtual that is no longer provided by an available
	# package, the installed package may match an argument
	# atom even though none of the available packages do.
	# Therefore, "selective" logic does not consider
	# whether or not an installed package matches an
	# argument atom. It only considers whether or not
	# available packages match argument atoms, which is
	# represented by the found_available_arg flag.
	found_available_arg = False
	# First pass looks for a node that already exists in the graph;
	# second pass considers all candidates.
	for find_existing_node in True, False:
		if existing_node:
			break
		for db, pkg_type, built, installed, db_keys in dbs:
			if existing_node:
				break
			if installed and not find_existing_node:
				want_reinstall = reinstall or empty or \
					(found_available_arg and not selective)
				if want_reinstall and matched_packages:
					continue
			if hasattr(db, "xmatch"):
				cpv_list = db.xmatch("match-all", atom)
			else:
				cpv_list = db.match(atom)

			# USE=multislot can make an installed package appear as if
			# it doesn't satisfy a slot dependency. Rebuilding the ebuild
			# won't do any good as long as USE=multislot is enabled since
			# the newly built package still won't have the expected slot.
			# Therefore, assume that such SLOT dependencies are already
			# satisfied rather than forcing a rebuild.
			if installed and not cpv_list and atom.slot:
				for cpv in db.match(atom.cp):
					slot_available = False
					for other_db, other_type, other_built, \
						other_installed, other_keys in dbs:
						try:
							if atom.slot == \
								other_db.aux_get(cpv, ["SLOT"])[0]:
								slot_available = True
								break
						except KeyError:
							pass
					if not slot_available:
						continue
					inst_pkg = self._pkg(cpv, "installed",
						root_config, installed=installed)
					# Remove the slot from the atom and verify that
					# the package matches the resulting atom.
					atom_without_slot = portage.dep.remove_slot(atom)
					if atom.use:
						atom_without_slot += str(atom.use)
					atom_without_slot = portage.dep.Atom(atom_without_slot)
					if portage.match_from_list(
						atom_without_slot, [inst_pkg]):
						cpv_list = [inst_pkg.cpv]
					break

			if not cpv_list:
				continue
			pkg_status = "merge"
			if installed or onlydeps:
				pkg_status = "nomerge"
			# descending order
			cpv_list.reverse()
			for cpv in cpv_list:
				# Make --noreplace take precedence over --newuse.
				if not installed and noreplace and \
					cpv in vardb.match(atom):
					# If the installed version is masked, it may
					# be necessary to look at lower versions,
					# in case there is a visible downgrade.
					continue
				reinstall_for_flags = None
				cache_key = (pkg_type, root, cpv, pkg_status)
				calculated_use = True
				pkg = self._pkg_cache.get(cache_key)
				if pkg is None:
					calculated_use = False
					try:
						metadata = izip(db_keys, db.aux_get(cpv, db_keys))
					except KeyError:
						# Metadata unavailable; skip this cpv.
						continue
					pkg = Package(built=built, cpv=cpv,
						installed=installed, metadata=metadata,
						onlydeps=onlydeps, root_config=root_config,
						type_name=pkg_type)
					metadata = pkg.metadata
					if not built:
						metadata['CHOST'] = pkgsettings.get('CHOST', '')
					if not built and ("?" in metadata["LICENSE"] or \
						"?" in metadata["PROVIDE"]):
						# This is avoided whenever possible because
						# it's expensive. It only needs to be done here
						# if it has an effect on visibility.
						pkgsettings.setcpv(pkg)
						metadata["USE"] = pkgsettings["PORTAGE_USE"]
						calculated_use = True
					self._pkg_cache[pkg] = pkg

				if not installed or (built and matched_packages):
					# Only enforce visibility on installed packages
					# if there is at least one other visible package
					# available. By filtering installed masked packages
					# here, packages that have been masked since they
					# were installed can be automatically downgraded
					# to an unmasked version.
					try:
						if not visible(pkgsettings, pkg):
							continue
					except portage.exception.InvalidDependString:
						if not installed:
							continue

					# Enable upgrade or downgrade to a version
					# with visible KEYWORDS when the installed
					# version is masked by KEYWORDS, but never
					# reinstall the same exact version only due
					# to a KEYWORDS mask.
					if built and matched_packages:

						different_version = None
						for avail_pkg in matched_packages:
							if not portage.dep.cpvequal(
								pkg.cpv, avail_pkg.cpv):
								different_version = avail_pkg
								break
						if different_version is not None:

							if installed and \
								pkgsettings._getMissingKeywords(
								pkg.cpv, pkg.metadata):
								continue

							# If the ebuild no longer exists or it's
							# keywords have been dropped, reject built
							# instances (installed or binary).
							# If --usepkgonly is enabled, assume that
							# the ebuild status should be ignored.
							if not usepkgonly:
								try:
									pkg_eb = self._pkg(
										pkg.cpv, "ebuild", root_config)
								except portage.exception.PackageNotFound:
									continue
								else:
									if not visible(pkgsettings, pkg_eb):
										continue

				if not pkg.built and not calculated_use:
					# This is avoided whenever possible because
					# it's expensive.
					pkgsettings.setcpv(pkg)
					pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]

				if pkg.cp != atom.cp:
					# A cpv can be returned from dbapi.match() as an
					# old-style virtual match even in cases when the
					# package does not actually PROVIDE the virtual.
					# Filter out any such false matches here.
					if not atom_set.findAtomForPackage(pkg):
						continue

				myarg = None
				if root == self.target_root:
					try:
						# Ebuild USE must have been calculated prior
						# to this point, in case atoms have USE deps.
						myarg = self._iter_atoms_for_pkg(pkg).next()
					except StopIteration:
						pass
					except portage.exception.InvalidDependString:
						if not installed:
							# masked by corruption
							continue
				if not installed and myarg:
					found_available_arg = True

				if atom.use and not pkg.built:
					use = pkg.use.enabled
					if atom.use.enabled.difference(use):
						continue
					if atom.use.disabled.intersection(use):
						continue
				if pkg.cp == atom_cp:
					if highest_version is None:
						highest_version = pkg
					elif pkg > highest_version:
						highest_version = pkg
				# At this point, we've found the highest visible
				# match from the current repo. Any lower versions
				# from this repo are ignored, so the loop
				# will always end with a break statement below
				# this point.
				if find_existing_node:
					e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
					if not e_pkg:
						break
					if portage.dep.match_from_list(atom, [e_pkg]):
						if highest_version and \
							e_pkg.cp == atom_cp and \
							e_pkg < highest_version and \
							e_pkg.slot_atom != highest_version.slot_atom:
							# There is a higher version available in a
							# different slot, so this existing node is
							# irrelevant.
							pass
						else:
							matched_packages.append(e_pkg)
							existing_node = e_pkg
					break
				# Compare built package to current config and
				# reject the built package if necessary.
				if built and not installed and \
					("--newuse" in self.myopts or \
					"--reinstall" in self.myopts):
					iuses = pkg.iuse.all
					old_use = pkg.use.enabled
					if myeb:
						pkgsettings.setcpv(myeb)
					else:
						pkgsettings.setcpv(pkg)
					now_use = pkgsettings["PORTAGE_USE"].split()
					forced_flags = set()
					forced_flags.update(pkgsettings.useforce)
					forced_flags.update(pkgsettings.usemask)
					cur_iuse = iuses
					if myeb and not usepkgonly:
						cur_iuse = myeb.iuse.all
					if self._reinstall_for_flags(forced_flags,
						old_use, iuses,
						now_use, cur_iuse):
						break
				# Compare current config to installed package
				# and do not reinstall if possible.
				if not installed and \
					("--newuse" in self.myopts or \
					"--reinstall" in self.myopts) and \
					cpv in vardb.match(atom):
					pkgsettings.setcpv(pkg)
					forced_flags = set()
					forced_flags.update(pkgsettings.useforce)
					forced_flags.update(pkgsettings.usemask)
					old_use = vardb.aux_get(cpv, ["USE"])[0].split()
					old_iuse = set(filter_iuse_defaults(
						vardb.aux_get(cpv, ["IUSE"])[0].split()))
					cur_use = pkg.use.enabled
					cur_iuse = pkg.iuse.all
					reinstall_for_flags = \
						self._reinstall_for_flags(
						forced_flags, old_use, old_iuse,
						cur_use, cur_iuse)
					if reinstall_for_flags:
						reinstall = True
				if not built:
					myeb = pkg
				matched_packages.append(pkg)
				if reinstall_for_flags:
					self._reinstall_nodes[pkg] = \
						reinstall_for_flags
				break

	if not matched_packages:
		return None, None

	if "--debug" in self.myopts:
		for pkg in matched_packages:
			portage.writemsg("%s %s\n" % \
				((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)

	# Filter out any old-style virtual matches if they are
	# mixed with new-style virtual matches.
	cp = portage.dep_getkey(atom)
	if len(matched_packages) > 1 and \
		"virtual" == portage.catsplit(cp)[0]:
		for pkg in matched_packages:
			if pkg.cp != cp:
				continue
			# Got a new-style virtual, so filter
			# out any old-style virtuals.
			matched_packages = [pkg for pkg in matched_packages \
				if pkg.cp == cp]
			break

	if len(matched_packages) > 1:
		bestmatch = portage.best(
			[pkg.cpv for pkg in matched_packages])
		matched_packages = [pkg for pkg in matched_packages \
			if portage.dep.cpvequal(pkg.cpv, bestmatch)]

	# ordered by type preference ("ebuild" type is the last resort)
	return matched_packages[-1], existing_node
4080 -
4081 - def _select_pkg_from_graph(self, root, atom, onlydeps=False):
4082 - """
4083 - Select packages that have already been added to the graph or
4084 - those that are installed and have not been scheduled for
4085 - replacement.
4086 - """
4087 - graph_db = self._graph_trees[root]["porttree"].dbapi
4088 - matches = graph_db.match_pkgs(atom)
4089 - if not matches:
4090 - return None, None
4091 - pkg = matches[-1] # highest match
4092 - in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
4093 - return pkg, in_graph
4094 -
def _complete_graph(self):
	"""
	Add any deep dependencies of required sets (args, system, world) that
	have not been pulled into the graph yet. This ensures that the graph
	is consistent such that initially satisfied deep dependencies are not
	broken in the new graph. Initially unsatisfied dependencies are
	irrelevant since we only want to avoid breaking dependencies that are
	initially satisfied.

	Since this method can consume enough time to disturb users, it is
	currently only enabled by the --complete-graph option.

	@rtype: int
	@returns: 1 on success, 0 when graph creation or package
		insertion fails
	"""
	if "--buildpkgonly" in self.myopts or \
		"recurse" not in self.myparams:
		return 1

	if "complete" not in self.myparams:
		# Skip this to avoid consuming enough time to disturb users.
		return 1

	# Put the depgraph into a mode that causes it to only
	# select packages that have already been added to the
	# graph or those that are installed and have not been
	# scheduled for replacement. Also, toggle the "deep"
	# parameter so that all dependencies are traversed and
	# accounted for.
	self._select_atoms = self._select_atoms_from_graph
	self._select_package = self._select_pkg_from_graph
	already_deep = "deep" in self.myparams
	if not already_deep:
		self.myparams.add("deep")

	for root in self.roots:
		required_set_names = self._required_set_names.copy()
		if root == self.target_root and \
			(already_deep or "empty" in self.myparams):
			# These sets have already been fully traversed.
			required_set_names.difference_update(self._sets)
		if not required_set_names and not self._ignored_deps:
			continue
		root_config = self.roots[root]
		setconfig = root_config.setconfig
		args = []
		# Reuse existing SetArg instances when available.
		for arg in self.digraph.root_nodes():
			if not isinstance(arg, SetArg):
				continue
			if arg.root_config != root_config:
				continue
			if arg.name in required_set_names:
				args.append(arg)
				required_set_names.remove(arg.name)
		# Create new SetArg instances only when necessary.
		for s in required_set_names:
			expanded_set = InternalPackageSet(
				initial_atoms=setconfig.getSetAtoms(s))
			atom = SETPREFIX + s
			args.append(SetArg(arg=atom, set=expanded_set,
				root_config=root_config))
		vardb = root_config.trees["vartree"].dbapi
		# Queue every atom of every required set for traversal.
		for arg in args:
			for atom in arg.set:
				self._dep_stack.append(
					Dependency(atom=atom, root=root, parent=arg))
		if self._ignored_deps:
			self._dep_stack.extend(self._ignored_deps)
			self._ignored_deps = []
		if not self._create_graph(allow_unsatisfied=True):
			return 0
		# Check the unsatisfied deps to see if any initially satisfied deps
		# will become unsatisfied due to an upgrade. Initially unsatisfied
		# deps are irrelevant since we only want to avoid breaking deps
		# that are initially satisfied.
		while self._unsatisfied_deps:
			dep = self._unsatisfied_deps.pop()
			matches = vardb.match_pkgs(dep.atom)
			if not matches:
				self._initially_unsatisfied_deps.append(dep)
				continue
			# A scheduled installation broke a deep dependency.
			# Add the installed package to the graph so that it
			# will be appropriately reported as a slot collision
			# (possibly solvable via backtracking).
			pkg = matches[-1] # highest match
			if not self._add_pkg(pkg, dep):
				return 0
			if not self._create_graph(allow_unsatisfied=True):
				return 0
	return 1
4183 -
def _pkg(self, cpv, type_name, root_config, installed=False):
	"""
	Get a package instance from the cache, or create a new
	one if necessary.

	@param cpv: category/package-version string to look up
	@param type_name: package type ("ebuild", "installed", ...),
		mapped to a tree via self.pkg_tree_map
	@param root_config: RootConfig providing the trees to query
	@param installed: when True, cache under the "nomerge" operation
	@rtype: Package
	@returns: the cached or newly constructed Package

	Raises portage.exception.PackageNotFound when the metadata for
	cpv is unavailable (the package does not exist or is corrupt).
	"""
	operation = "merge"
	if installed:
		operation = "nomerge"
	pkg = self._pkg_cache.get(
		(type_name, root_config.root, cpv, operation))
	if pkg is None:
		tree_type = self.pkg_tree_map[type_name]
		db = root_config.trees[tree_type].dbapi
		# Use the original (unfiltered) trees' aux cache keys so all
		# required metadata keys are fetched.
		db_keys = list(self._trees_orig[root_config.root][
			tree_type].dbapi._aux_cache_keys)
		try:
			metadata = izip(db_keys, db.aux_get(cpv, db_keys))
		except KeyError:
			raise portage.exception.PackageNotFound(cpv)
		pkg = Package(cpv=cpv, metadata=metadata,
			root_config=root_config, installed=installed)
		if type_name == "ebuild":
			# Ebuild USE/CHOST are derived from the current config
			# rather than stored metadata.
			settings = self.pkgsettings[root_config.root]
			settings.setcpv(pkg)
			pkg.metadata["USE"] = settings["PORTAGE_USE"]
			pkg.metadata['CHOST'] = settings.get('CHOST', '')
		self._pkg_cache[pkg] = pkg
	return pkg
4214 -
4215 - def validate_blockers(self):
4216 - """Remove any blockers from the digraph that do not match any of the
4217 - packages within the graph. If necessary, create hard deps to ensure
4218 - correct merge order such that mutually blocking packages are never
4219 - installed simultaneously."""
4220 -
4221 - if "--buildpkgonly" in self.myopts or \
4222 - "--nodeps" in self.myopts:
4223 - return True
4224 -
4225 - #if "deep" in self.myparams:
4226 - if True:
4227 - # Pull in blockers from all installed packages that haven't already
4228 - # been pulled into the depgraph. This is not enabled by default
4229 - # due to the performance penalty that is incurred by all the
4230 - # additional dep_check calls that are required.
4231 -
4232 - dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
4233 - for myroot in self.trees:
4234 - vardb = self.trees[myroot]["vartree"].dbapi
4235 - portdb = self.trees[myroot]["porttree"].dbapi
4236 - pkgsettings = self.pkgsettings[myroot]
4237 - final_db = self.mydbapi[myroot]
4238 -
4239 - blocker_cache = BlockerCache(myroot, vardb)
4240 - stale_cache = set(blocker_cache)
4241 - for pkg in vardb:
4242 - cpv = pkg.cpv
4243 - stale_cache.discard(cpv)
4244 - pkg_in_graph = self.digraph.contains(pkg)
4245 -
4246 - # Check for masked installed packages. Only warn about
4247 - # packages that are in the graph in order to avoid warning
4248 - # about those that will be automatically uninstalled during
4249 - # the merge process or by --depclean.
4250 - if pkg in final_db:
4251 - if pkg_in_graph and not visible(pkgsettings, pkg):
4252 - self._masked_installed.add(pkg)
4253 -
4254 - blocker_atoms = None
4255 - blockers = None
4256 - if pkg_in_graph:
4257 - blockers = []
4258 - try:
4259 - blockers.extend(
4260 - self._blocker_parents.child_nodes(pkg))
4261 - except KeyError:
4262 - pass
4263 - try:
4264 - blockers.extend(
4265 - self._irrelevant_blockers.child_nodes(pkg))
4266 - except KeyError:
4267 - pass
4268 - if blockers is not None:
4269 - blockers = set(str(blocker.atom) \
4270 - for blocker in blockers)
4271 -
4272 - # If this node has any blockers, create a "nomerge"
4273 - # node for it so that they can be enforced.
4274 - self.spinner.update()
4275 - blocker_data = blocker_cache.get(cpv)
4276 - if blocker_data is not None and \
4277 - blocker_data.counter != long(pkg.metadata["COUNTER"]):
4278 - blocker_data = None
4279 -
4280 - # If blocker data from the graph is available, use
4281 - # it to validate the cache and update the cache if
4282 - # it seems invalid.
4283 - if blocker_data is not None and \
4284 - blockers is not None:
4285 - if not blockers.symmetric_difference(
4286 - blocker_data.atoms):
4287 - continue
4288 - blocker_data = None
4289 -
4290 - if blocker_data is None and \
4291 - blockers is not None:
4292 - # Re-use the blockers from the graph.
4293 - blocker_atoms = sorted(blockers)
4294 - counter = long(pkg.metadata["COUNTER"])
4295 - blocker_data = \
4296 - blocker_cache.BlockerData(counter, blocker_atoms)
4297 - blocker_cache[pkg.cpv] = blocker_data
4298 - continue
4299 -
4300 - if blocker_data:
4301 - blocker_atoms = blocker_data.atoms
4302 - else:
4303 - # Use aux_get() to trigger FakeVartree global
4304 - # updates on *DEPEND when appropriate.
4305 - depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4306 - # It is crucial to pass in final_db here in order to
4307 - # optimize dep_check calls by eliminating atoms via
4308 - # dep_wordreduce and dep_eval calls.
4309 - try:
4310 - portage.dep._dep_check_strict = False
4311 - try:
4312 - success, atoms = portage.dep_check(depstr,
4313 - final_db, pkgsettings, myuse=pkg.use.enabled,
4314 - trees=self._graph_trees, myroot=myroot)
4315 - except Exception, e:
4316 - if isinstance(e, SystemExit):
4317 - raise
4318 - # This is helpful, for example, if a ValueError
4319 - # is thrown from cpv_expand due to multiple
4320 - # matches (this can happen if an atom lacks a
4321 - # category).
4322 - show_invalid_depstring_notice(
4323 - pkg, depstr, str(e))
4324 - del e
4325 - raise
4326 - finally:
4327 - portage.dep._dep_check_strict = True
4328 - if not success:
4329 - replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
4330 - if replacement_pkg and \
4331 - replacement_pkg[0].operation == "merge":
4332 - # This package is being replaced anyway, so
4333 - # ignore invalid dependencies so as not to
4334 - # annoy the user too much (otherwise they'd be
4335 - # forced to manually unmerge it first).
4336 - continue
4337 - show_invalid_depstring_notice(pkg, depstr, atoms)
4338 - return False
4339 - blocker_atoms = [myatom for myatom in atoms \
4340 - if myatom.startswith("!")]
4341 - blocker_atoms.sort()
4342 - counter = long(pkg.metadata["COUNTER"])
4343 - blocker_cache[cpv] = \
4344 - blocker_cache.BlockerData(counter, blocker_atoms)
4345 - if blocker_atoms:
4346 - try:
4347 - for atom in blocker_atoms:
4348 - blocker = Blocker(atom=portage.dep.Atom(atom),
4349 - eapi=pkg.metadata["EAPI"], root=myroot)
4350 - self._blocker_parents.add(blocker, pkg)
4351 - except portage.exception.InvalidAtom, e:
4352 - depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4353 - show_invalid_depstring_notice(
4354 - pkg, depstr, "Invalid Atom: %s" % (e,))
4355 - return False
4356 - for cpv in stale_cache:
4357 - del blocker_cache[cpv]
4358 - blocker_cache.flush()
4359 - del blocker_cache
4360 -
4361 - # Discard any "uninstall" tasks scheduled by previous calls
4362 - # to this method, since those tasks may not make sense given
4363 - # the current graph state.
4364 - previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
4365 - if previous_uninstall_tasks:
4366 - self._blocker_uninstalls = digraph()
4367 - self.digraph.difference_update(previous_uninstall_tasks)
4368 -
4369 - for blocker in self._blocker_parents.leaf_nodes():
4370 - self.spinner.update()
4371 - root_config = self.roots[blocker.root]
4372 - virtuals = root_config.settings.getvirtuals()
4373 - myroot = blocker.root
4374 - initial_db = self.trees[myroot]["vartree"].dbapi
4375 - final_db = self.mydbapi[myroot]
4376 -
4377 - provider_virtual = False
4378 - if blocker.cp in virtuals and \
4379 - not self._have_new_virt(blocker.root, blocker.cp):
4380 - provider_virtual = True
4381 -
4382 - # Use this to check PROVIDE for each matched package
4383 - # when necessary.
4384 - atom_set = InternalPackageSet(
4385 - initial_atoms=[blocker.atom])
4386 -
4387 - if provider_virtual:
4388 - atoms = []
4389 - for provider_entry in virtuals[blocker.cp]:
4390 - provider_cp = \
4391 - portage.dep_getkey(provider_entry)
4392 - atoms.append(blocker.atom.replace(
4393 - blocker.cp, provider_cp))
4394 - else:
4395 - atoms = [blocker.atom]
4396 -
4397 - blocked_initial = set()
4398 - for atom in atoms:
4399 - for pkg in initial_db.match_pkgs(atom):
4400 - if atom_set.findAtomForPackage(pkg):
4401 - blocked_initial.add(pkg)
4402 -
4403 - blocked_final = set()
4404 - for atom in atoms:
4405 - for pkg in final_db.match_pkgs(atom):
4406 - if atom_set.findAtomForPackage(pkg):
4407 - blocked_final.add(pkg)
4408 -
4409 - if not blocked_initial and not blocked_final:
4410 - parent_pkgs = self._blocker_parents.parent_nodes(blocker)
4411 - self._blocker_parents.remove(blocker)
4412 - # Discard any parents that don't have any more blockers.
4413 - for pkg in parent_pkgs:
4414 - self._irrelevant_blockers.add(blocker, pkg)
4415 - if not self._blocker_parents.child_nodes(pkg):
4416 - self._blocker_parents.remove(pkg)
4417 - continue
4418 - for parent in self._blocker_parents.parent_nodes(blocker):
4419 - unresolved_blocks = False
4420 - depends_on_order = set()
4421 - for pkg in blocked_initial:
4422 - if pkg.slot_atom == parent.slot_atom:
4423 - # TODO: Support blocks within slots in cases where it
4424 - # might make sense. For example, a new version might
4425 - # require that the old version be uninstalled at build
4426 - # time.
4427 - continue
4428 - if parent.installed:
4429 - # Two currently installed packages conflict with
4430 - # each other. Ignore this case since the damage
4431 - # is already done and this would be likely to
4432 - # confuse users if displayed like a normal blocker.
4433 - continue
4434 -
4435 - self._blocked_pkgs.add(pkg, blocker)
4436 -
4437 - if parent.operation == "merge":
4438 - # Maybe the blocked package can be replaced or simply
4439 - # unmerged to resolve this block.
4440 - depends_on_order.add((pkg, parent))
4441 - continue
4442 - # None of the above blocker resolution techniques apply,
4443 - # so apparently this one is unresolvable.
4444 - unresolved_blocks = True
4445 - for pkg in blocked_final:
4446 - if pkg.slot_atom == parent.slot_atom:
4447 - # TODO: Support blocks within slots.
4448 - continue
4449 - if parent.operation == "nomerge" and \
4450 - pkg.operation == "nomerge":
4451 - # This blocker will be handled the next time that a
4452 - # merge of either package is triggered.
4453 - continue
4454 -
4455 - self._blocked_pkgs.add(pkg, blocker)
4456 -
4457 - # Maybe the blocking package can be
4458 - # unmerged to resolve this block.
4459 - if parent.operation == "merge" and pkg.installed:
4460 - depends_on_order.add((pkg, parent))
4461 - continue
4462 - elif parent.operation == "nomerge":
4463 - depends_on_order.add((parent, pkg))
4464 - continue
4465 - # None of the above blocker resolution techniques apply,
4466 - # so apparently this one is unresolvable.
4467 - unresolved_blocks = True
4468 -
4469 - # Make sure we don't unmerge any package that has been pulled
4470 - # into the graph.
4471 - if not unresolved_blocks and depends_on_order:
4472 - for inst_pkg, inst_task in depends_on_order:
4473 - if self.digraph.contains(inst_pkg) and \
4474 - self.digraph.parent_nodes(inst_pkg):
4475 - unresolved_blocks = True
4476 - break
4477 -
4478 - if not unresolved_blocks and depends_on_order:
4479 - for inst_pkg, inst_task in depends_on_order:
4480 - uninst_task = Package(built=inst_pkg.built,
4481 - cpv=inst_pkg.cpv, installed=inst_pkg.installed,
4482 - metadata=inst_pkg.metadata,
4483 - operation="uninstall",
4484 - root_config=inst_pkg.root_config,
4485 - type_name=inst_pkg.type_name)
4486 - self._pkg_cache[uninst_task] = uninst_task
4487 - # Enforce correct merge order with a hard dep.
4488 - self.digraph.addnode(uninst_task, inst_task,
4489 - priority=BlockerDepPriority.instance)
4490 - # Count references to this blocker so that it can be
4491 - # invalidated after nodes referencing it have been
4492 - # merged.
4493 - self._blocker_uninstalls.addnode(uninst_task, blocker)
4494 - if not unresolved_blocks and not depends_on_order:
4495 - self._irrelevant_blockers.add(blocker, parent)
4496 - self._blocker_parents.remove_edge(blocker, parent)
4497 - if not self._blocker_parents.parent_nodes(blocker):
4498 - self._blocker_parents.remove(blocker)
4499 - if not self._blocker_parents.child_nodes(parent):
4500 - self._blocker_parents.remove(parent)
4501 - if unresolved_blocks:
4502 - self._unsolvable_blockers.add(blocker, parent)
4503 -
4504 - return True
4505 -
4506 - def _accept_blocker_conflicts(self):
4507 - acceptable = False
4508 - for x in ("--buildpkgonly", "--fetchonly",
4509 - "--fetch-all-uri", "--nodeps"):
4510 - if x in self.myopts:
4511 - acceptable = True
4512 - break
4513 - return acceptable
4514 -
4515 - def _merge_order_bias(self, mygraph):
4516 - """
4517 - For optimal leaf node selection, promote deep system runtime deps and
4518 - order nodes from highest to lowest overall reference count.
4519 - """
4520 -
4521 - node_info = {}
4522 - for node in mygraph.order:
4523 - node_info[node] = len(mygraph.parent_nodes(node))
4524 - deep_system_deps = _find_deep_system_runtime_deps(mygraph)
4525 -
4526 - def cmp_merge_preference(node1, node2):
4527 -
4528 - if node1.operation == 'uninstall':
4529 - if node2.operation == 'uninstall':
4530 - return 0
4531 - return 1
4532 -
4533 - if node2.operation == 'uninstall':
4534 - if node1.operation == 'uninstall':
4535 - return 0
4536 - return -1
4537 -
4538 - node1_sys = node1 in deep_system_deps
4539 - node2_sys = node2 in deep_system_deps
4540 - if node1_sys != node2_sys:
4541 - if node1_sys:
4542 - return -1
4543 - return 1
4544 -
4545 - return node_info[node2] - node_info[node1]
4546 -
4547 - mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
4548 -
4549 - def altlist(self, reversed=False):
4550 -
4551 - while self._serialized_tasks_cache is None:
4552 - self._resolve_conflicts()
4553 - try:
4554 - self._serialized_tasks_cache, self._scheduler_graph = \
4555 - self._serialize_tasks()
4556 - except self._serialize_tasks_retry:
4557 - pass
4558 -
4559 - retlist = self._serialized_tasks_cache[:]
4560 - if reversed:
4561 - retlist.reverse()
4562 - return retlist
4563 -
4564 - def schedulerGraph(self):
4565 - """
4566 - The scheduler graph is identical to the normal one except that
4567 - uninstall edges are reversed in specific cases that require
4568 - conflicting packages to be temporarily installed simultaneously.
4569 - This is intended for use by the Scheduler in it's parallelization
4570 - logic. It ensures that temporary simultaneous installation of
4571 - conflicting packages is avoided when appropriate (especially for
4572 - !!atom blockers), but allowed in specific cases that require it.
4573 -
4574 - Note that this method calls break_refs() which alters the state of
4575 - internal Package instances such that this depgraph instance should
4576 - not be used to perform any more calculations.
4577 - """
4578 - if self._scheduler_graph is None:
4579 - self.altlist()
4580 - self.break_refs(self._scheduler_graph.order)
4581 - return self._scheduler_graph
4582 -
4583 - def break_refs(self, nodes):
4584 - """
4585 - Take a mergelist like that returned from self.altlist() and
4586 - break any references that lead back to the depgraph. This is
4587 - useful if you want to hold references to packages without
4588 - also holding the depgraph on the heap.
4589 - """
4590 - for node in nodes:
4591 - if hasattr(node, "root_config"):
4592 - # The FakeVartree references the _package_cache which
4593 - # references the depgraph. So that Package instances don't
4594 - # hold the depgraph and FakeVartree on the heap, replace
4595 - # the RootConfig that references the FakeVartree with the
4596 - # original RootConfig instance which references the actual
4597 - # vartree.
4598 - node.root_config = \
4599 - self._trees_orig[node.root_config.root]["root_config"]
4600 -
4601 - def _resolve_conflicts(self):
4602 - if not self._complete_graph():
4603 - raise self._unknown_internal_error()
4604 -
4605 - if not self.validate_blockers():
4606 - raise self._unknown_internal_error()
4607 -
4608 - if self._slot_collision_info:
4609 - self._process_slot_conflicts()
4610 -
4611 - def _serialize_tasks(self):
4612 -
4613 - if "--debug" in self.myopts:
4614 - writemsg("\ndigraph:\n\n", noiselevel=-1)
4615 - self.digraph.debug_print()
4616 - writemsg("\n", noiselevel=-1)
4617 -
4618 - scheduler_graph = self.digraph.copy()
4619 -
4620 - if '--nodeps' in self.myopts:
4621 - # Preserve the package order given on the command line.
4622 - return ([node for node in scheduler_graph \
4623 - if isinstance(node, Package) \
4624 - and node.operation == 'merge'], scheduler_graph)
4625 -
4626 - mygraph=self.digraph.copy()
4627 - # Prune "nomerge" root nodes if nothing depends on them, since
4628 - # otherwise they slow down merge order calculation. Don't remove
4629 - # non-root nodes since they help optimize merge order in some cases
4630 - # such as revdep-rebuild.
4631 - removed_nodes = set()
4632 - while True:
4633 - for node in mygraph.root_nodes():
4634 - if not isinstance(node, Package) or \
4635 - node.installed or node.onlydeps:
4636 - removed_nodes.add(node)
4637 - if removed_nodes:
4638 - self.spinner.update()
4639 - mygraph.difference_update(removed_nodes)
4640 - if not removed_nodes:
4641 - break
4642 - removed_nodes.clear()
4643 - self._merge_order_bias(mygraph)
4644 - def cmp_circular_bias(n1, n2):
4645 - """
4646 - RDEPEND is stronger than PDEPEND and this function
4647 - measures such a strength bias within a circular
4648 - dependency relationship.
4649 - """
4650 - n1_n2_medium = n2 in mygraph.child_nodes(n1,
4651 - ignore_priority=priority_range.ignore_medium_soft)
4652 - n2_n1_medium = n1 in mygraph.child_nodes(n2,
4653 - ignore_priority=priority_range.ignore_medium_soft)
4654 - if n1_n2_medium == n2_n1_medium:
4655 - return 0
4656 - elif n1_n2_medium:
4657 - return 1
4658 - return -1
4659 - myblocker_uninstalls = self._blocker_uninstalls.copy()
4660 - retlist=[]
4661 - # Contains uninstall tasks that have been scheduled to
4662 - # occur after overlapping blockers have been installed.
4663 - scheduled_uninstalls = set()
4664 - # Contains any Uninstall tasks that have been ignored
4665 - # in order to avoid the circular deps code path. These
4666 - # correspond to blocker conflicts that could not be
4667 - # resolved.
4668 - ignored_uninstall_tasks = set()
4669 - have_uninstall_task = False
4670 - complete = "complete" in self.myparams
4671 - asap_nodes = []
4672 -
4673 - def get_nodes(**kwargs):
4674 - """
4675 - Returns leaf nodes excluding Uninstall instances
4676 - since those should be executed as late as possible.
4677 - """
4678 - return [node for node in mygraph.leaf_nodes(**kwargs) \
4679 - if isinstance(node, Package) and \
4680 - (node.operation != "uninstall" or \
4681 - node in scheduled_uninstalls)]
4682 -
4683 - # sys-apps/portage needs special treatment if ROOT="/"
4684 - running_root = self._running_root.root
4685 - from portage.const import PORTAGE_PACKAGE_ATOM
4686 - runtime_deps = InternalPackageSet(
4687 - initial_atoms=[PORTAGE_PACKAGE_ATOM])
4688 - running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
4689 - PORTAGE_PACKAGE_ATOM)
4690 - replacement_portage = self.mydbapi[running_root].match_pkgs(
4691 - PORTAGE_PACKAGE_ATOM)
4692 -
4693 - if running_portage:
4694 - running_portage = running_portage[0]
4695 - else:
4696 - running_portage = None
4697 -
4698 - if replacement_portage:
4699 - replacement_portage = replacement_portage[0]
4700 - else:
4701 - replacement_portage = None
4702 -
4703 - if replacement_portage == running_portage:
4704 - replacement_portage = None
4705 -
4706 - if replacement_portage is not None:
4707 - # update from running_portage to replacement_portage asap
4708 - asap_nodes.append(replacement_portage)
4709 -
4710 - if running_portage is not None:
4711 - try:
4712 - portage_rdepend = self._select_atoms_highest_available(
4713 - running_root, running_portage.metadata["RDEPEND"],
4714 - myuse=running_portage.use.enabled,
4715 - parent=running_portage, strict=False)
4716 - except portage.exception.InvalidDependString, e:
4717 - portage.writemsg("!!! Invalid RDEPEND in " + \
4718 - "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
4719 - (running_root, running_portage.cpv, e), noiselevel=-1)
4720 - del e
4721 - portage_rdepend = []
4722 - runtime_deps.update(atom for atom in portage_rdepend \
4723 - if not atom.startswith("!"))
4724 -
4725 - def gather_deps(ignore_priority, mergeable_nodes,
4726 - selected_nodes, node):
4727 - """
4728 - Recursively gather a group of nodes that RDEPEND on
4729 - eachother. This ensures that they are merged as a group
4730 - and get their RDEPENDs satisfied as soon as possible.
4731 - """
4732 - if node in selected_nodes:
4733 - return True
4734 - if node not in mergeable_nodes:
4735 - return False
4736 - if node == replacement_portage and \
4737 - mygraph.child_nodes(node,
4738 - ignore_priority=priority_range.ignore_medium_soft):
4739 - # Make sure that portage always has all of its
4740 - # RDEPENDs installed first.
4741 - return False
4742 - selected_nodes.add(node)
4743 - for child in mygraph.child_nodes(node,
4744 - ignore_priority=ignore_priority):
4745 - if not gather_deps(ignore_priority,
4746 - mergeable_nodes, selected_nodes, child):
4747 - return False
4748 - return True
4749 -
4750 - def ignore_uninst_or_med(priority):
4751 - if priority is BlockerDepPriority.instance:
4752 - return True
4753 - return priority_range.ignore_medium(priority)
4754 -
4755 - def ignore_uninst_or_med_soft(priority):
4756 - if priority is BlockerDepPriority.instance:
4757 - return True
4758 - return priority_range.ignore_medium_soft(priority)
4759 -
4760 - tree_mode = "--tree" in self.myopts
4761 - # Tracks whether or not the current iteration should prefer asap_nodes
4762 - # if available. This is set to False when the previous iteration
4763 - # failed to select any nodes. It is reset whenever nodes are
4764 - # successfully selected.
4765 - prefer_asap = True
4766 -
4767 - # Controls whether or not the current iteration should drop edges that
4768 - # are "satisfied" by installed packages, in order to solve circular
4769 - # dependencies. The deep runtime dependencies of installed packages are
4770 - # not checked in this case (bug #199856), so it must be avoided
4771 - # whenever possible.
4772 - drop_satisfied = False
4773 -
4774 - # State of variables for successive iterations that loosen the
4775 - # criteria for node selection.
4776 - #
4777 - # iteration prefer_asap drop_satisfied
4778 - # 1 True False
4779 - # 2 False False
4780 - # 3 False True
4781 - #
4782 - # If no nodes are selected on the last iteration, it is due to
4783 - # unresolved blockers or circular dependencies.
4784 -
4785 - while not mygraph.empty():
4786 - self.spinner.update()
4787 - selected_nodes = None
4788 - ignore_priority = None
4789 - if drop_satisfied or (prefer_asap and asap_nodes):
4790 - priority_range = DepPrioritySatisfiedRange
4791 - else:
4792 - priority_range = DepPriorityNormalRange
4793 - if prefer_asap and asap_nodes:
4794 - # ASAP nodes are merged before their soft deps. Go ahead and
4795 - # select root nodes here if necessary, since it's typical for
4796 - # the parent to have been removed from the graph already.
4797 - asap_nodes = [node for node in asap_nodes \
4798 - if mygraph.contains(node)]
4799 - for node in asap_nodes:
4800 - if not mygraph.child_nodes(node,
4801 - ignore_priority=priority_range.ignore_soft):
4802 - selected_nodes = [node]
4803 - asap_nodes.remove(node)
4804 - break
4805 - if not selected_nodes and \
4806 - not (prefer_asap and asap_nodes):
4807 - for i in xrange(priority_range.NONE,
4808 - priority_range.MEDIUM_SOFT + 1):
4809 - ignore_priority = priority_range.ignore_priority[i]
4810 - nodes = get_nodes(ignore_priority=ignore_priority)
4811 - if nodes:
4812 - # If there is a mix of uninstall nodes with other
4813 - # types, save the uninstall nodes for later since
4814 - # sometimes a merge node will render an uninstall
4815 - # node unnecessary (due to occupying the same slot),
4816 - # and we want to avoid executing a separate uninstall
4817 - # task in that case.
4818 - if len(nodes) > 1:
4819 - good_uninstalls = []
4820 - with_some_uninstalls_excluded = []
4821 - for node in nodes:
4822 - if node.operation == "uninstall":
4823 - slot_node = self.mydbapi[node.root
4824 - ].match_pkgs(node.slot_atom)
4825 - if slot_node and \
4826 - slot_node[0].operation == "merge":
4827 - continue
4828 - good_uninstalls.append(node)
4829 - with_some_uninstalls_excluded.append(node)
4830 - if good_uninstalls:
4831 - nodes = good_uninstalls
4832 - elif with_some_uninstalls_excluded:
4833 - nodes = with_some_uninstalls_excluded
4834 - else:
4835 - nodes = nodes
4836 -
4837 - if ignore_priority is None and not tree_mode:
4838 - # Greedily pop all of these nodes since no
4839 - # relationship has been ignored. This optimization
4840 - # destroys --tree output, so it's disabled in tree
4841 - # mode.
4842 - selected_nodes = nodes
4843 - else:
4844 - # For optimal merge order:
4845 - # * Only pop one node.
4846 - # * Removing a root node (node without a parent)
4847 - # will not produce a leaf node, so avoid it.
4848 - # * It's normal for a selected uninstall to be a
4849 - # root node, so don't check them for parents.
4850 - for node in nodes:
4851 - if node.operation == "uninstall" or \
4852 - mygraph.parent_nodes(node):
4853 - selected_nodes = [node]
4854 - break
4855 -
4856 - if selected_nodes:
4857 - break
4858 -
4859 - if not selected_nodes:
4860 - nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
4861 - if nodes:
4862 - mergeable_nodes = set(nodes)
4863 - if prefer_asap and asap_nodes:
4864 - nodes = asap_nodes
4865 - for i in xrange(priority_range.SOFT,
4866 - priority_range.MEDIUM_SOFT + 1):
4867 - ignore_priority = priority_range.ignore_priority[i]
4868 - for node in nodes:
4869 - if not mygraph.parent_nodes(node):
4870 - continue
4871 - selected_nodes = set()
4872 - if gather_deps(ignore_priority,
4873 - mergeable_nodes, selected_nodes, node):
4874 - break
4875 - else:
4876 - selected_nodes = None
4877 - if selected_nodes:
4878 - break
4879 -
4880 - if prefer_asap and asap_nodes and not selected_nodes:
4881 - # We failed to find any asap nodes to merge, so ignore
4882 - # them for the next iteration.
4883 - prefer_asap = False
4884 - continue
4885 -
4886 - if selected_nodes and ignore_priority is not None:
4887 - # Try to merge ignored medium_soft deps as soon as possible
4888 - # if they're not satisfied by installed packages.
4889 - for node in selected_nodes:
4890 - children = set(mygraph.child_nodes(node))
4891 - soft = children.difference(
4892 - mygraph.child_nodes(node,
4893 - ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
4894 - medium_soft = children.difference(
4895 - mygraph.child_nodes(node,
4896 - ignore_priority = \
4897 - DepPrioritySatisfiedRange.ignore_medium_soft))
4898 - medium_soft.difference_update(soft)
4899 - for child in medium_soft:
4900 - if child in selected_nodes:
4901 - continue
4902 - if child in asap_nodes:
4903 - continue
4904 - asap_nodes.append(child)
4905 -
4906 - if selected_nodes and len(selected_nodes) > 1:
4907 - if not isinstance(selected_nodes, list):
4908 - selected_nodes = list(selected_nodes)
4909 - selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
4910 -
4911 - if not selected_nodes and not myblocker_uninstalls.is_empty():
4912 - # An Uninstall task needs to be executed in order to
4913 - # avoid conflict if possible.
4914 -
4915 - if drop_satisfied:
4916 - priority_range = DepPrioritySatisfiedRange
4917 - else:
4918 - priority_range = DepPriorityNormalRange
4919 -
4920 - mergeable_nodes = get_nodes(
4921 - ignore_priority=ignore_uninst_or_med)
4922 -
4923 - min_parent_deps = None
4924 - uninst_task = None
4925 - for task in myblocker_uninstalls.leaf_nodes():
4926 - # Do some sanity checks so that system or world packages
4927 - # don't get uninstalled inappropriately here (only really
4928 - # necessary when --complete-graph has not been enabled).
4929 -
4930 - if task in ignored_uninstall_tasks:
4931 - continue
4932 -
4933 - if task in scheduled_uninstalls:
4934 - # It's been scheduled but it hasn't
4935 - # been executed yet due to dependence
4936 - # on installation of blocking packages.
4937 - continue
4938 -
4939 - root_config = self.roots[task.root]
4940 - inst_pkg = self._pkg_cache[
4941 - ("installed", task.root, task.cpv, "nomerge")]
4942 -
4943 - if self.digraph.contains(inst_pkg):
4944 - continue
4945 -
4946 - forbid_overlap = False
4947 - heuristic_overlap = False
4948 - for blocker in myblocker_uninstalls.parent_nodes(task):
4949 - if blocker.eapi in ("0", "1"):
4950 - heuristic_overlap = True
4951 - elif blocker.atom.blocker.overlap.forbid:
4952 - forbid_overlap = True
4953 - break
4954 - if forbid_overlap and running_root == task.root:
4955 - continue
4956 -
4957 - if heuristic_overlap and running_root == task.root:
4958 - # Never uninstall sys-apps/portage or its essential
4959 - # dependencies, except through replacement.
4960 - try:
4961 - runtime_dep_atoms = \
4962 - list(runtime_deps.iterAtomsForPackage(task))
4963 - except portage.exception.InvalidDependString, e:
4964 - portage.writemsg("!!! Invalid PROVIDE in " + \
4965 - "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
4966 - (task.root, task.cpv, e), noiselevel=-1)
4967 - del e
4968 - continue
4969 -
4970 - # Don't uninstall a runtime dep if it appears
4971 - # to be the only suitable one installed.
4972 - skip = False
4973 - vardb = root_config.trees["vartree"].dbapi
4974 - for atom in runtime_dep_atoms:
4975 - other_version = None
4976 - for pkg in vardb.match_pkgs(atom):
4977 - if pkg.cpv == task.cpv and \
4978 - pkg.metadata["COUNTER"] == \
4979 - task.metadata["COUNTER"]:
4980 - continue
4981 - other_version = pkg
4982 - break
4983 - if other_version is None:
4984 - skip = True
4985 - break
4986 - if skip:
4987 - continue
4988 -
4989 - # For packages in the system set, don't take
4990 - # any chances. If the conflict can't be resolved
4991 - # by a normal replacement operation then abort.
4992 - skip = False
4993 - try:
4994 - for atom in root_config.sets[
4995 - "system"].iterAtomsForPackage(task):
4996 - skip = True
4997 - break
4998 - except portage.exception.InvalidDependString, e:
4999 - portage.writemsg("!!! Invalid PROVIDE in " + \
5000 - "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5001 - (task.root, task.cpv, e), noiselevel=-1)
5002 - del e
5003 - skip = True
5004 - if skip:
5005 - continue
5006 -
5007 - # Note that the world check isn't always
5008 - # necessary since self._complete_graph() will
5009 - # add all packages from the system and world sets to the
5010 - # graph. This just allows unresolved conflicts to be
5011 - # detected as early as possible, which makes it possible
5012 - # to avoid calling self._complete_graph() when it is
5013 - # unnecessary due to blockers triggering an abort.
5014 - if not complete:
5015 - # For packages in the world set, go ahead and uninstall
5016 - # when necessary, as long as the atom will be satisfied
5017 - # in the final state.
5018 - graph_db = self.mydbapi[task.root]
5019 - skip = False
5020 - try:
5021 - for atom in root_config.sets[
5022 - "world"].iterAtomsForPackage(task):
5023 - satisfied = False
5024 - for pkg in graph_db.match_pkgs(atom):
5025 - if pkg == inst_pkg:
5026 - continue
5027 - satisfied = True
5028 - break
5029 - if not satisfied:
5030 - skip = True
5031 - self._blocked_world_pkgs[inst_pkg] = atom
5032 - break
5033 - except portage.exception.InvalidDependString, e:
5034 - portage.writemsg("!!! Invalid PROVIDE in " + \
5035 - "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
5036 - (task.root, task.cpv, e), noiselevel=-1)
5037 - del e
5038 - skip = True
5039 - if skip:
5040 - continue
5041 -
5042 - # Check the deps of parent nodes to ensure that
5043 - # the chosen task produces a leaf node. Maybe
5044 - # this can be optimized some more to make the
5045 - # best possible choice, but the current algorithm
5046 - # is simple and should be near optimal for most
5047 - # common cases.
5048 - mergeable_parent = False
5049 - parent_deps = set()
5050 - for parent in mygraph.parent_nodes(task):
5051 - parent_deps.update(mygraph.child_nodes(parent,
5052 - ignore_priority=priority_range.ignore_medium_soft))
5053 - if parent in mergeable_nodes and \
5054 - gather_deps(ignore_uninst_or_med_soft,
5055 - mergeable_nodes, set(), parent):
5056 - mergeable_parent = True
5057 -
5058 - if not mergeable_parent:
5059 - continue
5060 -
5061 - parent_deps.remove(task)
5062 - if min_parent_deps is None or \
5063 - len(parent_deps) < min_parent_deps:
5064 - min_parent_deps = len(parent_deps)
5065 - uninst_task = task
5066 -
5067 - if uninst_task is not None:
5068 - # The uninstall is performed only after blocking
5069 - # packages have been merged on top of it. File
5070 - # collisions between blocking packages are detected
5071 - # and removed from the list of files to be uninstalled.
5072 - scheduled_uninstalls.add(uninst_task)
5073 - parent_nodes = mygraph.parent_nodes(uninst_task)
5074 -
5075 - # Reverse the parent -> uninstall edges since we want
5076 - # to do the uninstall after blocking packages have
5077 - # been merged on top of it.
5078 - mygraph.remove(uninst_task)
5079 - for blocked_pkg in parent_nodes:
5080 - mygraph.add(blocked_pkg, uninst_task,
5081 - priority=BlockerDepPriority.instance)
5082 - scheduler_graph.remove_edge(uninst_task, blocked_pkg)
5083 - scheduler_graph.add(blocked_pkg, uninst_task,
5084 - priority=BlockerDepPriority.instance)
5085 -
5086 - # Reset the state variables for leaf node selection and
5087 - # continue trying to select leaf nodes.
5088 - prefer_asap = True
5089 - drop_satisfied = False
5090 - continue
5091 -
5092 - if not selected_nodes:
5093 - # Only select root nodes as a last resort. This case should
5094 - # only trigger when the graph is nearly empty and the only
5095 - # remaining nodes are isolated (no parents or children). Since
5096 - # the nodes must be isolated, ignore_priority is not needed.
5097 - selected_nodes = get_nodes()
5098 -
5099 - if not selected_nodes and not drop_satisfied:
5100 - drop_satisfied = True
5101 - continue
5102 -
5103 - if not selected_nodes and not myblocker_uninstalls.is_empty():
5104 - # If possible, drop an uninstall task here in order to avoid
5105 - # the circular deps code path. The corresponding blocker will
5106 - # still be counted as an unresolved conflict.
5107 - uninst_task = None
5108 - for node in myblocker_uninstalls.leaf_nodes():
5109 - try:
5110 - mygraph.remove(node)
5111 - except KeyError:
5112 - pass
5113 - else:
5114 - uninst_task = node
5115 - ignored_uninstall_tasks.add(node)
5116 - break
5117 -
5118 - if uninst_task is not None:
5119 - # Reset the state variables for leaf node selection and
5120 - # continue trying to select leaf nodes.
5121 - prefer_asap = True
5122 - drop_satisfied = False
5123 - continue
5124 -
5125 - if not selected_nodes:
5126 - self._circular_deps_for_display = mygraph
5127 - raise self._unknown_internal_error()
5128 -
5129 - # At this point, we've succeeded in selecting one or more nodes, so
5130 - # reset state variables for leaf node selection.
5131 - prefer_asap = True
5132 - drop_satisfied = False
5133 -
5134 - mygraph.difference_update(selected_nodes)
5135 -
5136 - for node in selected_nodes:
5137 - if isinstance(node, Package) and \
5138 - node.operation == "nomerge":
5139 - continue
5140 -
5141 - # Handle interactions between blockers
5142 - # and uninstallation tasks.
5143 - solved_blockers = set()
5144 - uninst_task = None
5145 - if isinstance(node, Package) and \
5146 - "uninstall" == node.operation:
5147 - have_uninstall_task = True
5148 - uninst_task = node
5149 - else:
5150 - vardb = self.trees[node.root]["vartree"].dbapi
5151 - previous_cpv = vardb.match(node.slot_atom)
5152 - if previous_cpv:
5153 - # The package will be replaced by this one, so remove
5154 - # the corresponding Uninstall task if necessary.
5155 - previous_cpv = previous_cpv[0]
5156 - uninst_task = \
5157 - ("installed", node.root, previous_cpv, "uninstall")
5158 - try:
5159 - mygraph.remove(uninst_task)
5160 - except KeyError:
5161 - pass
5162 -
5163 - if uninst_task is not None and \
5164 - uninst_task not in ignored_uninstall_tasks and \
5165 - myblocker_uninstalls.contains(uninst_task):
5166 - blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
5167 - myblocker_uninstalls.remove(uninst_task)
5168 - # Discard any blockers that this Uninstall solves.
5169 - for blocker in blocker_nodes:
5170 - if not myblocker_uninstalls.child_nodes(blocker):
5171 - myblocker_uninstalls.remove(blocker)
5172 - solved_blockers.add(blocker)
5173 -
5174 - retlist.append(node)
5175 -
5176 - if (isinstance(node, Package) and \
5177 - "uninstall" == node.operation) or \
5178 - (uninst_task is not None and \
5179 - uninst_task in scheduled_uninstalls):
5180 - # Include satisfied blockers in the merge list
5181 - # since the user might be interested and also
5182 - # it serves as an indicator that blocking packages
5183 - # will be temporarily installed simultaneously.
5184 - for blocker in solved_blockers:
5185 - retlist.append(Blocker(atom=blocker.atom,
5186 - root=blocker.root, eapi=blocker.eapi,
5187 - satisfied=True))
5188 -
5189 - unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
5190 - for node in myblocker_uninstalls.root_nodes():
5191 - unsolvable_blockers.add(node)
5192 -
5193 - for blocker in unsolvable_blockers:
5194 - retlist.append(blocker)
5195 -
5196 - # If any Uninstall tasks need to be executed in order
5197 - # to avoid a conflict, complete the graph with any
5198 - # dependencies that may have been initially
5199 - # neglected (to ensure that unsafe Uninstall tasks
5200 - # are properly identified and blocked from execution).
5201 - if have_uninstall_task and \
5202 - not complete and \
5203 - not unsolvable_blockers:
5204 - self.myparams.add("complete")
5205 - raise self._serialize_tasks_retry("")
5206 -
5207 - if unsolvable_blockers and \
5208 - not self._accept_blocker_conflicts():
5209 - self._unsatisfied_blockers_for_display = unsolvable_blockers
5210 - self._serialized_tasks_cache = retlist[:]
5211 - self._scheduler_graph = scheduler_graph
5212 - raise self._unknown_internal_error()
5213 -
5214 - if self._slot_collision_info and \
5215 - not self._accept_blocker_conflicts():
5216 - self._serialized_tasks_cache = retlist[:]
5217 - self._scheduler_graph = scheduler_graph
5218 - raise self._unknown_internal_error()
5219 -
5220 - return retlist, scheduler_graph
5221 -
5222 - def _show_circular_deps(self, mygraph):
5223 - # No leaf nodes are available, so we have a circular
5224 - # dependency panic situation. Reduce the noise level to a
5225 - # minimum via repeated elimination of root nodes since they
5226 - # have no parents and thus can not be part of a cycle.
5227 - while True:
5228 - root_nodes = mygraph.root_nodes(
5229 - ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
5230 - if not root_nodes:
5231 - break
5232 - mygraph.difference_update(root_nodes)
5233 - # Display the USE flags that are enabled on nodes that are part
5234 - # of dependency cycles in case that helps the user decide to
5235 - # disable some of them.
5236 - display_order = []
5237 - tempgraph = mygraph.copy()
5238 - while not tempgraph.empty():
5239 - nodes = tempgraph.leaf_nodes()
5240 - if not nodes:
5241 - node = tempgraph.order[0]
5242 - else:
5243 - node = nodes[0]
5244 - display_order.append(node)
5245 - tempgraph.remove(node)
5246 - display_order.reverse()
5247 - self.myopts.pop("--quiet", None)
5248 - self.myopts.pop("--verbose", None)
5249 - self.myopts["--tree"] = True
5250 - portage.writemsg("\n\n", noiselevel=-1)
5251 - self.display(display_order)
5252 - prefix = colorize("BAD", " * ")
5253 - portage.writemsg("\n", noiselevel=-1)
5254 - portage.writemsg(prefix + "Error: circular dependencies:\n",
5255 - noiselevel=-1)
5256 - portage.writemsg("\n", noiselevel=-1)
5257 - mygraph.debug_print()
5258 - portage.writemsg("\n", noiselevel=-1)
5259 - portage.writemsg(prefix + "Note that circular dependencies " + \
5260 - "can often be avoided by temporarily\n", noiselevel=-1)
5261 - portage.writemsg(prefix + "disabling USE flags that trigger " + \
5262 - "optional dependencies.\n", noiselevel=-1)
5263 -
5264 - def _show_merge_list(self):
5265 - if self._serialized_tasks_cache is not None and \
5266 - not (self._displayed_list and \
5267 - (self._displayed_list == self._serialized_tasks_cache or \
5268 - self._displayed_list == \
5269 - list(reversed(self._serialized_tasks_cache)))):
5270 - display_list = self._serialized_tasks_cache[:]
5271 - if "--tree" in self.myopts:
5272 - display_list.reverse()
5273 - self.display(display_list)
5274 -
5275 - def _show_unsatisfied_blockers(self, blockers):
5276 - self._show_merge_list()
5277 - msg = "Error: The above package list contains " + \
5278 - "packages which cannot be installed " + \
5279 - "at the same time on the same system."
5280 - prefix = colorize("BAD", " * ")
5281 - from textwrap import wrap
5282 - portage.writemsg("\n", noiselevel=-1)
5283 - for line in wrap(msg, 70):
5284 - portage.writemsg(prefix + line + "\n", noiselevel=-1)
5285 -
5286 - # Display the conflicting packages along with the packages
5287 - # that pulled them in. This is helpful for troubleshooting
5288 - # cases in which blockers don't solve automatically and
5289 - # the reasons are not apparent from the normal merge list
5290 - # display.
5291 -
5292 - conflict_pkgs = {}
5293 - for blocker in blockers:
5294 - for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
5295 - self._blocker_parents.parent_nodes(blocker)):
5296 - parent_atoms = self._parent_atoms.get(pkg)
5297 - if not parent_atoms:
5298 - atom = self._blocked_world_pkgs.get(pkg)
5299 - if atom is not None:
5300 - parent_atoms = set([("@world", atom)])
5301 - if parent_atoms:
5302 - conflict_pkgs[pkg] = parent_atoms
5303 -
5304 - if conflict_pkgs:
5305 - # Reduce noise by pruning packages that are only
5306 - # pulled in by other conflict packages.
5307 - pruned_pkgs = set()
5308 - for pkg, parent_atoms in conflict_pkgs.iteritems():
5309 - relevant_parent = False
5310 - for parent, atom in parent_atoms:
5311 - if parent not in conflict_pkgs:
5312 - relevant_parent = True
5313 - break
5314 - if not relevant_parent:
5315 - pruned_pkgs.add(pkg)
5316 - for pkg in pruned_pkgs:
5317 - del conflict_pkgs[pkg]
5318 -
5319 - if conflict_pkgs:
5320 - msg = []
5321 - msg.append("\n")
5322 - indent = " "
5323 - # Max number of parents shown, to avoid flooding the display.
5324 - max_parents = 3
5325 - for pkg, parent_atoms in conflict_pkgs.iteritems():
5326 -
5327 - pruned_list = set()
5328 -
5329 - # Prefer packages that are not directly involved in a conflict.
5330 - for parent_atom in parent_atoms:
5331 - if len(pruned_list) >= max_parents:
5332 - break
5333 - parent, atom = parent_atom
5334 - if parent not in conflict_pkgs:
5335 - pruned_list.add(parent_atom)
5336 -
5337 - for parent_atom in parent_atoms:
5338 - if len(pruned_list) >= max_parents:
5339 - break
5340 - pruned_list.add(parent_atom)
5341 -
5342 - omitted_parents = len(parent_atoms) - len(pruned_list)
5343 - msg.append(indent + "%s pulled in by\n" % pkg)
5344 -
5345 - for parent_atom in pruned_list:
5346 - parent, atom = parent_atom
5347 - msg.append(2*indent)
5348 - if isinstance(parent,
5349 - (PackageArg, AtomArg)):
5350 - # For PackageArg and AtomArg types, it's
5351 - # redundant to display the atom attribute.
5352 - msg.append(str(parent))
5353 - else:
5354 - # Display the specific atom from SetArg or
5355 - # Package types.
5356 - msg.append("%s required by %s" % (atom, parent))
5357 - msg.append("\n")
5358 -
5359 - if omitted_parents:
5360 - msg.append(2*indent)
5361 - msg.append("(and %d more)\n" % omitted_parents)
5362 -
5363 - msg.append("\n")
5364 -
5365 - sys.stderr.write("".join(msg))
5366 - sys.stderr.flush()
5367 -
5368 - if "--quiet" not in self.myopts:
5369 - show_blocker_docs_link()
5370 -
5371 - def display(self, mylist, favorites=[], verbosity=None):
5372 -
5373 - # This is used to prevent display_problems() from
5374 - # redundantly displaying this exact same merge list
5375 - # again via _show_merge_list().
5376 - self._displayed_list = mylist
5377 -
5378 - if verbosity is None:
5379 - verbosity = ("--quiet" in self.myopts and 1 or \
5380 - "--verbose" in self.myopts and 3 or 2)
5381 - favorites_set = InternalPackageSet(favorites)
5382 - oneshot = "--oneshot" in self.myopts or \
5383 - "--onlydeps" in self.myopts
5384 - columns = "--columns" in self.myopts
5385 - changelogs=[]
5386 - p=[]
5387 - blockers = []
5388 -
5389 - counters = PackageCounters()
5390 -
5391 - if verbosity == 1 and "--verbose" not in self.myopts:
5392 - def create_use_string(*args):
5393 - return ""
5394 - else:
5395 - def create_use_string(name, cur_iuse, iuse_forced, cur_use,
5396 - old_iuse, old_use,
5397 - is_new, reinst_flags,
5398 - all_flags=(verbosity == 3 or "--quiet" in self.myopts),
5399 - alphabetical=("--alphabetical" in self.myopts)):
5400 - enabled = []
5401 - if alphabetical:
5402 - disabled = enabled
5403 - removed = enabled
5404 - else:
5405 - disabled = []
5406 - removed = []
5407 - cur_iuse = set(cur_iuse)
5408 - enabled_flags = cur_iuse.intersection(cur_use)
5409 - removed_iuse = set(old_iuse).difference(cur_iuse)
5410 - any_iuse = cur_iuse.union(old_iuse)
5411 - any_iuse = list(any_iuse)
5412 - any_iuse.sort()
5413 - for flag in any_iuse:
5414 - flag_str = None
5415 - isEnabled = False
5416 - reinst_flag = reinst_flags and flag in reinst_flags
5417 - if flag in enabled_flags:
5418 - isEnabled = True
5419 - if is_new or flag in old_use and \
5420 - (all_flags or reinst_flag):
5421 - flag_str = red(flag)
5422 - elif flag not in old_iuse:
5423 - flag_str = yellow(flag) + "%*"
5424 - elif flag not in old_use:
5425 - flag_str = green(flag) + "*"
5426 - elif flag in removed_iuse:
5427 - if all_flags or reinst_flag:
5428 - flag_str = yellow("-" + flag) + "%"
5429 - if flag in old_use:
5430 - flag_str += "*"
5431 - flag_str = "(" + flag_str + ")"
5432 - removed.append(flag_str)
5433 - continue
5434 - else:
5435 - if is_new or flag in old_iuse and \
5436 - flag not in old_use and \
5437 - (all_flags or reinst_flag):
5438 - flag_str = blue("-" + flag)
5439 - elif flag not in old_iuse:
5440 - flag_str = yellow("-" + flag)
5441 - if flag not in iuse_forced:
5442 - flag_str += "%"
5443 - elif flag in old_use:
5444 - flag_str = green("-" + flag) + "*"
5445 - if flag_str:
5446 - if flag in iuse_forced:
5447 - flag_str = "(" + flag_str + ")"
5448 - if isEnabled:
5449 - enabled.append(flag_str)
5450 - else:
5451 - disabled.append(flag_str)
5452 -
5453 - if alphabetical:
5454 - ret = " ".join(enabled)
5455 - else:
5456 - ret = " ".join(enabled + disabled + removed)
5457 - if ret:
5458 - ret = '%s="%s" ' % (name, ret)
5459 - return ret
5460 -
5461 - repo_display = RepoDisplay(self.roots)
5462 -
5463 - tree_nodes = []
5464 - display_list = []
5465 - mygraph = self.digraph.copy()
5466 -
5467 - # If there are any Uninstall instances, add the corresponding
5468 - # blockers to the digraph (useful for --tree display).
5469 -
5470 - executed_uninstalls = set(node for node in mylist \
5471 - if isinstance(node, Package) and node.operation == "unmerge")
5472 -
5473 - for uninstall in self._blocker_uninstalls.leaf_nodes():
5474 - uninstall_parents = \
5475 - self._blocker_uninstalls.parent_nodes(uninstall)
5476 - if not uninstall_parents:
5477 - continue
5478 -
5479 - # Remove the corresponding "nomerge" node and substitute
5480 - # the Uninstall node.
5481 - inst_pkg = self._pkg_cache[
5482 - ("installed", uninstall.root, uninstall.cpv, "nomerge")]
5483 - try:
5484 - mygraph.remove(inst_pkg)
5485 - except KeyError:
5486 - pass
5487 -
5488 - try:
5489 - inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
5490 - except KeyError:
5491 - inst_pkg_blockers = []
5492 -
5493 - # Break the Package -> Uninstall edges.
5494 - mygraph.remove(uninstall)
5495 -
5496 - # Resolution of a package's blockers
5497 - # depend on it's own uninstallation.
5498 - for blocker in inst_pkg_blockers:
5499 - mygraph.add(uninstall, blocker)
5500 -
5501 - # Expand Package -> Uninstall edges into
5502 - # Package -> Blocker -> Uninstall edges.
5503 - for blocker in uninstall_parents:
5504 - mygraph.add(uninstall, blocker)
5505 - for parent in self._blocker_parents.parent_nodes(blocker):
5506 - if parent != inst_pkg:
5507 - mygraph.add(blocker, parent)
5508 -
5509 - # If the uninstall task did not need to be executed because
5510 - # of an upgrade, display Blocker -> Upgrade edges since the
5511 - # corresponding Blocker -> Uninstall edges will not be shown.
5512 - upgrade_node = \
5513 - self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
5514 - if upgrade_node is not None and \
5515 - uninstall not in executed_uninstalls:
5516 - for blocker in uninstall_parents:
5517 - mygraph.add(upgrade_node, blocker)
5518 -
5519 - unsatisfied_blockers = []
5520 - i = 0
5521 - depth = 0
5522 - shown_edges = set()
5523 - for x in mylist:
5524 - if isinstance(x, Blocker) and not x.satisfied:
5525 - unsatisfied_blockers.append(x)
5526 - continue
5527 - graph_key = x
5528 - if "--tree" in self.myopts:
5529 - depth = len(tree_nodes)
5530 - while depth and graph_key not in \
5531 - mygraph.child_nodes(tree_nodes[depth-1]):
5532 - depth -= 1
5533 - if depth:
5534 - tree_nodes = tree_nodes[:depth]
5535 - tree_nodes.append(graph_key)
5536 - display_list.append((x, depth, True))
5537 - shown_edges.add((graph_key, tree_nodes[depth-1]))
5538 - else:
5539 - traversed_nodes = set() # prevent endless circles
5540 - traversed_nodes.add(graph_key)
5541 - def add_parents(current_node, ordered):
5542 - parent_nodes = None
5543 - # Do not traverse to parents if this node is an
5544 - # an argument or a direct member of a set that has
5545 - # been specified as an argument (system or world).
5546 - if current_node not in self._set_nodes:
5547 - parent_nodes = mygraph.parent_nodes(current_node)
5548 - if parent_nodes:
5549 - child_nodes = set(mygraph.child_nodes(current_node))
5550 - selected_parent = None
5551 - # First, try to avoid a direct cycle.
5552 - for node in parent_nodes:
5553 - if not isinstance(node, (Blocker, Package)):
5554 - continue
5555 - if node not in traversed_nodes and \
5556 - node not in child_nodes:
5557 - edge = (current_node, node)
5558 - if edge in shown_edges:
5559 - continue
5560 - selected_parent = node
5561 - break
5562 - if not selected_parent:
5563 - # A direct cycle is unavoidable.
5564 - for node in parent_nodes:
5565 - if not isinstance(node, (Blocker, Package)):
5566 - continue
5567 - if node not in traversed_nodes:
5568 - edge = (current_node, node)
5569 - if edge in shown_edges:
5570 - continue
5571 - selected_parent = node
5572 - break
5573 - if selected_parent:
5574 - shown_edges.add((current_node, selected_parent))
5575 - traversed_nodes.add(selected_parent)
5576 - add_parents(selected_parent, False)
5577 - display_list.append((current_node,
5578 - len(tree_nodes), ordered))
5579 - tree_nodes.append(current_node)
5580 - tree_nodes = []
5581 - add_parents(graph_key, True)
5582 - else:
5583 - display_list.append((x, depth, True))
5584 - mylist = display_list
5585 - for x in unsatisfied_blockers:
5586 - mylist.append((x, 0, True))
5587 -
5588 - last_merge_depth = 0
5589 - for i in xrange(len(mylist)-1,-1,-1):
5590 - graph_key, depth, ordered = mylist[i]
5591 - if not ordered and depth == 0 and i > 0 \
5592 - and graph_key == mylist[i-1][0] and \
5593 - mylist[i-1][1] == 0:
5594 - # An ordered node got a consecutive duplicate when the tree was
5595 - # being filled in.
5596 - del mylist[i]
5597 - continue
5598 - if ordered and graph_key[-1] != "nomerge":
5599 - last_merge_depth = depth
5600 - continue
5601 - if depth >= last_merge_depth or \
5602 - i < len(mylist) - 1 and \
5603 - depth >= mylist[i+1][1]:
5604 - del mylist[i]
5605 -
5606 - from portage import flatten
5607 - from portage.dep import use_reduce, paren_reduce
5608 - # files to fetch list - avoids counting a same file twice
5609 - # in size display (verbose mode)
5610 - myfetchlist=[]
5611 -
5612 - # Use this set to detect when all the "repoadd" strings are "[0]"
5613 - # and disable the entire repo display in this case.
5614 - repoadd_set = set()
5615 -
5616 - for mylist_index in xrange(len(mylist)):
5617 - x, depth, ordered = mylist[mylist_index]
5618 - pkg_type = x[0]
5619 - myroot = x[1]
5620 - pkg_key = x[2]
5621 - portdb = self.trees[myroot]["porttree"].dbapi
5622 - bindb = self.trees[myroot]["bintree"].dbapi
5623 - vardb = self.trees[myroot]["vartree"].dbapi
5624 - vartree = self.trees[myroot]["vartree"]
5625 - pkgsettings = self.pkgsettings[myroot]
5626 -
5627 - fetch=" "
5628 - indent = " " * depth
5629 -
5630 - if isinstance(x, Blocker):
5631 - if x.satisfied:
5632 - blocker_style = "PKG_BLOCKER_SATISFIED"
5633 - addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
5634 - else:
5635 - blocker_style = "PKG_BLOCKER"
5636 - addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
5637 - if ordered:
5638 - counters.blocks += 1
5639 - if x.satisfied:
5640 - counters.blocks_satisfied += 1
5641 - resolved = portage.key_expand(
5642 - str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
5643 - if "--columns" in self.myopts and "--quiet" in self.myopts:
5644 - addl += " " + colorize(blocker_style, resolved)
5645 - else:
5646 - addl = "[%s %s] %s%s" % \
5647 - (colorize(blocker_style, "blocks"),
5648 - addl, indent, colorize(blocker_style, resolved))
5649 - block_parents = self._blocker_parents.parent_nodes(x)
5650 - block_parents = set([pnode[2] for pnode in block_parents])
5651 - block_parents = ", ".join(block_parents)
5652 - if resolved!=x[2]:
5653 - addl += colorize(blocker_style,
5654 - " (\"%s\" is blocking %s)") % \
5655 - (str(x.atom).lstrip("!"), block_parents)
5656 - else:
5657 - addl += colorize(blocker_style,
5658 - " (is blocking %s)") % block_parents
5659 - if isinstance(x, Blocker) and x.satisfied:
5660 - if columns:
5661 - continue
5662 - p.append(addl)
5663 - else:
5664 - blockers.append(addl)
5665 - else:
5666 - pkg_status = x[3]
5667 - pkg_merge = ordered and pkg_status == "merge"
5668 - if not pkg_merge and pkg_status == "merge":
5669 - pkg_status = "nomerge"
5670 - built = pkg_type != "ebuild"
5671 - installed = pkg_type == "installed"
5672 - pkg = x
5673 - metadata = pkg.metadata
5674 - ebuild_path = None
5675 - if pkg_type == "binary":
5676 - repo_name = self.roots[myroot].settings.get("PORTAGE_BINHOST")
5677 - else:
5678 - repo_name = metadata["repository"]
5679 - if pkg_type == "ebuild":
5680 - ebuild_path = portdb.findname(pkg_key)
5681 - if not ebuild_path: # shouldn't happen
5682 - raise portage.exception.PackageNotFound(pkg_key)
5683 - repo_path_real = os.path.dirname(os.path.dirname(
5684 - os.path.dirname(ebuild_path)))
5685 - elif pkg_type == "binary":
5686 - repo_path_real = repo_name
5687 - else:
5688 - repo_path_real = portdb.getRepositoryPath(repo_name)
5689 - pkg_use = list(pkg.use.enabled)
5690 - try:
5691 - restrict = flatten(use_reduce(paren_reduce(
5692 - pkg.metadata["RESTRICT"]), uselist=pkg_use))
5693 - except portage.exception.InvalidDependString, e:
5694 - if not pkg.installed:
5695 - show_invalid_depstring_notice(x,
5696 - pkg.metadata["RESTRICT"], str(e))
5697 - del e
5698 - return 1
5699 - restrict = []
5700 - if "ebuild" == pkg_type and x[3] != "nomerge" and \
5701 - "fetch" in restrict:
5702 - fetch = red("F")
5703 - if ordered:
5704 - counters.restrict_fetch += 1
5705 - if portdb.fetch_check(pkg_key, pkg_use):
5706 - fetch = green("f")
5707 - if ordered:
5708 - counters.restrict_fetch_satisfied += 1
5709 -
5710 - #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
5711 - #param is used for -u, where you still *do* want to see when something is being upgraded.
5712 - myoldbest = []
5713 - myinslotlist = None
5714 - installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
5715 - if vardb.cpv_exists(pkg_key):
5716 - addl=" "+yellow("R")+fetch+" "
5717 - if ordered:
5718 - if pkg_merge:
5719 - counters.reinst += 1
5720 - elif pkg_status == "uninstall":
5721 - counters.uninst += 1
5722 - # filter out old-style virtual matches
5723 - elif installed_versions and \
5724 - portage.cpv_getkey(installed_versions[0]) == \
5725 - portage.cpv_getkey(pkg_key):
5726 - myinslotlist = vardb.match(pkg.slot_atom)
5727 - # If this is the first install of a new-style virtual, we
5728 - # need to filter out old-style virtual matches.
5729 - if myinslotlist and \
5730 - portage.cpv_getkey(myinslotlist[0]) != \
5731 - portage.cpv_getkey(pkg_key):
5732 - myinslotlist = None
5733 - if myinslotlist:
5734 - myoldbest = myinslotlist[:]
5735 - addl = " " + fetch
5736 - if not portage.dep.cpvequal(pkg_key,
5737 - portage.best([pkg_key] + myoldbest)):
5738 - # Downgrade in slot
5739 - addl += turquoise("U")+blue("D")
5740 - if ordered:
5741 - counters.downgrades += 1
5742 - else:
5743 - # Update in slot
5744 - addl += turquoise("U") + " "
5745 - if ordered:
5746 - counters.upgrades += 1
5747 - else:
5748 - # New slot, mark it new.
5749 - addl = " " + green("NS") + fetch + " "
5750 - myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
5751 - if ordered:
5752 - counters.newslot += 1
5753 -
5754 - if "--changelog" in self.myopts:
5755 - inst_matches = vardb.match(pkg.slot_atom)
5756 - if inst_matches:
5757 - changelogs.extend(self.calc_changelog(
5758 - portdb.findname(pkg_key),
5759 - inst_matches[0], pkg_key))
5760 - else:
5761 - addl = " " + green("N") + " " + fetch + " "
5762 - if ordered:
5763 - counters.new += 1
5764 -
5765 - verboseadd = ""
5766 - repoadd = None
5767 -
5768 - if True:
5769 - # USE flag display
5770 - forced_flags = set()
5771 - pkgsettings.setcpv(pkg) # for package.use.{mask,force}
5772 - forced_flags.update(pkgsettings.useforce)
5773 - forced_flags.update(pkgsettings.usemask)
5774 -
5775 - cur_use = [flag for flag in pkg.use.enabled \
5776 - if flag in pkg.iuse.all]
5777 - cur_iuse = sorted(pkg.iuse.all)
5778 -
5779 - if myoldbest and myinslotlist:
5780 - previous_cpv = myoldbest[0]
5781 - else:
5782 - previous_cpv = pkg.cpv
5783 - if vardb.cpv_exists(previous_cpv):
5784 - old_iuse, old_use = vardb.aux_get(
5785 - previous_cpv, ["IUSE", "USE"])
5786 - old_iuse = list(set(
5787 - filter_iuse_defaults(old_iuse.split())))
5788 - old_iuse.sort()
5789 - old_use = old_use.split()
5790 - is_new = False
5791 - else:
5792 - old_iuse = []
5793 - old_use = []
5794 - is_new = True
5795 -
5796 - old_use = [flag for flag in old_use if flag in old_iuse]
5797 -
5798 - use_expand = pkgsettings["USE_EXPAND"].lower().split()
5799 - use_expand.sort()
5800 - use_expand.reverse()
5801 - use_expand_hidden = \
5802 - pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
5803 -
5804 - def map_to_use_expand(myvals, forcedFlags=False,
5805 - removeHidden=True):
5806 - ret = {}
5807 - forced = {}
5808 - for exp in use_expand:
5809 - ret[exp] = []
5810 - forced[exp] = set()
5811 - for val in myvals[:]:
5812 - if val.startswith(exp.lower()+"_"):
5813 - if val in forced_flags:
5814 - forced[exp].add(val[len(exp)+1:])
5815 - ret[exp].append(val[len(exp)+1:])
5816 - myvals.remove(val)
5817 - ret["USE"] = myvals
5818 - forced["USE"] = [val for val in myvals \
5819 - if val in forced_flags]
5820 - if removeHidden:
5821 - for exp in use_expand_hidden:
5822 - ret.pop(exp, None)
5823 - if forcedFlags:
5824 - return ret, forced
5825 - return ret
5826 -
5827 - # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
5828 - # are the only thing that triggered reinstallation.
5829 - reinst_flags_map = {}
5830 - reinstall_for_flags = self._reinstall_nodes.get(pkg)
5831 - reinst_expand_map = None
5832 - if reinstall_for_flags:
5833 - reinst_flags_map = map_to_use_expand(
5834 - list(reinstall_for_flags), removeHidden=False)
5835 - for k in list(reinst_flags_map):
5836 - if not reinst_flags_map[k]:
5837 - del reinst_flags_map[k]
5838 - if not reinst_flags_map.get("USE"):
5839 - reinst_expand_map = reinst_flags_map.copy()
5840 - reinst_expand_map.pop("USE", None)
5841 - if reinst_expand_map and \
5842 - not set(reinst_expand_map).difference(
5843 - use_expand_hidden):
5844 - use_expand_hidden = \
5845 - set(use_expand_hidden).difference(
5846 - reinst_expand_map)
5847 -
5848 - cur_iuse_map, iuse_forced = \
5849 - map_to_use_expand(cur_iuse, forcedFlags=True)
5850 - cur_use_map = map_to_use_expand(cur_use)
5851 - old_iuse_map = map_to_use_expand(old_iuse)
5852 - old_use_map = map_to_use_expand(old_use)
5853 -
5854 - use_expand.sort()
5855 - use_expand.insert(0, "USE")
5856 -
5857 - for key in use_expand:
5858 - if key in use_expand_hidden:
5859 - continue
5860 - verboseadd += create_use_string(key.upper(),
5861 - cur_iuse_map[key], iuse_forced[key],
5862 - cur_use_map[key], old_iuse_map[key],
5863 - old_use_map[key], is_new,
5864 - reinst_flags_map.get(key))
5865 -
5866 - if verbosity == 3:
5867 - # size verbose
5868 - mysize=0
5869 - if pkg_type == "ebuild" and pkg_merge:
5870 - try:
5871 - myfilesdict = portdb.getfetchsizes(pkg_key,
5872 - useflags=pkg_use, debug=self.edebug)
5873 - except portage.exception.InvalidDependString, e:
5874 - src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
5875 - show_invalid_depstring_notice(x, src_uri, str(e))
5876 - del e
5877 - return 1
5878 - if myfilesdict is None:
5879 - myfilesdict="[empty/missing/bad digest]"
5880 - else:
5881 - for myfetchfile in myfilesdict:
5882 - if myfetchfile not in myfetchlist:
5883 - mysize+=myfilesdict[myfetchfile]
5884 - myfetchlist.append(myfetchfile)
5885 - if ordered:
5886 - counters.totalsize += mysize
5887 - verboseadd += format_size(mysize)
5888 -
5889 - # overlay verbose
5890 - # assign index for a previous version in the same slot
5891 - has_previous = False
5892 - repo_name_prev = None
5893 - slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
5894 - metadata["SLOT"])
5895 - slot_matches = vardb.match(slot_atom)
5896 - if slot_matches:
5897 - has_previous = True
5898 - repo_name_prev = vardb.aux_get(slot_matches[0],
5899 - ["repository"])[0]
5900 -
5901 - # now use the data to generate output
5902 - if pkg.installed or not has_previous:
5903 - repoadd = repo_display.repoStr(repo_path_real)
5904 - else:
5905 - repo_path_prev = None
5906 - if repo_name_prev:
5907 - repo_path_prev = portdb.getRepositoryPath(
5908 - repo_name_prev)
5909 - if repo_path_prev == repo_path_real:
5910 - repoadd = repo_display.repoStr(repo_path_real)
5911 - else:
5912 - repoadd = "%s=>%s" % (
5913 - repo_display.repoStr(repo_path_prev),
5914 - repo_display.repoStr(repo_path_real))
5915 - if repoadd:
5916 - repoadd_set.add(repoadd)
5917 -
5918 - xs = [portage.cpv_getkey(pkg_key)] + \
5919 - list(portage.catpkgsplit(pkg_key)[2:])
5920 - if xs[2] == "r0":
5921 - xs[2] = ""
5922 - else:
5923 - xs[2] = "-" + xs[2]
5924 -
5925 - mywidth = 130
5926 - if "COLUMNWIDTH" in self.settings:
5927 - try:
5928 - mywidth = int(self.settings["COLUMNWIDTH"])
5929 - except ValueError, e:
5930 - portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
5931 - portage.writemsg(
5932 - "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
5933 - self.settings["COLUMNWIDTH"], noiselevel=-1)
5934 - del e
5935 - oldlp = mywidth - 30
5936 - newlp = oldlp - 30
5937 -
5938 - # Convert myoldbest from a list to a string.
5939 - if not myoldbest:
5940 - myoldbest = ""
5941 - else:
5942 - for pos, key in enumerate(myoldbest):
5943 - key = portage.catpkgsplit(key)[2] + \
5944 - "-" + portage.catpkgsplit(key)[3]
5945 - if key[-3:] == "-r0":
5946 - key = key[:-3]
5947 - myoldbest[pos] = key
5948 - myoldbest = blue("["+", ".join(myoldbest)+"]")
5949 -
5950 - pkg_cp = xs[0]
5951 - root_config = self.roots[myroot]
5952 - system_set = root_config.sets["system"]
5953 - world_set = root_config.sets["world"]
5954 -
5955 - pkg_system = False
5956 - pkg_world = False
5957 - try:
5958 - pkg_system = system_set.findAtomForPackage(pkg)
5959 - pkg_world = world_set.findAtomForPackage(pkg)
5960 - if not (oneshot or pkg_world) and \
5961 - myroot == self.target_root and \
5962 - favorites_set.findAtomForPackage(pkg):
5963 - # Maybe it will be added to world now.
5964 - if create_world_atom(pkg, favorites_set, root_config):
5965 - pkg_world = True
5966 - except portage.exception.InvalidDependString:
5967 - # This is reported elsewhere if relevant.
5968 - pass
5969 -
5970 - def pkgprint(pkg_str):
5971 - if pkg_merge:
5972 - if pkg_system:
5973 - return colorize("PKG_MERGE_SYSTEM", pkg_str)
5974 - elif pkg_world:
5975 - return colorize("PKG_MERGE_WORLD", pkg_str)
5976 - else:
5977 - return colorize("PKG_MERGE", pkg_str)
5978 - elif pkg_status == "uninstall":
5979 - return colorize("PKG_UNINSTALL", pkg_str)
5980 - else:
5981 - if pkg_system:
5982 - return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
5983 - elif pkg_world:
5984 - return colorize("PKG_NOMERGE_WORLD", pkg_str)
5985 - else:
5986 - return colorize("PKG_NOMERGE", pkg_str)
5987 -
5988 - try:
5989 - properties = flatten(use_reduce(paren_reduce(
5990 - pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
5991 - except portage.exception.InvalidDependString, e:
5992 - if not pkg.installed:
5993 - show_invalid_depstring_notice(pkg,
5994 - pkg.metadata["PROPERTIES"], str(e))
5995 - del e
5996 - return 1
5997 - properties = []
5998 - interactive = "interactive" in properties
5999 - if interactive and pkg.operation == "merge":
6000 - addl = colorize("WARN", "I") + addl[1:]
6001 - if ordered:
6002 - counters.interactive += 1
6003 -
6004 - if x[1]!="/":
6005 - if myoldbest:
6006 - myoldbest +=" "
6007 - if "--columns" in self.myopts:
6008 - if "--quiet" in self.myopts:
6009 - myprint=addl+" "+indent+pkgprint(pkg_cp)
6010 - myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
6011 - myprint=myprint+myoldbest
6012 - myprint=myprint+darkgreen("to "+x[1])
6013 - verboseadd = None
6014 - else:
6015 - if not pkg_merge:
6016 - myprint = "[%s] %s%s" % \
6017 - (pkgprint(pkg_status.ljust(13)),
6018 - indent, pkgprint(pkg.cp))
6019 - else:
6020 - myprint = "[%s %s] %s%s" % \
6021 - (pkgprint(pkg.type_name), addl,
6022 - indent, pkgprint(pkg.cp))
6023 - if (newlp-nc_len(myprint)) > 0:
6024 - myprint=myprint+(" "*(newlp-nc_len(myprint)))
6025 - myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
6026 - if (oldlp-nc_len(myprint)) > 0:
6027 - myprint=myprint+" "*(oldlp-nc_len(myprint))
6028 - myprint=myprint+myoldbest
6029 - myprint += darkgreen("to " + pkg.root)
6030 - else:
6031 - if not pkg_merge:
6032 - myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
6033 - else:
6034 - myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
6035 - myprint += indent + pkgprint(pkg_key) + " " + \
6036 - myoldbest + darkgreen("to " + myroot)
6037 - else:
6038 - if "--columns" in self.myopts:
6039 - if "--quiet" in self.myopts:
6040 - myprint=addl+" "+indent+pkgprint(pkg_cp)
6041 - myprint=myprint+" "+green(xs[1]+xs[2])+" "
6042 - myprint=myprint+myoldbest
6043 - verboseadd = None
6044 - else:
6045 - if not pkg_merge:
6046 - myprint = "[%s] %s%s" % \
6047 - (pkgprint(pkg_status.ljust(13)),
6048 - indent, pkgprint(pkg.cp))
6049 - else:
6050 - myprint = "[%s %s] %s%s" % \
6051 - (pkgprint(pkg.type_name), addl,
6052 - indent, pkgprint(pkg.cp))
6053 - if (newlp-nc_len(myprint)) > 0:
6054 - myprint=myprint+(" "*(newlp-nc_len(myprint)))
6055 - myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
6056 - if (oldlp-nc_len(myprint)) > 0:
6057 - myprint=myprint+(" "*(oldlp-nc_len(myprint)))
6058 - myprint += myoldbest
6059 - else:
6060 - if not pkg_merge:
6061 - myprint = "[%s] %s%s %s" % \
6062 - (pkgprint(pkg_status.ljust(13)),
6063 - indent, pkgprint(pkg.cpv),
6064 - myoldbest)
6065 - else:
6066 - myprint = "[%s %s] %s%s %s" % \
6067 - (pkgprint(pkg_type), addl, indent,
6068 - pkgprint(pkg.cpv), myoldbest)
6069 -
6070 - if columns and pkg.operation == "uninstall":
6071 - continue
6072 - p.append((myprint, verboseadd, repoadd))
6073 -
6074 - if "--tree" not in self.myopts and \
6075 - "--quiet" not in self.myopts and \
6076 - not self._opts_no_restart.intersection(self.myopts) and \
6077 - pkg.root == self._running_root.root and \
6078 - portage.match_from_list(
6079 - portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
6080 - not vardb.cpv_exists(pkg.cpv) and \
6081 - "--quiet" not in self.myopts:
6082 - if mylist_index < len(mylist) - 1:
6083 - p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
6084 - p.append(colorize("WARN", " then resume the merge."))
6085 -
6086 - out = sys.stdout
6087 - show_repos = repoadd_set and repoadd_set != set(["0"])
6088 -
6089 - for x in p:
6090 - if isinstance(x, basestring):
6091 - out.write("%s\n" % (x,))
6092 - continue
6093 -
6094 - myprint, verboseadd, repoadd = x
6095 -
6096 - if verboseadd:
6097 - myprint += " " + verboseadd
6098 -
6099 - if show_repos and repoadd:
6100 - myprint += " " + teal("[%s]" % repoadd)
6101 -
6102 - out.write("%s\n" % (myprint,))
6103 -
6104 - for x in blockers:
6105 - print x
6106 -
6107 - if verbosity == 3:
6108 - print
6109 - print counters
6110 - if show_repos:
6111 - sys.stdout.write(str(repo_display))
6112 -
6113 - if "--changelog" in self.myopts:
6114 - print
6115 - for revision,text in changelogs:
6116 - print bold('*'+revision)
6117 - sys.stdout.write(text)
6118 -
6119 - sys.stdout.flush()
6120 - return os.EX_OK
6121 -
6122 - def display_problems(self):
6123 - """
6124 - Display problems with the dependency graph such as slot collisions.
6125 - This is called internally by display() to show the problems _after_
6126 - the merge list where it is most likely to be seen, but if display()
6127 - is not going to be called then this method should be called explicitly
6128 - to ensure that the user is notified of problems with the graph.
6129 -
6130 - All output goes to stderr, except for unsatisfied dependencies which
6131 - go to stdout for parsing by programs such as autounmask.
6132 - """
6133 -
6134 - # Note that show_masked_packages() sends its output to
6135 - # stdout, and some programs such as autounmask parse the
6136 - # output in cases when emerge bails out. However, when
6137 - # show_masked_packages() is called for installed packages
6138 - # here, the message is a warning that is more appropriate
6139 - # to send to stderr, so temporarily redirect stdout to
6140 - # stderr. TODO: Fix output code so there's a cleaner way
6141 - # to redirect everything to stderr.
6142 - sys.stdout.flush()
6143 - sys.stderr.flush()
6144 - stdout = sys.stdout
6145 - try:
6146 - sys.stdout = sys.stderr
6147 - self._display_problems()
6148 - finally:
6149 - sys.stdout = stdout
6150 - sys.stdout.flush()
6151 - sys.stderr.flush()
6152 -
6153 - # This goes to stdout for parsing by programs like autounmask.
6154 - for pargs, kwargs in self._unsatisfied_deps_for_display:
6155 - self._show_unsatisfied_dep(*pargs, **kwargs)
6156 -
6157 - def _display_problems(self):
6158 - if self._circular_deps_for_display is not None:
6159 - self._show_circular_deps(
6160 - self._circular_deps_for_display)
6161 -
6162 - # The user is only notified of a slot conflict if
6163 - # there are no unresolvable blocker conflicts.
6164 - if self._unsatisfied_blockers_for_display is not None:
6165 - self._show_unsatisfied_blockers(
6166 - self._unsatisfied_blockers_for_display)
6167 - else:
6168 - self._show_slot_collision_notice()
6169 -
6170 - # TODO: Add generic support for "set problem" handlers so that
6171 - # the below warnings aren't special cases for world only.
6172 -
6173 - if self._missing_args:
6174 - world_problems = False
6175 - if "world" in self._sets:
6176 - # Filter out indirect members of world (from nested sets)
6177 - # since only direct members of world are desired here.
6178 - world_set = self.roots[self.target_root].sets["world"]
6179 - for arg, atom in self._missing_args:
6180 - if arg.name == "world" and atom in world_set:
6181 - world_problems = True
6182 - break
6183 -
6184 - if world_problems:
6185 - sys.stderr.write("\n!!! Problems have been " + \
6186 - "detected with your world file\n")
6187 - sys.stderr.write("!!! Please run " + \
6188 - green("emaint --check world")+"\n\n")
6189 -
6190 - if self._missing_args:
6191 - sys.stderr.write("\n" + colorize("BAD", "!!!") + \
6192 - " Ebuilds for the following packages are either all\n")
6193 - sys.stderr.write(colorize("BAD", "!!!") + \
6194 - " masked or don't exist:\n")
6195 - sys.stderr.write(" ".join(str(atom) for arg, atom in \
6196 - self._missing_args) + "\n")
6197 -
6198 - if self._pprovided_args:
6199 - arg_refs = {}
6200 - for arg, atom in self._pprovided_args:
6201 - if isinstance(arg, SetArg):
6202 - parent = arg.name
6203 - arg_atom = (atom, atom)
6204 - else:
6205 - parent = "args"
6206 - arg_atom = (arg.arg, atom)
6207 - refs = arg_refs.setdefault(arg_atom, [])
6208 - if parent not in refs:
6209 - refs.append(parent)
6210 - msg = []
6211 - msg.append(bad("\nWARNING: "))
6212 - if len(self._pprovided_args) > 1:
6213 - msg.append("Requested packages will not be " + \
6214 - "merged because they are listed in\n")
6215 - else:
6216 - msg.append("A requested package will not be " + \
6217 - "merged because it is listed in\n")
6218 - msg.append("package.provided:\n\n")
6219 - problems_sets = set()
6220 - for (arg, atom), refs in arg_refs.iteritems():
6221 - ref_string = ""
6222 - if refs:
6223 - problems_sets.update(refs)
6224 - refs.sort()
6225 - ref_string = ", ".join(["'%s'" % name for name in refs])
6226 - ref_string = " pulled in by " + ref_string
6227 - msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
6228 - msg.append("\n")
6229 - if "world" in problems_sets:
6230 - msg.append("This problem can be solved in one of the following ways:\n\n")
6231 - msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
6232 - msg.append(" B) Uninstall offending packages (cleans them from world).\n")
6233 - msg.append(" C) Remove offending entries from package.provided.\n\n")
6234 - msg.append("The best course of action depends on the reason that an offending\n")
6235 - msg.append("package.provided entry exists.\n\n")
6236 - sys.stderr.write("".join(msg))
6237 -
6238 - masked_packages = []
6239 - for pkg in self._masked_installed:
6240 - root_config = pkg.root_config
6241 - pkgsettings = self.pkgsettings[pkg.root]
6242 - mreasons = get_masking_status(pkg, pkgsettings, root_config)
6243 - masked_packages.append((root_config, pkgsettings,
6244 - pkg.cpv, pkg.metadata, mreasons))
6245 - if masked_packages:
6246 - sys.stderr.write("\n" + colorize("BAD", "!!!") + \
6247 - " The following installed packages are masked:\n")
6248 - show_masked_packages(masked_packages)
6249 - show_mask_docs()
6250 - print
6251 -
6252 - def calc_changelog(self,ebuildpath,current,next):
6253 - if ebuildpath == None or not os.path.exists(ebuildpath):
6254 - return []
6255 - current = '-'.join(portage.catpkgsplit(current)[1:])
6256 - if current.endswith('-r0'):
6257 - current = current[:-3]
6258 - next = '-'.join(portage.catpkgsplit(next)[1:])
6259 - if next.endswith('-r0'):
6260 - next = next[:-3]
6261 - changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
6262 - try:
6263 - changelog = open(changelogpath).read()
6264 - except SystemExit, e:
6265 - raise # Needed else can't exit
6266 - except:
6267 - return []
6268 - divisions = self.find_changelog_tags(changelog)
6269 - #print 'XX from',current,'to',next
6270 - #for div,text in divisions: print 'XX',div
6271 - # skip entries for all revisions above the one we are about to emerge
6272 - for i in range(len(divisions)):
6273 - if divisions[i][0]==next:
6274 - divisions = divisions[i:]
6275 - break
6276 - # find out how many entries we are going to display
6277 - for i in range(len(divisions)):
6278 - if divisions[i][0]==current:
6279 - divisions = divisions[:i]
6280 - break
6281 - else:
6282 - # couldn't find the current revision in the list. display nothing
6283 - return []
6284 - return divisions
6285 -
6286 - def find_changelog_tags(self,changelog):
6287 - divs = []
6288 - release = None
6289 - while 1:
6290 - match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
6291 - if match is None:
6292 - if release is not None:
6293 - divs.append((release,changelog))
6294 - return divs
6295 - if release is not None:
6296 - divs.append((release,changelog[:match.start()]))
6297 - changelog = changelog[match.end():]
6298 - release = match.group(1)
6299 - if release.endswith('.ebuild'):
6300 - release = release[:-7]
6301 - if release.endswith('-r0'):
6302 - release = release[:-3]
6303 -
6304 - def saveNomergeFavorites(self):
6305 - """Find atoms in favorites that are not in the mergelist and add them
6306 - to the world file if necessary."""
6307 - for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
6308 - "--oneshot", "--onlydeps", "--pretend"):
6309 - if x in self.myopts:
6310 - return
6311 - root_config = self.roots[self.target_root]
6312 - world_set = root_config.sets["world"]
6313 -
6314 - world_locked = False
6315 - if hasattr(world_set, "lock"):
6316 - world_set.lock()
6317 - world_locked = True
6318 -
6319 - if hasattr(world_set, "load"):
6320 - world_set.load() # maybe it's changed on disk
6321 -
6322 - args_set = self._sets["args"]
6323 - portdb = self.trees[self.target_root]["porttree"].dbapi
6324 - added_favorites = set()
6325 - for x in self._set_nodes:
6326 - pkg_type, root, pkg_key, pkg_status = x
6327 - if pkg_status != "nomerge":
6328 - continue
6329 -
6330 - try:
6331 - myfavkey = create_world_atom(x, args_set, root_config)
6332 - if myfavkey:
6333 - if myfavkey in added_favorites:
6334 - continue
6335 - added_favorites.add(myfavkey)
6336 - except portage.exception.InvalidDependString, e:
6337 - writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
6338 - (pkg_key, str(e)), noiselevel=-1)
6339 - writemsg("!!! see '%s'\n\n" % os.path.join(
6340 - root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
6341 - del e
6342 - all_added = []
6343 - for k in self._sets:
6344 - if k in ("args", "world") or not root_config.sets[k].world_candidate:
6345 - continue
6346 - s = SETPREFIX + k
6347 - if s in world_set:
6348 - continue
6349 - all_added.append(SETPREFIX + k)
6350 - all_added.extend(added_favorites)
6351 - all_added.sort()
6352 - for a in all_added:
6353 - print ">>> Recording %s in \"world\" favorites file..." % \
6354 - colorize("INFORM", str(a))
6355 - if all_added:
6356 - world_set.update(all_added)
6357 -
6358 - if world_locked:
6359 - world_set.unlock()
6360 -
6361 - def loadResumeCommand(self, resume_data, skip_masked=True,
6362 - skip_missing=True):
6363 - """
6364 - Add a resume command to the graph and validate it in the process. This
6365 - will raise a PackageNotFound exception if a package is not available.
6366 - """
6367 -
6368 - if not isinstance(resume_data, dict):
6369 - return False
6370 -
6371 - mergelist = resume_data.get("mergelist")
6372 - if not isinstance(mergelist, list):
6373 - mergelist = []
6374 -
6375 - fakedb = self.mydbapi
6376 - trees = self.trees
6377 - serialized_tasks = []
6378 - masked_tasks = []
6379 - for x in mergelist:
6380 - if not (isinstance(x, list) and len(x) == 4):
6381 - continue
6382 - pkg_type, myroot, pkg_key, action = x
6383 - if pkg_type not in self.pkg_tree_map:
6384 - continue
6385 - if action != "merge":
6386 - continue
6387 - tree_type = self.pkg_tree_map[pkg_type]
6388 - mydb = trees[myroot][tree_type].dbapi
6389 - db_keys = list(self._trees_orig[myroot][
6390 - tree_type].dbapi._aux_cache_keys)
6391 - try:
6392 - metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
6393 - except KeyError:
6394 - # It does not exist or it is corrupt.
6395 - if action == "uninstall":
6396 - continue
6397 - if skip_missing:
6398 - # TODO: log these somewhere
6399 - continue
6400 - raise portage.exception.PackageNotFound(pkg_key)
6401 - installed = action == "uninstall"
6402 - built = pkg_type != "ebuild"
6403 - root_config = self.roots[myroot]
6404 - pkg = Package(built=built, cpv=pkg_key,
6405 - installed=installed, metadata=metadata,
6406 - operation=action, root_config=root_config,
6407 - type_name=pkg_type)
6408 - if pkg_type == "ebuild":
6409 - pkgsettings = self.pkgsettings[myroot]
6410 - pkgsettings.setcpv(pkg)
6411 - pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6412 - pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
6413 - self._pkg_cache[pkg] = pkg
6414 -
6415 - root_config = self.roots[pkg.root]
6416 - if "merge" == pkg.operation and \
6417 - not visible(root_config.settings, pkg):
6418 - if skip_masked:
6419 - masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
6420 - else:
6421 - self._unsatisfied_deps_for_display.append(
6422 - ((pkg.root, "="+pkg.cpv), {"myparent":None}))
6423 -
6424 - fakedb[myroot].cpv_inject(pkg)
6425 - serialized_tasks.append(pkg)
6426 - self.spinner.update()
6427 -
6428 - if self._unsatisfied_deps_for_display:
6429 - return False
6430 -
6431 - if not serialized_tasks or "--nodeps" in self.myopts:
6432 - self._serialized_tasks_cache = serialized_tasks
6433 - self._scheduler_graph = self.digraph
6434 - else:
6435 - self._select_package = self._select_pkg_from_graph
6436 - self.myparams.add("selective")
6437 - # Always traverse deep dependencies in order to account for
6438 - # potentially unsatisfied dependencies of installed packages.
6439 - # This is necessary for correct --keep-going or --resume operation
6440 - # in case a package from a group of circularly dependent packages
6441 - # fails. In this case, a package which has recently been installed
6442 - # may have an unsatisfied circular dependency (pulled in by
6443 - # PDEPEND, for example). So, even though a package is already
6444 - # installed, it may not have all of its dependencies satisfied, so
6445 - # it may not be usable. If such a package is in the subgraph of
6446 - # deep dependencies of a scheduled build, that build needs to
6447 - # be cancelled. In order for this type of situation to be
6448 - # recognized, deep traversal of dependencies is required.
6449 - self.myparams.add("deep")
6450 -
6451 - favorites = resume_data.get("favorites")
6452 - args_set = self._sets["args"]
6453 - if isinstance(favorites, list):
6454 - args = self._load_favorites(favorites)
6455 - else:
6456 - args = []
6457 -
6458 - for task in serialized_tasks:
6459 - if isinstance(task, Package) and \
6460 - task.operation == "merge":
6461 - if not self._add_pkg(task, None):
6462 - return False
6463 -
6464 - # Packages for argument atoms need to be explicitly
6465 - # added via _add_pkg() so that they are included in the
6466 - # digraph (needed at least for --tree display).
6467 - for arg in args:
6468 - for atom in arg.set:
6469 - pkg, existing_node = self._select_package(
6470 - arg.root_config.root, atom)
6471 - if existing_node is None and \
6472 - pkg is not None:
6473 - if not self._add_pkg(pkg, Dependency(atom=atom,
6474 - root=pkg.root, parent=arg)):
6475 - return False
6476 -
6477 - # Allow unsatisfied deps here to avoid showing a masking
6478 - # message for an unsatisfied dep that isn't necessarily
6479 - # masked.
6480 - if not self._create_graph(allow_unsatisfied=True):
6481 - return False
6482 -
6483 - unsatisfied_deps = []
6484 - for dep in self._unsatisfied_deps:
6485 - if not isinstance(dep.parent, Package):
6486 - continue
6487 - if dep.parent.operation == "merge":
6488 - unsatisfied_deps.append(dep)
6489 - continue
6490 -
6491 - # For unsatisfied deps of installed packages, only account for
6492 - # them if they are in the subgraph of dependencies of a package
6493 - # which is scheduled to be installed.
6494 - unsatisfied_install = False
6495 - traversed = set()
6496 - dep_stack = self.digraph.parent_nodes(dep.parent)
6497 - while dep_stack:
6498 - node = dep_stack.pop()
6499 - if not isinstance(node, Package):
6500 - continue
6501 - if node.operation == "merge":
6502 - unsatisfied_install = True
6503 - break
6504 - if node in traversed:
6505 - continue
6506 - traversed.add(node)
6507 - dep_stack.extend(self.digraph.parent_nodes(node))
6508 -
6509 - if unsatisfied_install:
6510 - unsatisfied_deps.append(dep)
6511 -
6512 - if masked_tasks or unsatisfied_deps:
6513 - # This probably means that a required package
6514 - # was dropped via --skipfirst. It makes the
6515 - # resume list invalid, so convert it to a
6516 - # UnsatisfiedResumeDep exception.
6517 - raise self.UnsatisfiedResumeDep(self,
6518 - masked_tasks + unsatisfied_deps)
6519 - self._serialized_tasks_cache = None
6520 - try:
6521 - self.altlist()
6522 - except self._unknown_internal_error:
6523 - return False
6524 -
6525 - return True
6526 -
6527 - def _load_favorites(self, favorites):
6528 - """
6529 - Use a list of favorites to resume state from a
6530 - previous select_files() call. This creates similar
6531 - DependencyArg instances to those that would have
6532 - been created by the original select_files() call.
6533 - This allows Package instances to be matched with
6534 - DependencyArg instances during graph creation.
6535 - """
6536 - root_config = self.roots[self.target_root]
6537 - getSetAtoms = root_config.setconfig.getSetAtoms
6538 - sets = root_config.sets
6539 - args = []
6540 - for x in favorites:
6541 - if not isinstance(x, basestring):
6542 - continue
6543 - if x in ("system", "world"):
6544 - x = SETPREFIX + x
6545 - if x.startswith(SETPREFIX):
6546 - s = x[len(SETPREFIX):]
6547 - if s not in sets:
6548 - continue
6549 - if s in self._sets:
6550 - continue
6551 - # Recursively expand sets so that containment tests in
6552 - # self._get_parent_sets() properly match atoms in nested
6553 - # sets (like if world contains system).
6554 - expanded_set = InternalPackageSet(
6555 - initial_atoms=getSetAtoms(s))
6556 - self._sets[s] = expanded_set
6557 - args.append(SetArg(arg=x, set=expanded_set,
6558 - root_config=root_config))
6559 - else:
6560 - if not portage.isvalidatom(x):
6561 - continue
6562 - args.append(AtomArg(arg=x, atom=x,
6563 - root_config=root_config))
6564 -
6565 - self._set_args(args)
6566 - return args
6567 -
6568 - class UnsatisfiedResumeDep(portage.exception.PortageException):
6569 - """
6570 - A dependency of a resume list is not installed. This
6571 - can occur when a required package is dropped from the
6572 - merge list via --skipfirst.
6573 - """
6574 - def __init__(self, depgraph, value):
6575 - portage.exception.PortageException.__init__(self, value)
6576 - self.depgraph = depgraph
6577 -
6578 - class _internal_exception(portage.exception.PortageException):
6579 - def __init__(self, value=""):
6580 - portage.exception.PortageException.__init__(self, value)
6581 -
6582 - class _unknown_internal_error(_internal_exception):
6583 - """
6584 - Used by the depgraph internally to terminate graph creation.
6585 - The specific reason for the failure should have been dumped
6586 - to stderr, unfortunately, the exact reason for the failure
6587 - may not be known.
6588 - """
6589 -
6590 - class _serialize_tasks_retry(_internal_exception):
6591 - """
6592 - This is raised by the _serialize_tasks() method when it needs to
6593 - be called again for some reason. The only case that it's currently
6594 - used for is when neglected dependencies need to be added to the
6595 - graph in order to avoid making a potentially unsafe decision.
6596 - """
6597 -
6598 - class _dep_check_composite_db(portage.dbapi):
6599 - """
6600 - A dbapi-like interface that is optimized for use in dep_check() calls.
6601 - This is built on top of the existing depgraph package selection logic.
6602 - Some packages that have been added to the graph may be masked from this
6603 - view in order to influence the atom preference selection that occurs
6604 - via dep_check().
6605 - """
6606 - def __init__(self, depgraph, root):
6607 - portage.dbapi.__init__(self)
6608 - self._depgraph = depgraph
6609 - self._root = root
6610 - self._match_cache = {}
6611 - self._cpv_pkg_map = {}
6612 -
6613 - def _clear_cache(self):
6614 - self._match_cache.clear()
6615 - self._cpv_pkg_map.clear()
6616 -
6617 - def match(self, atom):
6618 - ret = self._match_cache.get(atom)
6619 - if ret is not None:
6620 - return ret[:]
6621 - orig_atom = atom
6622 - if "/" not in atom:
6623 - atom = self._dep_expand(atom)
6624 - pkg, existing = self._depgraph._select_package(self._root, atom)
6625 - if not pkg:
6626 - ret = []
6627 - else:
6628 - # Return the highest available from select_package() as well as
6629 - # any matching slots in the graph db.
6630 - slots = set()
6631 - slots.add(pkg.metadata["SLOT"])
6632 - atom_cp = portage.dep_getkey(atom)
6633 - if pkg.cp.startswith("virtual/"):
6634 - # For new-style virtual lookahead that occurs inside
6635 - # dep_check(), examine all slots. This is needed
6636 - # so that newer slots will not unnecessarily be pulled in
6637 - # when a satisfying lower slot is already installed. For
6638 - # example, if virtual/jdk-1.4 is satisfied via kaffe then
6639 - # there's no need to pull in a newer slot to satisfy a
6640 - # virtual/jdk dependency.
6641 - for db, pkg_type, built, installed, db_keys in \
6642 - self._depgraph._filtered_trees[self._root]["dbs"]:
6643 - for cpv in db.match(atom):
6644 - if portage.cpv_getkey(cpv) != pkg.cp:
6645 - continue
6646 - slots.add(db.aux_get(cpv, ["SLOT"])[0])
6647 - ret = []
6648 - if self._visible(pkg):
6649 - self._cpv_pkg_map[pkg.cpv] = pkg
6650 - ret.append(pkg.cpv)
6651 - slots.remove(pkg.metadata["SLOT"])
6652 - while slots:
6653 - slot_atom = "%s:%s" % (atom_cp, slots.pop())
6654 - pkg, existing = self._depgraph._select_package(
6655 - self._root, slot_atom)
6656 - if not pkg:
6657 - continue
6658 - if not self._visible(pkg):
6659 - continue
6660 - self._cpv_pkg_map[pkg.cpv] = pkg
6661 - ret.append(pkg.cpv)
6662 - if ret:
6663 - self._cpv_sort_ascending(ret)
6664 - self._match_cache[orig_atom] = ret
6665 - return ret[:]
6666 -
6667 - def _visible(self, pkg):
6668 - if pkg.installed and "selective" not in self._depgraph.myparams:
6669 - try:
6670 - arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
6671 - except (StopIteration, portage.exception.InvalidDependString):
6672 - arg = None
6673 - if arg:
6674 - return False
6675 - if pkg.installed:
6676 - try:
6677 - if not visible(
6678 - self._depgraph.pkgsettings[pkg.root], pkg):
6679 - return False
6680 - except portage.exception.InvalidDependString:
6681 - pass
6682 - in_graph = self._depgraph._slot_pkg_map[
6683 - self._root].get(pkg.slot_atom)
6684 - if in_graph is None:
6685 - # Mask choices for packages which are not the highest visible
6686 - # version within their slot (since they usually trigger slot
6687 - # conflicts).
6688 - highest_visible, in_graph = self._depgraph._select_package(
6689 - self._root, pkg.slot_atom)
6690 - if pkg != highest_visible:
6691 - return False
6692 - elif in_graph != pkg:
6693 - # Mask choices for packages that would trigger a slot
6694 - # conflict with a previously selected package.
6695 - return False
6696 - return True
6697 -
6698 - def _dep_expand(self, atom):
6699 - """
6700 - This is only needed for old installed packages that may
6701 - contain atoms that are not fully qualified with a specific
6702 - category. Emulate the cpv_expand() function that's used by
6703 - dbapi.match() in cases like this. If there are multiple
6704 - matches, it's often due to a new-style virtual that has
6705 - been added, so try to filter those out to avoid raising
6706 - a ValueError.
6707 - """
6708 - root_config = self._depgraph.roots[self._root]
6709 - orig_atom = atom
6710 - expanded_atoms = self._depgraph._dep_expand(root_config, atom)
6711 - if len(expanded_atoms) > 1:
6712 - non_virtual_atoms = []
6713 - for x in expanded_atoms:
6714 - if not portage.dep_getkey(x).startswith("virtual/"):
6715 - non_virtual_atoms.append(x)
6716 - if len(non_virtual_atoms) == 1:
6717 - expanded_atoms = non_virtual_atoms
6718 - if len(expanded_atoms) > 1:
6719 - # compatible with portage.cpv_expand()
6720 - raise portage.exception.AmbiguousPackageName(
6721 - [portage.dep_getkey(x) for x in expanded_atoms])
6722 - if expanded_atoms:
6723 - atom = expanded_atoms[0]
6724 - else:
6725 - null_atom = insert_category_into_atom(atom, "null")
6726 - null_cp = portage.dep_getkey(null_atom)
6727 - cat, atom_pn = portage.catsplit(null_cp)
6728 - virts_p = root_config.settings.get_virts_p().get(atom_pn)
6729 - if virts_p:
6730 - # Allow the resolver to choose which virtual.
6731 - atom = insert_category_into_atom(atom, "virtual")
6732 - else:
6733 - atom = insert_category_into_atom(atom, "null")
6734 - return atom
6735 -
6736 - def aux_get(self, cpv, wants):
6737 - metadata = self._cpv_pkg_map[cpv].metadata
6738 - return [metadata.get(x, "") for x in wants]
6739 -
6740 -class Scheduler(PollScheduler):
6741 -
6742 - _opts_ignore_blockers = \
6743 - frozenset(["--buildpkgonly",
6744 - "--fetchonly", "--fetch-all-uri",
6745 - "--nodeps", "--pretend"])
6746 -
6747 - _opts_no_background = \
6748 - frozenset(["--pretend",
6749 - "--fetchonly", "--fetch-all-uri"])
6750 -
6751 - _opts_no_restart = frozenset(["--buildpkgonly",
6752 - "--fetchonly", "--fetch-all-uri", "--pretend"])
6753 -
6754 - _bad_resume_opts = set(["--ask", "--changelog",
6755 - "--resume", "--skipfirst"])
6756 -
6757 - _fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')
6758 -
6759 - class _iface_class(SlotObject):
6760 - __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
6761 - "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
6762 - "scheduleSetup", "scheduleUnpack", "scheduleYield",
6763 - "unregister")
6764 -
6765 - class _fetch_iface_class(SlotObject):
6766 - __slots__ = ("log_file", "schedule")
6767 -
6768 - _task_queues_class = slot_dict_class(
6769 - ("merge", "jobs", "fetch", "unpack"), prefix="")
6770 -
6771 - class _build_opts_class(SlotObject):
6772 - __slots__ = ("buildpkg", "buildpkgonly",
6773 - "fetch_all_uri", "fetchonly", "pretend")
6774 -
6775 - class _binpkg_opts_class(SlotObject):
6776 - __slots__ = ("fetchonly", "getbinpkg", "pretend")
6777 -
6778 - class _pkg_count_class(SlotObject):
6779 - __slots__ = ("curval", "maxval")
6780 -
6781 - class _emerge_log_class(SlotObject):
6782 - __slots__ = ("xterm_titles",)
6783 -
6784 - def log(self, *pargs, **kwargs):
6785 - if not self.xterm_titles:
6786 - # Avoid interference with the scheduler's status display.
6787 - kwargs.pop("short_msg", None)
6788 - emergelog(self.xterm_titles, *pargs, **kwargs)
6789 -
6790 - class _failed_pkg(SlotObject):
6791 - __slots__ = ("build_dir", "build_log", "pkg", "returncode")
6792 -
6793 - class _ConfigPool(object):
6794 - """Interface for a task to temporarily allocate a config
6795 - instance from a pool. This allows a task to be constructed
6796 - long before the config instance actually becomes needed, like
6797 - when prefetchers are constructed for the whole merge list."""
6798 - __slots__ = ("_root", "_allocate", "_deallocate")
6799 - def __init__(self, root, allocate, deallocate):
6800 - self._root = root
6801 - self._allocate = allocate
6802 - self._deallocate = deallocate
6803 - def allocate(self):
6804 - return self._allocate(self._root)
6805 - def deallocate(self, settings):
6806 - self._deallocate(settings)
6807 -
6808 - class _unknown_internal_error(portage.exception.PortageException):
6809 - """
6810 - Used internally to terminate scheduling. The specific reason for
6811 - the failure should have been dumped to stderr.
6812 - """
6813 - def __init__(self, value=""):
6814 - portage.exception.PortageException.__init__(self, value)
6815 -
6816 - def __init__(self, settings, trees, mtimedb, myopts,
6817 - spinner, mergelist, favorites, digraph):
6818 - PollScheduler.__init__(self)
6819 - self.settings = settings
6820 - self.target_root = settings["ROOT"]
6821 - self.trees = trees
6822 - self.myopts = myopts
6823 - self._spinner = spinner
6824 - self._mtimedb = mtimedb
6825 - self._mergelist = mergelist
6826 - self._favorites = favorites
6827 - self._args_set = InternalPackageSet(favorites)
6828 - self._build_opts = self._build_opts_class()
6829 - for k in self._build_opts.__slots__:
6830 - setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
6831 - self._binpkg_opts = self._binpkg_opts_class()
6832 - for k in self._binpkg_opts.__slots__:
6833 - setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
6834 -
6835 - self.curval = 0
6836 - self._logger = self._emerge_log_class()
6837 - self._task_queues = self._task_queues_class()
6838 - for k in self._task_queues.allowed_keys:
6839 - setattr(self._task_queues, k,
6840 - SequentialTaskQueue())
6841 -
6842 - # Holds merges that will wait to be executed when no builds are
6843 - # executing. This is useful for system packages since dependencies
6844 - # on system packages are frequently unspecified.
6845 - self._merge_wait_queue = []
6846 - # Holds merges that have been transferred from the merge_wait_queue to
6847 - # the actual merge queue. They are removed from this list upon
6848 - # completion. Other packages can start building only when this list is
6849 - # empty.
6850 - self._merge_wait_scheduled = []
6851 -
6852 - # Holds system packages and their deep runtime dependencies. Before
6853 - # being merged, these packages go to merge_wait_queue, to be merged
6854 - # when no other packages are building.
6855 - self._deep_system_deps = set()
6856 -
6857 - # Holds packages to merge which will satisfy currently unsatisfied
6858 - # deep runtime dependencies of system packages. If this is not empty
6859 - # then no parallel builds will be spawned until it is empty. This
6860 - # minimizes the possibility that a build will fail due to the system
6861 - # being in a fragile state. For example, see bug #259954.
6862 - self._unsatisfied_system_deps = set()
6863 -
6864 - self._status_display = JobStatusDisplay(
6865 - xterm_titles=('notitles' not in settings.features))
6866 - self._max_load = myopts.get("--load-average")
6867 - max_jobs = myopts.get("--jobs")
6868 - if max_jobs is None:
6869 - max_jobs = 1
6870 - self._set_max_jobs(max_jobs)
6871 -
6872 - # The root where the currently running
6873 - # portage instance is installed.
6874 - self._running_root = trees["/"]["root_config"]
6875 - self.edebug = 0
6876 - if settings.get("PORTAGE_DEBUG", "") == "1":
6877 - self.edebug = 1
6878 - self.pkgsettings = {}
6879 - self._config_pool = {}
6880 - self._blocker_db = {}
6881 - for root in trees:
6882 - self._config_pool[root] = []
6883 - self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
6884 -
6885 - fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
6886 - schedule=self._schedule_fetch)
6887 - self._sched_iface = self._iface_class(
6888 - dblinkEbuildPhase=self._dblink_ebuild_phase,
6889 - dblinkDisplayMerge=self._dblink_display_merge,
6890 - dblinkElog=self._dblink_elog,
6891 - dblinkEmergeLog=self._dblink_emerge_log,
6892 - fetch=fetch_iface, register=self._register,
6893 - schedule=self._schedule_wait,
6894 - scheduleSetup=self._schedule_setup,
6895 - scheduleUnpack=self._schedule_unpack,
6896 - scheduleYield=self._schedule_yield,
6897 - unregister=self._unregister)
6898 -
6899 - self._prefetchers = weakref.WeakValueDictionary()
6900 - self._pkg_queue = []
6901 - self._completed_tasks = set()
6902 -
6903 - self._failed_pkgs = []
6904 - self._failed_pkgs_all = []
6905 - self._failed_pkgs_die_msgs = []
6906 - self._post_mod_echo_msgs = []
6907 - self._parallel_fetch = False
6908 - merge_count = len([x for x in mergelist \
6909 - if isinstance(x, Package) and x.operation == "merge"])
6910 - self._pkg_count = self._pkg_count_class(
6911 - curval=0, maxval=merge_count)
6912 - self._status_display.maxval = self._pkg_count.maxval
6913 -
6914 - # The load average takes some time to respond when new
6915 - # jobs are added, so we need to limit the rate of adding
6916 - # new jobs.
6917 - self._job_delay_max = 10
6918 - self._job_delay_factor = 1.0
6919 - self._job_delay_exp = 1.5
6920 - self._previous_job_start_time = None
6921 -
6922 - self._set_digraph(digraph)
6923 -
6924 - # This is used to memoize the _choose_pkg() result when
6925 - # no packages can be chosen until one of the existing
6926 - # jobs completes.
6927 - self._choose_pkg_return_early = False
6928 -
6929 - features = self.settings.features
6930 - if "parallel-fetch" in features and \
6931 - not ("--pretend" in self.myopts or \
6932 - "--fetch-all-uri" in self.myopts or \
6933 - "--fetchonly" in self.myopts):
6934 - if "distlocks" not in features:
6935 - portage.writemsg(red("!!!")+"\n", noiselevel=-1)
6936 - portage.writemsg(red("!!!")+" parallel-fetching " + \
6937 - "requires the distlocks feature enabled"+"\n",
6938 - noiselevel=-1)
6939 - portage.writemsg(red("!!!")+" you have it disabled, " + \
6940 - "thus parallel-fetching is being disabled"+"\n",
6941 - noiselevel=-1)
6942 - portage.writemsg(red("!!!")+"\n", noiselevel=-1)
6943 - elif len(mergelist) > 1:
6944 - self._parallel_fetch = True
6945 -
6946 - if self._parallel_fetch:
6947 - # clear out existing fetch log if it exists
6948 - try:
6949 - open(self._fetch_log, 'w')
6950 - except EnvironmentError:
6951 - pass
6952 -
6953 - self._running_portage = None
6954 - portage_match = self._running_root.trees["vartree"].dbapi.match(
6955 - portage.const.PORTAGE_PACKAGE_ATOM)
6956 - if portage_match:
6957 - cpv = portage_match.pop()
6958 - self._running_portage = self._pkg(cpv, "installed",
6959 - self._running_root, installed=True)
6960 -
6961 - def _poll(self, timeout=None):
6962 - self._schedule()
6963 - PollScheduler._poll(self, timeout=timeout)
6964 -
6965 - def _set_max_jobs(self, max_jobs):
6966 - self._max_jobs = max_jobs
6967 - self._task_queues.jobs.max_jobs = max_jobs
6968 -
6969 - def _background_mode(self):
6970 - """
6971 - Check if background mode is enabled and adjust states as necessary.
6972 -
6973 - @rtype: bool
6974 - @returns: True if background mode is enabled, False otherwise.
6975 - """
6976 - background = (self._max_jobs is True or \
6977 - self._max_jobs > 1 or "--quiet" in self.myopts) and \
6978 - not bool(self._opts_no_background.intersection(self.myopts))
6979 -
6980 - if background:
6981 - interactive_tasks = self._get_interactive_tasks()
6982 - if interactive_tasks:
6983 - background = False
6984 - writemsg_level(">>> Sending package output to stdio due " + \
6985 - "to interactive package(s):\n",
6986 - level=logging.INFO, noiselevel=-1)
6987 - msg = [""]
6988 - for pkg in interactive_tasks:
6989 - pkg_str = " " + colorize("INFORM", str(pkg.cpv))
6990 - if pkg.root != "/":
6991 - pkg_str += " for " + pkg.root
6992 - msg.append(pkg_str)
6993 - msg.append("")
6994 - writemsg_level("".join("%s\n" % (l,) for l in msg),
6995 - level=logging.INFO, noiselevel=-1)
6996 - if self._max_jobs is True or self._max_jobs > 1:
6997 - self._set_max_jobs(1)
6998 - writemsg_level(">>> Setting --jobs=1 due " + \
6999 - "to the above interactive package(s)\n",
7000 - level=logging.INFO, noiselevel=-1)
7001 -
7002 - self._status_display.quiet = \
7003 - not background or \
7004 - ("--quiet" in self.myopts and \
7005 - "--verbose" not in self.myopts)
7006 -
7007 - self._logger.xterm_titles = \
7008 - "notitles" not in self.settings.features and \
7009 - self._status_display.quiet
7010 -
7011 - return background
7012 -
7013 - def _get_interactive_tasks(self):
7014 - from portage import flatten
7015 - from portage.dep import use_reduce, paren_reduce
7016 - interactive_tasks = []
7017 - for task in self._mergelist:
7018 - if not (isinstance(task, Package) and \
7019 - task.operation == "merge"):
7020 - continue
7021 - try:
7022 - properties = flatten(use_reduce(paren_reduce(
7023 - task.metadata["PROPERTIES"]), uselist=task.use.enabled))
7024 - except portage.exception.InvalidDependString, e:
7025 - show_invalid_depstring_notice(task,
7026 - task.metadata["PROPERTIES"], str(e))
7027 - raise self._unknown_internal_error()
7028 - if "interactive" in properties:
7029 - interactive_tasks.append(task)
7030 - return interactive_tasks
7031 -
7032 - def _set_digraph(self, digraph):
7033 - if "--nodeps" in self.myopts or \
7034 - (self._max_jobs is not True and self._max_jobs < 2):
7035 - # save some memory
7036 - self._digraph = None
7037 - return
7038 -
7039 - self._digraph = digraph
7040 - self._find_system_deps()
7041 - self._prune_digraph()
7042 - self._prevent_builddir_collisions()
7043 -
7044 - def _find_system_deps(self):
7045 - """
7046 - Find system packages and their deep runtime dependencies. Before being
7047 - merged, these packages go to merge_wait_queue, to be merged when no
7048 - other packages are building.
7049 - """
7050 - deep_system_deps = self._deep_system_deps
7051 - deep_system_deps.clear()
7052 - deep_system_deps.update(
7053 - _find_deep_system_runtime_deps(self._digraph))
7054 - deep_system_deps.difference_update([pkg for pkg in \
7055 - deep_system_deps if pkg.operation != "merge"])
7056 -
7057 - def _prune_digraph(self):
7058 - """
7059 - Prune any root nodes that are irrelevant.
7060 - """
7061 -
7062 - graph = self._digraph
7063 - completed_tasks = self._completed_tasks
7064 - removed_nodes = set()
7065 - while True:
7066 - for node in graph.root_nodes():
7067 - if not isinstance(node, Package) or \
7068 - (node.installed and node.operation == "nomerge") or \
7069 - node.onlydeps or \
7070 - node in completed_tasks:
7071 - removed_nodes.add(node)
7072 - if removed_nodes:
7073 - graph.difference_update(removed_nodes)
7074 - if not removed_nodes:
7075 - break
7076 - removed_nodes.clear()
7077 -
7078 - def _prevent_builddir_collisions(self):
7079 - """
7080 - When building stages, sometimes the same exact cpv needs to be merged
7081 - to both $ROOTs. Add edges to the digraph in order to avoid collisions
7082 - in the builddir. Currently, normal file locks would be inappropriate
7083 - for this purpose since emerge holds all of it's build dir locks from
7084 - the main process.
7085 - """
7086 - cpv_map = {}
7087 - for pkg in self._mergelist:
7088 - if not isinstance(pkg, Package):
7089 - # a satisfied blocker
7090 - continue
7091 - if pkg.installed:
7092 - continue
7093 - if pkg.cpv not in cpv_map:
7094 - cpv_map[pkg.cpv] = [pkg]
7095 - continue
7096 - for earlier_pkg in cpv_map[pkg.cpv]:
7097 - self._digraph.add(earlier_pkg, pkg,
7098 - priority=DepPriority(buildtime=True))
7099 - cpv_map[pkg.cpv].append(pkg)
7100 -
7101 - class _pkg_failure(portage.exception.PortageException):
7102 - """
7103 - An instance of this class is raised by unmerge() when
7104 - an uninstallation fails.
7105 - """
7106 - status = 1
7107 - def __init__(self, *pargs):
7108 - portage.exception.PortageException.__init__(self, pargs)
7109 - if pargs:
7110 - self.status = pargs[0]
7111 -
7112 - def _schedule_fetch(self, fetcher):
7113 - """
7114 - Schedule a fetcher on the fetch queue, in order to
7115 - serialize access to the fetch log.
7116 - """
7117 - self._task_queues.fetch.addFront(fetcher)
7118 -
7119 - def _schedule_setup(self, setup_phase):
7120 - """
7121 - Schedule a setup phase on the merge queue, in order to
7122 - serialize unsandboxed access to the live filesystem.
7123 - """
7124 - self._task_queues.merge.addFront(setup_phase)
7125 - self._schedule()
7126 -
7127 - def _schedule_unpack(self, unpack_phase):
7128 - """
7129 - Schedule an unpack phase on the unpack queue, in order
7130 - to serialize $DISTDIR access for live ebuilds.
7131 - """
7132 - self._task_queues.unpack.add(unpack_phase)
7133 -
7134 - def _find_blockers(self, new_pkg):
7135 - """
7136 - Returns a callable which should be called only when
7137 - the vdb lock has been acquired.
7138 - """
7139 - def get_blockers():
7140 - return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
7141 - return get_blockers
7142 -
7143 - def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
7144 - if self._opts_ignore_blockers.intersection(self.myopts):
7145 - return None
7146 -
7147 - # Call gc.collect() here to avoid heap overflow that
7148 - # triggers 'Cannot allocate memory' errors (reported
7149 - # with python-2.5).
7150 - import gc
7151 - gc.collect()
7152 -
7153 - blocker_db = self._blocker_db[new_pkg.root]
7154 -
7155 - blocker_dblinks = []
7156 - for blocking_pkg in blocker_db.findInstalledBlockers(
7157 - new_pkg, acquire_lock=acquire_lock):
7158 - if new_pkg.slot_atom == blocking_pkg.slot_atom:
7159 - continue
7160 - if new_pkg.cpv == blocking_pkg.cpv:
7161 - continue
7162 - blocker_dblinks.append(portage.dblink(
7163 - blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
7164 - self.pkgsettings[blocking_pkg.root], treetype="vartree",
7165 - vartree=self.trees[blocking_pkg.root]["vartree"]))
7166 -
7167 - gc.collect()
7168 -
7169 - return blocker_dblinks
7170 -
7171 - def _dblink_pkg(self, pkg_dblink):
7172 - cpv = pkg_dblink.mycpv
7173 - type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
7174 - root_config = self.trees[pkg_dblink.myroot]["root_config"]
7175 - installed = type_name == "installed"
7176 - return self._pkg(cpv, type_name, root_config, installed=installed)
7177 -
7178 - def _append_to_log_path(self, log_path, msg):
7179 - f = open(log_path, 'a')
7180 - try:
7181 - f.write(msg)
7182 - finally:
7183 - f.close()
7184 -
7185 - def _dblink_elog(self, pkg_dblink, phase, func, msgs):
7186 -
7187 - log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
7188 - log_file = None
7189 - out = sys.stdout
7190 - background = self._background
7191 -
7192 - if background and log_path is not None:
7193 - log_file = open(log_path, 'a')
7194 - out = log_file
7195 -
7196 - try:
7197 - for msg in msgs:
7198 - func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
7199 - finally:
7200 - if log_file is not None:
7201 - log_file.close()
7202 -
7203 - def _dblink_emerge_log(self, msg):
7204 - self._logger.log(msg)
7205 -
7206 - def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
7207 - log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
7208 - background = self._background
7209 -
7210 - if log_path is None:
7211 - if not (background and level < logging.WARN):
7212 - portage.util.writemsg_level(msg,
7213 - level=level, noiselevel=noiselevel)
7214 - else:
7215 - if not background:
7216 - portage.util.writemsg_level(msg,
7217 - level=level, noiselevel=noiselevel)
7218 - self._append_to_log_path(log_path, msg)
7219 -
7220 - def _dblink_ebuild_phase(self,
7221 - pkg_dblink, pkg_dbapi, ebuild_path, phase):
7222 - """
7223 - Using this callback for merge phases allows the scheduler
7224 - to run while these phases execute asynchronously, and allows
7225 - the scheduler control output handling.
7226 - """
7227 -
7228 - scheduler = self._sched_iface
7229 - settings = pkg_dblink.settings
7230 - pkg = self._dblink_pkg(pkg_dblink)
7231 - background = self._background
7232 - log_path = settings.get("PORTAGE_LOG_FILE")
7233 -
7234 - ebuild_phase = EbuildPhase(background=background,
7235 - pkg=pkg, phase=phase, scheduler=scheduler,
7236 - settings=settings, tree=pkg_dblink.treetype)
7237 - ebuild_phase.start()
7238 - ebuild_phase.wait()
7239 -
7240 - return ebuild_phase.returncode
7241 -
7242 - def _generate_digests(self):
7243 - """
7244 - Generate digests if necessary for --digests or FEATURES=digest.
7245 - In order to avoid interference, this must done before parallel
7246 - tasks are started.
7247 - """
7248 -
7249 - if '--fetchonly' in self.myopts:
7250 - return os.EX_OK
7251 -
7252 - digest = '--digest' in self.myopts
7253 - if not digest:
7254 - for pkgsettings in self.pkgsettings.itervalues():
7255 - if 'digest' in pkgsettings.features:
7256 - digest = True
7257 - break
7258 -
7259 - if not digest:
7260 - return os.EX_OK
7261 -
7262 - for x in self._mergelist:
7263 - if not isinstance(x, Package) or \
7264 - x.type_name != 'ebuild' or \
7265 - x.operation != 'merge':
7266 - continue
7267 - pkgsettings = self.pkgsettings[x.root]
7268 - if '--digest' not in self.myopts and \
7269 - 'digest' not in pkgsettings.features:
7270 - continue
7271 - portdb = x.root_config.trees['porttree'].dbapi
7272 - ebuild_path = portdb.findname(x.cpv)
7273 - if not ebuild_path:
7274 - writemsg_level(
7275 - "!!! Could not locate ebuild for '%s'.\n" \
7276 - % x.cpv, level=logging.ERROR, noiselevel=-1)
7277 - return 1
7278 - pkgsettings['O'] = os.path.dirname(ebuild_path)
7279 - if not portage.digestgen([], pkgsettings, myportdb=portdb):
7280 - writemsg_level(
7281 - "!!! Unable to generate manifest for '%s'.\n" \
7282 - % x.cpv, level=logging.ERROR, noiselevel=-1)
7283 - return 1
7284 -
7285 - return os.EX_OK
7286 -
7287 - def _check_manifests(self):
7288 - # Verify all the manifests now so that the user is notified of failure
7289 - # as soon as possible.
7290 - if "strict" not in self.settings.features or \
7291 - "--fetchonly" in self.myopts or \
7292 - "--fetch-all-uri" in self.myopts:
7293 - return os.EX_OK
7294 -
7295 - shown_verifying_msg = False
7296 - quiet_settings = {}
7297 - for myroot, pkgsettings in self.pkgsettings.iteritems():
7298 - quiet_config = portage.config(clone=pkgsettings)
7299 - quiet_config["PORTAGE_QUIET"] = "1"
7300 - quiet_config.backup_changes("PORTAGE_QUIET")
7301 - quiet_settings[myroot] = quiet_config
7302 - del quiet_config
7303 -
7304 - for x in self._mergelist:
7305 - if not isinstance(x, Package) or \
7306 - x.type_name != "ebuild":
7307 - continue
7308 -
7309 - if not shown_verifying_msg:
7310 - shown_verifying_msg = True
7311 - self._status_msg("Verifying ebuild manifests")
7312 -
7313 - root_config = x.root_config
7314 - portdb = root_config.trees["porttree"].dbapi
7315 - quiet_config = quiet_settings[root_config.root]
7316 - quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
7317 - if not portage.digestcheck([], quiet_config, strict=True):
7318 - return 1
7319 -
7320 - return os.EX_OK
7321 -
7322 - def _add_prefetchers(self):
7323 -
7324 - if not self._parallel_fetch:
7325 - return
7326 -
7327 - if self._parallel_fetch:
7328 - self._status_msg("Starting parallel fetch")
7329 -
7330 - prefetchers = self._prefetchers
7331 - getbinpkg = "--getbinpkg" in self.myopts
7332 -
7333 - # In order to avoid "waiting for lock" messages
7334 - # at the beginning, which annoy users, never
7335 - # spawn a prefetcher for the first package.
7336 - for pkg in self._mergelist[1:]:
7337 - prefetcher = self._create_prefetcher(pkg)
7338 - if prefetcher is not None:
7339 - self._task_queues.fetch.add(prefetcher)
7340 - prefetchers[pkg] = prefetcher
7341 -
7342 - def _create_prefetcher(self, pkg):
7343 - """
7344 - @return: a prefetcher, or None if not applicable
7345 - """
7346 - prefetcher = None
7347 -
7348 - if not isinstance(pkg, Package):
7349 - pass
7350 -
7351 - elif pkg.type_name == "ebuild":
7352 -
7353 - prefetcher = EbuildFetcher(background=True,
7354 - config_pool=self._ConfigPool(pkg.root,
7355 - self._allocate_config, self._deallocate_config),
7356 - fetchonly=1, logfile=self._fetch_log,
7357 - pkg=pkg, prefetch=True, scheduler=self._sched_iface)
7358 -
7359 - elif pkg.type_name == "binary" and \
7360 - "--getbinpkg" in self.myopts and \
7361 - pkg.root_config.trees["bintree"].isremote(pkg.cpv):
7362 -
7363 - prefetcher = BinpkgPrefetcher(background=True,
7364 - pkg=pkg, scheduler=self._sched_iface)
7365 -
7366 - return prefetcher
7367 -
7368 - def _is_restart_scheduled(self):
7369 - """
7370 - Check if the merge list contains a replacement
7371 - for the current running instance, that will result
7372 - in restart after merge.
7373 - @rtype: bool
7374 - @returns: True if a restart is scheduled, False otherwise.
7375 - """
7376 - if self._opts_no_restart.intersection(self.myopts):
7377 - return False
7378 -
7379 - mergelist = self._mergelist
7380 -
7381 - for i, pkg in enumerate(mergelist):
7382 - if self._is_restart_necessary(pkg) and \
7383 - i != len(mergelist) - 1:
7384 - return True
7385 -
7386 - return False
7387 -
7388 - def _is_restart_necessary(self, pkg):
7389 - """
7390 - @return: True if merging the given package
7391 - requires restart, False otherwise.
7392 - """
7393 -
7394 - # Figure out if we need a restart.
7395 - if pkg.root == self._running_root.root and \
7396 - EPREFIX == BPREFIX and \
7397 - portage.match_from_list(
7398 - portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
7399 - if self._running_portage:
7400 - return pkg.cpv != self._running_portage.cpv
7401 - return True
7402 - return False
7403 -
7404 - def _restart_if_necessary(self, pkg):
7405 - """
7406 - Use execv() to restart emerge. This happens
7407 - if portage upgrades itself and there are
7408 - remaining packages in the list.
7409 - """
7410 -
7411 - if self._opts_no_restart.intersection(self.myopts):
7412 - return
7413 -
7414 - if not self._is_restart_necessary(pkg):
7415 - return
7416 -
7417 - if pkg == self._mergelist[-1]:
7418 - return
7419 -
7420 - self._main_loop_cleanup()
7421 -
7422 - logger = self._logger
7423 - pkg_count = self._pkg_count
7424 - mtimedb = self._mtimedb
7425 - bad_resume_opts = self._bad_resume_opts
7426 -
7427 - logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
7428 - (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
7429 -
7430 - logger.log(" *** RESTARTING " + \
7431 - "emerge via exec() after change of " + \
7432 - "portage version.")
7433 -
7434 - mtimedb["resume"]["mergelist"].remove(list(pkg))
7435 - mtimedb.commit()
7436 - portage.run_exitfuncs()
7437 - mynewargv = [sys.argv[0], "--resume"]
7438 - resume_opts = self.myopts.copy()
7439 - # For automatic resume, we need to prevent
7440 - # any of bad_resume_opts from leaking in
7441 - # via EMERGE_DEFAULT_OPTS.
7442 - resume_opts["--ignore-default-opts"] = True
7443 - for myopt, myarg in resume_opts.iteritems():
7444 - if myopt not in bad_resume_opts:
7445 - if myarg is True:
7446 - mynewargv.append(myopt)
7447 - else:
7448 - mynewargv.append(myopt +"="+ str(myarg))
7449 - # priority only needs to be adjusted on the first run
7450 - os.environ["PORTAGE_NICENESS"] = "0"
7451 - os.execv(mynewargv[0], mynewargv)
7452 -
7453 - def merge(self):
7454 -
7455 - if "--resume" in self.myopts:
7456 - # We're resuming.
7457 - portage.writemsg_stdout(
7458 - colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
7459 - self._logger.log(" *** Resuming merge...")
7460 -
7461 - self._save_resume_list()
7462 -
7463 - try:
7464 - self._background = self._background_mode()
7465 - except self._unknown_internal_error:
7466 - return 1
7467 -
7468 - for root in self.trees:
7469 - root_config = self.trees[root]["root_config"]
7470 -
7471 - # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
7472 - # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
7473 - # for ensuring sane $PWD (bug #239560) and storing elog messages.
7474 - tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
7475 - if not tmpdir or not os.path.isdir(tmpdir):
7476 - msg = "The directory specified in your " + \
7477 - "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
7478 - "does not exist. Please create this " + \
7479 - "directory or correct your PORTAGE_TMPDIR setting."
7480 - msg = textwrap.wrap(msg, 70)
7481 - out = portage.output.EOutput()
7482 - for l in msg:
7483 - out.eerror(l)
7484 - return 1
7485 -
7486 - if self._background:
7487 - root_config.settings.unlock()
7488 - root_config.settings["PORTAGE_BACKGROUND"] = "1"
7489 - root_config.settings.backup_changes("PORTAGE_BACKGROUND")
7490 - root_config.settings.lock()
7491 -
7492 - self.pkgsettings[root] = portage.config(
7493 - clone=root_config.settings)
7494 -
7495 - rval = self._generate_digests()
7496 - if rval != os.EX_OK:
7497 - return rval
7498 -
7499 - rval = self._check_manifests()
7500 - if rval != os.EX_OK:
7501 - return rval
7502 -
7503 - keep_going = "--keep-going" in self.myopts
7504 - fetchonly = self._build_opts.fetchonly
7505 - mtimedb = self._mtimedb
7506 - failed_pkgs = self._failed_pkgs
7507 -
7508 - while True:
7509 - rval = self._merge()
7510 - if rval == os.EX_OK or fetchonly or not keep_going:
7511 - break
7512 - if "resume" not in mtimedb:
7513 - break
7514 - mergelist = self._mtimedb["resume"].get("mergelist")
7515 - if not mergelist:
7516 - break
7517 -
7518 - if not failed_pkgs:
7519 - break
7520 -
7521 - for failed_pkg in failed_pkgs:
7522 - mergelist.remove(list(failed_pkg.pkg))
7523 -
7524 - self._failed_pkgs_all.extend(failed_pkgs)
7525 - del failed_pkgs[:]
7526 -
7527 - if not mergelist:
7528 - break
7529 -
7530 - if not self._calc_resume_list():
7531 - break
7532 -
7533 - clear_caches(self.trees)
7534 - if not self._mergelist:
7535 - break
7536 -
7537 - self._save_resume_list()
7538 - self._pkg_count.curval = 0
7539 - self._pkg_count.maxval = len([x for x in self._mergelist \
7540 - if isinstance(x, Package) and x.operation == "merge"])
7541 - self._status_display.maxval = self._pkg_count.maxval
7542 -
7543 - self._logger.log(" *** Finished. Cleaning up...")
7544 -
7545 - if failed_pkgs:
7546 - self._failed_pkgs_all.extend(failed_pkgs)
7547 - del failed_pkgs[:]
7548 -
7549 - background = self._background
7550 - failure_log_shown = False
7551 - if background and len(self._failed_pkgs_all) == 1:
7552 - # If only one package failed then just show it's
7553 - # whole log for easy viewing.
7554 - failed_pkg = self._failed_pkgs_all[-1]
7555 - build_dir = failed_pkg.build_dir
7556 - log_file = None
7557 -
7558 - log_paths = [failed_pkg.build_log]
7559 -
7560 - log_path = self._locate_failure_log(failed_pkg)
7561 - if log_path is not None:
7562 - try:
7563 - log_file = open(log_path)
7564 - except IOError:
7565 - pass
7566 -
7567 - if log_file is not None:
7568 - try:
7569 - for line in log_file:
7570 - writemsg_level(line, noiselevel=-1)
7571 - finally:
7572 - log_file.close()
7573 - failure_log_shown = True
7574 -
7575 - # Dump mod_echo output now since it tends to flood the terminal.
7576 - # This allows us to avoid having more important output, generated
7577 - # later, from being swept away by the mod_echo output.
7578 - mod_echo_output = _flush_elog_mod_echo()
7579 -
7580 - if background and not failure_log_shown and \
7581 - self._failed_pkgs_all and \
7582 - self._failed_pkgs_die_msgs and \
7583 - not mod_echo_output:
7584 -
7585 - printer = portage.output.EOutput()
7586 - for mysettings, key, logentries in self._failed_pkgs_die_msgs:
7587 - root_msg = ""
7588 - if mysettings["ROOT"] != "/":
7589 - root_msg = " merged to %s" % mysettings["ROOT"]
7590 - print
7591 - printer.einfo("Error messages for package %s%s:" % \
7592 - (colorize("INFORM", key), root_msg))
7593 - print
7594 - for phase in portage.const.EBUILD_PHASES:
7595 - if phase not in logentries:
7596 - continue
7597 - for msgtype, msgcontent in logentries[phase]:
7598 - if isinstance(msgcontent, basestring):
7599 - msgcontent = [msgcontent]
7600 - for line in msgcontent:
7601 - printer.eerror(line.strip("\n"))
7602 -
7603 - if self._post_mod_echo_msgs:
7604 - for msg in self._post_mod_echo_msgs:
7605 - msg()
7606 -
7607 - if len(self._failed_pkgs_all) > 1 or \
7608 - (self._failed_pkgs_all and "--keep-going" in self.myopts):
7609 - if len(self._failed_pkgs_all) > 1:
7610 - msg = "The following %d packages have " % \
7611 - len(self._failed_pkgs_all) + \
7612 - "failed to build or install:"
7613 - else:
7614 - msg = "The following package has " + \
7615 - "failed to build or install:"
7616 - prefix = bad(" * ")
7617 - writemsg(prefix + "\n", noiselevel=-1)
7618 - from textwrap import wrap
7619 - for line in wrap(msg, 72):
7620 - writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
7621 - writemsg(prefix + "\n", noiselevel=-1)
7622 - for failed_pkg in self._failed_pkgs_all:
7623 - writemsg("%s\t%s\n" % (prefix,
7624 - colorize("INFORM", str(failed_pkg.pkg))),
7625 - noiselevel=-1)
7626 - writemsg(prefix + "\n", noiselevel=-1)
7627 -
7628 - return rval
7629 -
7630 - def _elog_listener(self, mysettings, key, logentries, fulltext):
7631 - errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
7632 - if errors:
7633 - self._failed_pkgs_die_msgs.append(
7634 - (mysettings, key, errors))
7635 -
7636 - def _locate_failure_log(self, failed_pkg):
7637 -
7638 - build_dir = failed_pkg.build_dir
7639 - log_file = None
7640 -
7641 - log_paths = [failed_pkg.build_log]
7642 -
7643 - for log_path in log_paths:
7644 - if not log_path:
7645 - continue
7646 -
7647 - try:
7648 - log_size = os.stat(log_path).st_size
7649 - except OSError:
7650 - continue
7651 -
7652 - if log_size == 0:
7653 - continue
7654 -
7655 - return log_path
7656 -
7657 - return None
7658 -
7659 - def _add_packages(self):
7660 - pkg_queue = self._pkg_queue
7661 - for pkg in self._mergelist:
7662 - if isinstance(pkg, Package):
7663 - pkg_queue.append(pkg)
7664 - elif isinstance(pkg, Blocker):
7665 - pass
7666 -
7667 - def _system_merge_started(self, merge):
7668 - """
7669 - Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
7670 - """
7671 - graph = self._digraph
7672 - if graph is None:
7673 - return
7674 - pkg = merge.merge.pkg
7675 -
7676 - # Skip this if $ROOT != / since it shouldn't matter if there
7677 - # are unsatisfied system runtime deps in this case.
7678 - if pkg.root != '/':
7679 - return
7680 -
7681 - completed_tasks = self._completed_tasks
7682 - unsatisfied = self._unsatisfied_system_deps
7683 -
7684 - def ignore_non_runtime_or_satisfied(priority):
7685 - """
7686 - Ignore non-runtime and satisfied runtime priorities.
7687 - """
7688 - if isinstance(priority, DepPriority) and \
7689 - not priority.satisfied and \
7690 - (priority.runtime or priority.runtime_post):
7691 - return False
7692 - return True
7693 -
7694 - # When checking for unsatisfied runtime deps, only check
7695 - # direct deps since indirect deps are checked when the
7696 - # corresponding parent is merged.
7697 - for child in graph.child_nodes(pkg,
7698 - ignore_priority=ignore_non_runtime_or_satisfied):
7699 - if not isinstance(child, Package) or \
7700 - child.operation == 'uninstall':
7701 - continue
7702 - if child is pkg:
7703 - continue
7704 - if child.operation == 'merge' and \
7705 - child not in completed_tasks:
7706 - unsatisfied.add(child)
7707 -
7708 - def _merge_wait_exit_handler(self, task):
7709 - self._merge_wait_scheduled.remove(task)
7710 - self._merge_exit(task)
7711 -
7712 - def _merge_exit(self, merge):
7713 - self._do_merge_exit(merge)
7714 - self._deallocate_config(merge.merge.settings)
7715 - if merge.returncode == os.EX_OK and \
7716 - not merge.merge.pkg.installed:
7717 - self._status_display.curval += 1
7718 - self._status_display.merges = len(self._task_queues.merge)
7719 - self._schedule()
7720 -
7721 - def _do_merge_exit(self, merge):
7722 - pkg = merge.merge.pkg
7723 - if merge.returncode != os.EX_OK:
7724 - settings = merge.merge.settings
7725 - build_dir = settings.get("PORTAGE_BUILDDIR")
7726 - build_log = settings.get("PORTAGE_LOG_FILE")
7727 -
7728 - self._failed_pkgs.append(self._failed_pkg(
7729 - build_dir=build_dir, build_log=build_log,
7730 - pkg=pkg,
7731 - returncode=merge.returncode))
7732 - self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
7733 -
7734 - self._status_display.failed = len(self._failed_pkgs)
7735 - return
7736 -
7737 - self._task_complete(pkg)
7738 - pkg_to_replace = merge.merge.pkg_to_replace
7739 - if pkg_to_replace is not None:
7740 - # When a package is replaced, mark it's uninstall
7741 - # task complete (if any).
7742 - uninst_hash_key = \
7743 - ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
7744 - self._task_complete(uninst_hash_key)
7745 -
7746 - if pkg.installed:
7747 - return
7748 -
7749 - self._restart_if_necessary(pkg)
7750 -
7751 - # Call mtimedb.commit() after each merge so that
7752 - # --resume still works after being interrupted
7753 - # by reboot, sigkill or similar.
7754 - mtimedb = self._mtimedb
7755 - mtimedb["resume"]["mergelist"].remove(list(pkg))
7756 - if not mtimedb["resume"]["mergelist"]:
7757 - del mtimedb["resume"]
7758 - mtimedb.commit()
7759 -
7760 - def _build_exit(self, build):
7761 - if build.returncode == os.EX_OK:
7762 - self.curval += 1
7763 - merge = PackageMerge(merge=build)
7764 - if not build.build_opts.buildpkgonly and \
7765 - build.pkg in self._deep_system_deps:
7766 - # Since dependencies on system packages are frequently
7767 - # unspecified, merge them only when no builds are executing.
7768 - self._merge_wait_queue.append(merge)
7769 - merge.addStartListener(self._system_merge_started)
7770 - else:
7771 - merge.addExitListener(self._merge_exit)
7772 - self._task_queues.merge.add(merge)
7773 - self._status_display.merges = len(self._task_queues.merge)
7774 - else:
7775 - settings = build.settings
7776 - build_dir = settings.get("PORTAGE_BUILDDIR")
7777 - build_log = settings.get("PORTAGE_LOG_FILE")
7778 -
7779 - self._failed_pkgs.append(self._failed_pkg(
7780 - build_dir=build_dir, build_log=build_log,
7781 - pkg=build.pkg,
7782 - returncode=build.returncode))
7783 - self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
7784 -
7785 - self._status_display.failed = len(self._failed_pkgs)
7786 - self._deallocate_config(build.settings)
7787 - self._jobs -= 1
7788 - self._status_display.running = self._jobs
7789 - self._schedule()
7790 -
7791 - def _extract_exit(self, build):
7792 - self._build_exit(build)
7793 -
7794 - def _task_complete(self, pkg):
7795 - self._completed_tasks.add(pkg)
7796 - self._unsatisfied_system_deps.discard(pkg)
7797 - self._choose_pkg_return_early = False
7798 -
7799 - def _merge(self):
7800 -
7801 - self._add_prefetchers()
7802 - self._add_packages()
7803 - pkg_queue = self._pkg_queue
7804 - failed_pkgs = self._failed_pkgs
7805 - portage.locks._quiet = self._background
7806 - portage.elog._emerge_elog_listener = self._elog_listener
7807 - rval = os.EX_OK
7808 -
7809 - try:
7810 - self._main_loop()
7811 - finally:
7812 - self._main_loop_cleanup()
7813 - portage.locks._quiet = False
7814 - portage.elog._emerge_elog_listener = None
7815 - if failed_pkgs:
7816 - rval = failed_pkgs[-1].returncode
7817 -
7818 - return rval
7819 -
7820 - def _main_loop_cleanup(self):
7821 - del self._pkg_queue[:]
7822 - self._completed_tasks.clear()
7823 - self._deep_system_deps.clear()
7824 - self._unsatisfied_system_deps.clear()
7825 - self._choose_pkg_return_early = False
7826 - self._status_display.reset()
7827 - self._digraph = None
7828 - self._task_queues.fetch.clear()
7829 -
7830 - def _choose_pkg(self):
7831 - """
7832 - Choose a task that has all it's dependencies satisfied.
7833 - """
7834 -
7835 - if self._choose_pkg_return_early:
7836 - return None
7837 -
7838 - if self._digraph is None:
7839 - if (self._jobs or self._task_queues.merge) and \
7840 - not ("--nodeps" in self.myopts and \
7841 - (self._max_jobs is True or self._max_jobs > 1)):
7842 - self._choose_pkg_return_early = True
7843 - return None
7844 - return self._pkg_queue.pop(0)
7845 -
7846 - if not (self._jobs or self._task_queues.merge):
7847 - return self._pkg_queue.pop(0)
7848 -
7849 - self._prune_digraph()
7850 -
7851 - chosen_pkg = None
7852 - later = set(self._pkg_queue)
7853 - for pkg in self._pkg_queue:
7854 - later.remove(pkg)
7855 - if not self._dependent_on_scheduled_merges(pkg, later):
7856 - chosen_pkg = pkg
7857 - break
7858 -
7859 - if chosen_pkg is not None:
7860 - self._pkg_queue.remove(chosen_pkg)
7861 -
7862 - if chosen_pkg is None:
7863 - # There's no point in searching for a package to
7864 - # choose until at least one of the existing jobs
7865 - # completes.
7866 - self._choose_pkg_return_early = True
7867 -
7868 - return chosen_pkg
7869 -
7870 - def _dependent_on_scheduled_merges(self, pkg, later):
7871 - """
7872 - Traverse the subgraph of the given packages deep dependencies
7873 - to see if it contains any scheduled merges.
7874 - @param pkg: a package to check dependencies for
7875 - @type pkg: Package
7876 - @param later: packages for which dependence should be ignored
7877 - since they will be merged later than pkg anyway and therefore
7878 - delaying the merge of pkg will not result in a more optimal
7879 - merge order
7880 - @type later: set
7881 - @rtype: bool
7882 - @returns: True if the package is dependent, False otherwise.
7883 - """
7884 -
7885 - graph = self._digraph
7886 - completed_tasks = self._completed_tasks
7887 -
7888 - dependent = False
7889 - traversed_nodes = set([pkg])
7890 - direct_deps = graph.child_nodes(pkg)
7891 - node_stack = direct_deps
7892 - direct_deps = frozenset(direct_deps)
7893 - while node_stack:
7894 - node = node_stack.pop()
7895 - if node in traversed_nodes:
7896 - continue
7897 - traversed_nodes.add(node)
7898 - if not ((node.installed and node.operation == "nomerge") or \
7899 - (node.operation == "uninstall" and \
7900 - node not in direct_deps) or \
7901 - node in completed_tasks or \
7902 - node in later):
7903 - dependent = True
7904 - break
7905 - node_stack.extend(graph.child_nodes(node))
7906 -
7907 - return dependent
7908 -
7909 - def _allocate_config(self, root):
7910 - """
7911 - Allocate a unique config instance for a task in order
7912 - to prevent interference between parallel tasks.
7913 - """
7914 - if self._config_pool[root]:
7915 - temp_settings = self._config_pool[root].pop()
7916 - else:
7917 - temp_settings = portage.config(clone=self.pkgsettings[root])
7918 - # Since config.setcpv() isn't guaranteed to call config.reset() due to
7919 - # performance reasons, call it here to make sure all settings from the
7920 - # previous package get flushed out (such as PORTAGE_LOG_FILE).
7921 - temp_settings.reload()
7922 - temp_settings.reset()
7923 - return temp_settings
7924 -
7925 - def _deallocate_config(self, settings):
7926 - self._config_pool[settings["ROOT"]].append(settings)
7927 -
7928 - def _main_loop(self):
7929 -
7930 - # Only allow 1 job max if a restart is scheduled
7931 - # due to portage update.
7932 - if self._is_restart_scheduled() or \
7933 - self._opts_no_background.intersection(self.myopts):
7934 - self._set_max_jobs(1)
7935 -
7936 - merge_queue = self._task_queues.merge
7937 -
7938 - while self._schedule():
7939 - if self._poll_event_handlers:
7940 - self._poll_loop()
7941 -
7942 - while True:
7943 - self._schedule()
7944 - if not (self._jobs or merge_queue):
7945 - break
7946 - if self._poll_event_handlers:
7947 - self._poll_loop()
7948 -
7949 - def _keep_scheduling(self):
7950 - return bool(self._pkg_queue and \
7951 - not (self._failed_pkgs and not self._build_opts.fetchonly))
7952 -
7953 - def _schedule_tasks(self):
7954 -
7955 - # When the number of jobs drops to zero, process all waiting merges.
7956 - if not self._jobs and self._merge_wait_queue:
7957 - for task in self._merge_wait_queue:
7958 - task.addExitListener(self._merge_wait_exit_handler)
7959 - self._task_queues.merge.add(task)
7960 - self._status_display.merges = len(self._task_queues.merge)
7961 - self._merge_wait_scheduled.extend(self._merge_wait_queue)
7962 - del self._merge_wait_queue[:]
7963 -
7964 - self._schedule_tasks_imp()
7965 - self._status_display.display()
7966 -
7967 - state_change = 0
7968 - for q in self._task_queues.values():
7969 - if q.schedule():
7970 - state_change += 1
7971 -
7972 - # Cancel prefetchers if they're the only reason
7973 - # the main poll loop is still running.
7974 - if self._failed_pkgs and not self._build_opts.fetchonly and \
7975 - not (self._jobs or self._task_queues.merge) and \
7976 - self._task_queues.fetch:
7977 - self._task_queues.fetch.clear()
7978 - state_change += 1
7979 -
7980 - if state_change:
7981 - self._schedule_tasks_imp()
7982 - self._status_display.display()
7983 -
7984 - return self._keep_scheduling()
7985 -
7986 - def _job_delay(self):
7987 - """
7988 - @rtype: bool
7989 - @returns: True if job scheduling should be delayed, False otherwise.
7990 - """
7991 -
7992 - if self._jobs and self._max_load is not None:
7993 -
7994 - current_time = time.time()
7995 -
7996 - delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
7997 - if delay > self._job_delay_max:
7998 - delay = self._job_delay_max
7999 - if (current_time - self._previous_job_start_time) < delay:
8000 - return True
8001 -
8002 - return False
8003 -
8004 - def _schedule_tasks_imp(self):
8005 - """
8006 - @rtype: bool
8007 - @returns: True if state changed, False otherwise.
8008 - """
8009 -
8010 - state_change = 0
8011 -
8012 - while True:
8013 -
8014 - if not self._keep_scheduling():
8015 - return bool(state_change)
8016 -
8017 - if self._choose_pkg_return_early or \
8018 - self._merge_wait_scheduled or \
8019 - (self._jobs and self._unsatisfied_system_deps) or \
8020 - not self._can_add_job() or \
8021 - self._job_delay():
8022 - return bool(state_change)
8023 -
8024 - pkg = self._choose_pkg()
8025 - if pkg is None:
8026 - return bool(state_change)
8027 -
8028 - state_change += 1
8029 -
8030 - if not pkg.installed:
8031 - self._pkg_count.curval += 1
8032 -
8033 - task = self._task(pkg)
8034 -
8035 - if pkg.installed:
8036 - merge = PackageMerge(merge=task)
8037 - merge.addExitListener(self._merge_exit)
8038 - self._task_queues.merge.add(merge)
8039 -
8040 - elif pkg.built:
8041 - self._jobs += 1
8042 - self._previous_job_start_time = time.time()
8043 - self._status_display.running = self._jobs
8044 - task.addExitListener(self._extract_exit)
8045 - self._task_queues.jobs.add(task)
8046 -
8047 - else:
8048 - self._jobs += 1
8049 - self._previous_job_start_time = time.time()
8050 - self._status_display.running = self._jobs
8051 - task.addExitListener(self._build_exit)
8052 - self._task_queues.jobs.add(task)
8053 -
8054 - return bool(state_change)
8055 -
8056 - def _task(self, pkg):
8057 -
8058 - pkg_to_replace = None
8059 - if pkg.operation != "uninstall":
8060 - vardb = pkg.root_config.trees["vartree"].dbapi
8061 - previous_cpv = vardb.match(pkg.slot_atom)
8062 - if previous_cpv:
8063 - previous_cpv = previous_cpv.pop()
8064 - pkg_to_replace = self._pkg(previous_cpv,
8065 - "installed", pkg.root_config, installed=True)
8066 -
8067 - task = MergeListItem(args_set=self._args_set,
8068 - background=self._background, binpkg_opts=self._binpkg_opts,
8069 - build_opts=self._build_opts,
8070 - config_pool=self._ConfigPool(pkg.root,
8071 - self._allocate_config, self._deallocate_config),
8072 - emerge_opts=self.myopts,
8073 - find_blockers=self._find_blockers(pkg), logger=self._logger,
8074 - mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
8075 - pkg_to_replace=pkg_to_replace,
8076 - prefetcher=self._prefetchers.get(pkg),
8077 - scheduler=self._sched_iface,
8078 - settings=self._allocate_config(pkg.root),
8079 - statusMessage=self._status_msg,
8080 - world_atom=self._world_atom)
8081 -
8082 - return task
8083 -
8084 - def _failed_pkg_msg(self, failed_pkg, action, preposition):
8085 - pkg = failed_pkg.pkg
8086 - msg = "%s to %s %s" % \
8087 - (bad("Failed"), action, colorize("INFORM", pkg.cpv))
8088 - if pkg.root != "/":
8089 - msg += " %s %s" % (preposition, pkg.root)
8090 -
8091 - log_path = self._locate_failure_log(failed_pkg)
8092 - if log_path is not None:
8093 - msg += ", Log file:"
8094 - self._status_msg(msg)
8095 -
8096 - if log_path is not None:
8097 - self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
8098 -
8099 - def _status_msg(self, msg):
8100 - """
8101 - Display a brief status message (no newlines) in the status display.
8102 - This is called by tasks to provide feedback to the user. This
8103 - delegates the resposibility of generating \r and \n control characters,
8104 - to guarantee that lines are created or erased when necessary and
8105 - appropriate.
8106 -
8107 - @type msg: str
8108 - @param msg: a brief status message (no newlines allowed)
8109 - """
8110 - if not self._background:
8111 - writemsg_level("\n")
8112 - self._status_display.displayMessage(msg)
8113 -
8114 - def _save_resume_list(self):
8115 - """
8116 - Do this before verifying the ebuild Manifests since it might
8117 - be possible for the user to use --resume --skipfirst get past
8118 - a non-essential package with a broken digest.
8119 - """
8120 - mtimedb = self._mtimedb
8121 - mtimedb["resume"]["mergelist"] = [list(x) \
8122 - for x in self._mergelist \
8123 - if isinstance(x, Package) and x.operation == "merge"]
8124 -
8125 - mtimedb.commit()
8126 -
8127 - def _calc_resume_list(self):
8128 - """
8129 - Use the current resume list to calculate a new one,
8130 - dropping any packages with unsatisfied deps.
8131 - @rtype: bool
8132 - @returns: True if successful, False otherwise.
8133 - """
8134 - print colorize("GOOD", "*** Resuming merge...")
8135 -
8136 - if self._show_list():
8137 - if "--tree" in self.myopts:
8138 - portage.writemsg_stdout("\n" + \
8139 - darkgreen("These are the packages that " + \
8140 - "would be merged, in reverse order:\n\n"))
8141 -
8142 - else:
8143 - portage.writemsg_stdout("\n" + \
8144 - darkgreen("These are the packages that " + \
8145 - "would be merged, in order:\n\n"))
8146 -
8147 - show_spinner = "--quiet" not in self.myopts and \
8148 - "--nodeps" not in self.myopts
8149 -
8150 - if show_spinner:
8151 - print "Calculating dependencies ",
8152 -
8153 - myparams = create_depgraph_params(self.myopts, None)
8154 - success = False
8155 - e = None
8156 - try:
8157 - success, mydepgraph, dropped_tasks = resume_depgraph(
8158 - self.settings, self.trees, self._mtimedb, self.myopts,
8159 - myparams, self._spinner)
8160 - except depgraph.UnsatisfiedResumeDep, exc:
8161 - # rename variable to avoid python-3.0 error:
8162 - # SyntaxError: can not delete variable 'e' referenced in nested
8163 - # scope
8164 - e = exc
8165 - mydepgraph = e.depgraph
8166 - dropped_tasks = set()
8167 -
8168 - if show_spinner:
8169 - print "\b\b... done!"
8170 -
8171 - if e is not None:
8172 - def unsatisfied_resume_dep_msg():
8173 - mydepgraph.display_problems()
8174 - out = portage.output.EOutput()
8175 - out.eerror("One or more packages are either masked or " + \
8176 - "have missing dependencies:")
8177 - out.eerror("")
8178 - indent = " "
8179 - show_parents = set()
8180 - for dep in e.value:
8181 - if dep.parent in show_parents:
8182 - continue
8183 - show_parents.add(dep.parent)
8184 - if dep.atom is None:
8185 - out.eerror(indent + "Masked package:")
8186 - out.eerror(2 * indent + str(dep.parent))
8187 - out.eerror("")
8188 - else:
8189 - out.eerror(indent + str(dep.atom) + " pulled in by:")
8190 - out.eerror(2 * indent + str(dep.parent))
8191 - out.eerror("")
8192 - msg = "The resume list contains packages " + \
8193 - "that are either masked or have " + \
8194 - "unsatisfied dependencies. " + \
8195 - "Please restart/continue " + \
8196 - "the operation manually, or use --skipfirst " + \
8197 - "to skip the first package in the list and " + \
8198 - "any other packages that may be " + \
8199 - "masked or have missing dependencies."
8200 - for line in textwrap.wrap(msg, 72):
8201 - out.eerror(line)
8202 - self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
8203 - return False
8204 -
8205 - if success and self._show_list():
8206 - mylist = mydepgraph.altlist()
8207 - if mylist:
8208 - if "--tree" in self.myopts:
8209 - mylist.reverse()
8210 - mydepgraph.display(mylist, favorites=self._favorites)
8211 -
8212 - if not success:
8213 - self._post_mod_echo_msgs.append(mydepgraph.display_problems)
8214 - return False
8215 - mydepgraph.display_problems()
8216 -
8217 - mylist = mydepgraph.altlist()
8218 - mydepgraph.break_refs(mylist)
8219 - mydepgraph.break_refs(dropped_tasks)
8220 - self._mergelist = mylist
8221 - self._set_digraph(mydepgraph.schedulerGraph())
8222 -
8223 - msg_width = 75
8224 - for task in dropped_tasks:
8225 - if not (isinstance(task, Package) and task.operation == "merge"):
8226 - continue
8227 - pkg = task
8228 - msg = "emerge --keep-going:" + \
8229 - " %s" % (pkg.cpv,)
8230 - if pkg.root != "/":
8231 - msg += " for %s" % (pkg.root,)
8232 - msg += " dropped due to unsatisfied dependency."
8233 - for line in textwrap.wrap(msg, msg_width):
8234 - eerror(line, phase="other", key=pkg.cpv)
8235 - settings = self.pkgsettings[pkg.root]
8236 - # Ensure that log collection from $T is disabled inside
8237 - # elog_process(), since any logs that might exist are
8238 - # not valid here.
8239 - settings.pop("T", None)
8240 - portage.elog.elog_process(pkg.cpv, settings)
8241 - self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
8242 -
8243 - return True
8244 -
8245 - def _show_list(self):
8246 - myopts = self.myopts
8247 - if "--quiet" not in myopts and \
8248 - ("--ask" in myopts or "--tree" in myopts or \
8249 - "--verbose" in myopts):
8250 - return True
8251 - return False
8252 -
8253 - def _world_atom(self, pkg):
8254 - """
8255 - Add the package to the world file, but only if
8256 - it's supposed to be added. Otherwise, do nothing.
8257 - """
8258 -
8259 - if set(("--buildpkgonly", "--fetchonly",
8260 - "--fetch-all-uri",
8261 - "--oneshot", "--onlydeps",
8262 - "--pretend")).intersection(self.myopts):
8263 - return
8264 -
8265 - if pkg.root != self.target_root:
8266 - return
8267 -
8268 - args_set = self._args_set
8269 - if not args_set.findAtomForPackage(pkg):
8270 - return
8271 -
8272 - logger = self._logger
8273 - pkg_count = self._pkg_count
8274 - root_config = pkg.root_config
8275 - world_set = root_config.sets["world"]
8276 - world_locked = False
8277 - if hasattr(world_set, "lock"):
8278 - world_set.lock()
8279 - world_locked = True
8280 -
8281 - try:
8282 - if hasattr(world_set, "load"):
8283 - world_set.load() # maybe it's changed on disk
8284 -
8285 - atom = create_world_atom(pkg, args_set, root_config)
8286 - if atom:
8287 - if hasattr(world_set, "add"):
8288 - self._status_msg(('Recording %s in "world" ' + \
8289 - 'favorites file...') % atom)
8290 - logger.log(" === (%s of %s) Updating world file (%s)" % \
8291 - (pkg_count.curval, pkg_count.maxval, pkg.cpv))
8292 - world_set.add(atom)
8293 - else:
8294 - writemsg_level('\n!!! Unable to record %s in "world"\n' % \
8295 - (atom,), level=logging.WARN, noiselevel=-1)
8296 - finally:
8297 - if world_locked:
8298 - world_set.unlock()
8299 -
8300 - def _pkg(self, cpv, type_name, root_config, installed=False):
8301 - """
8302 - Get a package instance from the cache, or create a new
8303 - one if necessary. Raises KeyError from aux_get if it
8304 - failures for some reason (package does not exist or is
8305 - corrupt).
8306 - """
8307 - operation = "merge"
8308 - if installed:
8309 - operation = "nomerge"
8310 -
8311 - if self._digraph is not None:
8312 - # Reuse existing instance when available.
8313 - pkg = self._digraph.get(
8314 - (type_name, root_config.root, cpv, operation))
8315 - if pkg is not None:
8316 - return pkg
8317 -
8318 - tree_type = depgraph.pkg_tree_map[type_name]
8319 - db = root_config.trees[tree_type].dbapi
8320 - db_keys = list(self.trees[root_config.root][
8321 - tree_type].dbapi._aux_cache_keys)
8322 - metadata = izip(db_keys, db.aux_get(cpv, db_keys))
8323 - pkg = Package(cpv=cpv, metadata=metadata,
8324 - root_config=root_config, installed=installed)
8325 - if type_name == "ebuild":
8326 - settings = self.pkgsettings[root_config.root]
8327 - settings.setcpv(pkg)
8328 - pkg.metadata["USE"] = settings["PORTAGE_USE"]
8329 - pkg.metadata['CHOST'] = settings.get('CHOST', '')
8330 -
8331 - return pkg
8332 -
8333 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
8334
8335 if os.path.exists(EPREFIX + "/usr/bin/install-info"):
8336 @@ -6983,23 +408,6 @@
8337 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
8338
8339
8340 -def _flush_elog_mod_echo():
8341 - """
8342 - Dump the mod_echo output now so that our other
8343 - notifications are shown last.
8344 - @rtype: bool
8345 - @returns: True if messages were shown, False otherwise.
8346 - """
8347 - messages_shown = False
8348 - try:
8349 - from portage.elog import mod_echo
8350 - except ImportError:
8351 - pass # happens during downgrade to a version without the module
8352 - else:
8353 - messages_shown = bool(mod_echo._items)
8354 - mod_echo.finalize()
8355 - return messages_shown
8356 -
8357 def post_emerge(root_config, myopts, mtimedb, retval):
8358 """
8359 Misc. things to run at the end of a merge session.
8360 @@ -7159,34 +567,6 @@
8361 manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
8362 return manager.getUnreadItems( repo_id, update=update )
8363
8364 -def insert_category_into_atom(atom, category):
8365 - alphanum = re.search(r'\w', atom)
8366 - if alphanum:
8367 - ret = atom[:alphanum.start()] + "%s/" % category + \
8368 - atom[alphanum.start():]
8369 - else:
8370 - ret = None
8371 - return ret
8372 -
8373 -def is_valid_package_atom(x):
8374 - if "/" not in x:
8375 - alphanum = re.search(r'\w', x)
8376 - if alphanum:
8377 - x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
8378 - return portage.isvalidatom(x)
8379 -
8380 -def show_blocker_docs_link():
8381 - print
8382 - print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
8383 - print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
8384 - print
8385 - print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
8386 - print
8387 -
8388 -def show_mask_docs():
8389 - print "For more information, see the MASKED PACKAGES section in the emerge"
8390 - print "man page or refer to the Gentoo Handbook."
8391 -
8392 def action_sync(settings, trees, mtimedb, myopts, myaction):
8393 xterm_titles = "notitles" not in settings.features
8394 emergelog(xterm_titles, " === sync")
8395 @@ -9182,80 +2562,6 @@
8396 else:
8397 print "Number removed: "+str(len(cleanlist))
8398
8399 -def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
8400 - """
8401 - Construct a depgraph for the given resume list. This will raise
8402 - PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
8403 - @rtype: tuple
8404 - @returns: (success, depgraph, dropped_tasks)
8405 - """
8406 - skip_masked = True
8407 - skip_unsatisfied = True
8408 - mergelist = mtimedb["resume"]["mergelist"]
8409 - dropped_tasks = set()
8410 - while True:
8411 - mydepgraph = depgraph(settings, trees,
8412 - myopts, myparams, spinner)
8413 - try:
8414 - success = mydepgraph.loadResumeCommand(mtimedb["resume"],
8415 - skip_masked=skip_masked)
8416 - except depgraph.UnsatisfiedResumeDep, e:
8417 - if not skip_unsatisfied:
8418 - raise
8419 -
8420 - graph = mydepgraph.digraph
8421 - unsatisfied_parents = dict((dep.parent, dep.parent) \
8422 - for dep in e.value)
8423 - traversed_nodes = set()
8424 - unsatisfied_stack = list(unsatisfied_parents)
8425 - while unsatisfied_stack:
8426 - pkg = unsatisfied_stack.pop()
8427 - if pkg in traversed_nodes:
8428 - continue
8429 - traversed_nodes.add(pkg)
8430 -
8431 - # If this package was pulled in by a parent
8432 - # package scheduled for merge, removing this
8433 - # package may cause the the parent package's
8434 - # dependency to become unsatisfied.
8435 - for parent_node in graph.parent_nodes(pkg):
8436 - if not isinstance(parent_node, Package) \
8437 - or parent_node.operation not in ("merge", "nomerge"):
8438 - continue
8439 - unsatisfied = \
8440 - graph.child_nodes(parent_node,
8441 - ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
8442 - if pkg in unsatisfied:
8443 - unsatisfied_parents[parent_node] = parent_node
8444 - unsatisfied_stack.append(parent_node)
8445 -
8446 - pruned_mergelist = []
8447 - for x in mergelist:
8448 - if isinstance(x, list) and \
8449 - tuple(x) not in unsatisfied_parents:
8450 - pruned_mergelist.append(x)
8451 -
8452 - # If the mergelist doesn't shrink then this loop is infinite.
8453 - if len(pruned_mergelist) == len(mergelist):
8454 - # This happens if a package can't be dropped because
8455 - # it's already installed, but it has unsatisfied PDEPEND.
8456 - raise
8457 - mergelist[:] = pruned_mergelist
8458 -
8459 - # Exclude installed packages that have been removed from the graph due
8460 - # to failure to build/install runtime dependencies after the dependent
8461 - # package has already been installed.
8462 - dropped_tasks.update(pkg for pkg in \
8463 - unsatisfied_parents if pkg.operation != "nomerge")
8464 - mydepgraph.break_refs(unsatisfied_parents)
8465 -
8466 - del e, graph, traversed_nodes, \
8467 - unsatisfied_parents, unsatisfied_stack
8468 - continue
8469 - else:
8470 - break
8471 - return (success, mydepgraph, dropped_tasks)
8472 -
8473 def action_build(settings, trees, mtimedb,
8474 myopts, myaction, myfiles, spinner):
8475
8476 @@ -9921,16 +3227,6 @@
8477 settings = trees[myroot]["vartree"].settings
8478 settings.validate()
8479
8480 -def clear_caches(trees):
8481 - for d in trees.itervalues():
8482 - d["porttree"].dbapi.melt()
8483 - d["porttree"].dbapi._aux_cache.clear()
8484 - d["bintree"].dbapi._aux_cache.clear()
8485 - d["bintree"].dbapi._clear_cache()
8486 - d["vartree"].dbapi.linkmap._clear_cache()
8487 - portage.dircache.clear()
8488 - gc.collect()
8489 -
8490 def load_emerge_config(trees=None):
8491 kwargs = {}
8492 for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
8493 @@ -10329,28 +3625,6 @@
8494 msg += " for '%s'" % root
8495 writemsg_level(msg, level=logging.WARN, noiselevel=-1)
8496
8497 -def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
8498 -
8499 - if "--quiet" in myopts:
8500 - print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
8501 - print "!!! one of the following fully-qualified ebuild names instead:\n"
8502 - for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
8503 - print " " + colorize("INFORM", cp)
8504 - return
8505 -
8506 - s = search(root_config, spinner, "--searchdesc" in myopts,
8507 - "--quiet" not in myopts, "--usepkg" in myopts,
8508 - "--usepkgonly" in myopts)
8509 - null_cp = portage.dep_getkey(insert_category_into_atom(
8510 - arg, "null"))
8511 - cat, atom_pn = portage.catsplit(null_cp)
8512 - s.searchkey = atom_pn
8513 - for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
8514 - s.addCP(cp)
8515 - s.output()
8516 - print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
8517 - print "!!! one of the above fully-qualified ebuild names instead.\n"
8518 -
8519 def profile_check(trees, myaction, myopts):
8520 if myaction in ("info", "sync"):
8521 return os.EX_OK
8522
8523 Copied: main/branches/prefix/pym/_emerge/_find_deep_system_runtime_deps.py (from rev 13672, main/trunk/pym/_emerge/_find_deep_system_runtime_deps.py)
8524 ===================================================================
8525 --- main/branches/prefix/pym/_emerge/_find_deep_system_runtime_deps.py (rev 0)
8526 +++ main/branches/prefix/pym/_emerge/_find_deep_system_runtime_deps.py 2009-06-27 14:07:14 UTC (rev 13710)
8527 @@ -0,0 +1,35 @@
8528 +from _emerge.DepPriority import DepPriority
8529 +from _emerge.Package import Package
8530 +
8531 +def _find_deep_system_runtime_deps(graph):
8532 + deep_system_deps = set()
8533 + node_stack = []
8534 + for node in graph:
8535 + if not isinstance(node, Package) or \
8536 + node.operation == 'uninstall':
8537 + continue
8538 + if node.root_config.sets['system'].findAtomForPackage(node):
8539 + node_stack.append(node)
8540 +
8541 + def ignore_priority(priority):
8542 + """
8543 + Ignore non-runtime priorities.
8544 + """
8545 + if isinstance(priority, DepPriority) and \
8546 + (priority.runtime or priority.runtime_post):
8547 + return False
8548 + return True
8549 +
8550 + while node_stack:
8551 + node = node_stack.pop()
8552 + if node in deep_system_deps:
8553 + continue
8554 + deep_system_deps.add(node)
8555 + for child in graph.child_nodes(node, ignore_priority=ignore_priority):
8556 + if not isinstance(child, Package) or \
8557 + child.operation == 'uninstall':
8558 + continue
8559 + node_stack.append(child)
8560 +
8561 + return deep_system_deps
8562 +
8563
8564 Copied: main/branches/prefix/pym/_emerge/_flush_elog_mod_echo.py (from rev 13672, main/trunk/pym/_emerge/_flush_elog_mod_echo.py)
8565 ===================================================================
8566 --- main/branches/prefix/pym/_emerge/_flush_elog_mod_echo.py (rev 0)
8567 +++ main/branches/prefix/pym/_emerge/_flush_elog_mod_echo.py 2009-06-27 14:07:14 UTC (rev 13710)
8568 @@ -0,0 +1,17 @@
8569 +def _flush_elog_mod_echo():
8570 + """
8571 + Dump the mod_echo output now so that our other
8572 + notifications are shown last.
8573 + @rtype: bool
8574 + @returns: True if messages were shown, False otherwise.
8575 + """
8576 + messages_shown = False
8577 + try:
8578 + from portage.elog import mod_echo
8579 + except ImportError:
8580 + pass # happens during downgrade to a version without the module
8581 + else:
8582 + messages_shown = bool(mod_echo._items)
8583 + mod_echo.finalize()
8584 + return messages_shown
8585 +
8586
8587 Copied: main/branches/prefix/pym/_emerge/clear_caches.py (from rev 13672, main/trunk/pym/_emerge/clear_caches.py)
8588 ===================================================================
8589 --- main/branches/prefix/pym/_emerge/clear_caches.py (rev 0)
8590 +++ main/branches/prefix/pym/_emerge/clear_caches.py 2009-06-27 14:07:14 UTC (rev 13710)
8591 @@ -0,0 +1,20 @@
8592 +import gc
8593 +
8594 +# for an explanation on this logic, see pym/_emerge/__init__.py
8595 +import os
8596 +import sys
8597 +if os.environ.__contains__("PORTAGE_PYTHONPATH"):
8598 + sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
8599 +else:
8600 + sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
8601 +import portage
8602 +
8603 +def clear_caches(trees):
8604 + for d in trees.itervalues():
8605 + d["porttree"].dbapi.melt()
8606 + d["porttree"].dbapi._aux_cache.clear()
8607 + d["bintree"].dbapi._aux_cache.clear()
8608 + d["bintree"].dbapi._clear_cache()
8609 + d["vartree"].dbapi.linkmap._clear_cache()
8610 + portage.dircache.clear()
8611 + gc.collect()
8612
8613 Copied: main/branches/prefix/pym/_emerge/create_depgraph_params.py (from rev 13672, main/trunk/pym/_emerge/create_depgraph_params.py)
8614 ===================================================================
8615 --- main/branches/prefix/pym/_emerge/create_depgraph_params.py (rev 0)
8616 +++ main/branches/prefix/pym/_emerge/create_depgraph_params.py 2009-06-27 14:07:14 UTC (rev 13710)
8617 @@ -0,0 +1,33 @@
8618 +def create_depgraph_params(myopts, myaction):
8619 + #configure emerge engine parameters
8620 + #
8621 + # self: include _this_ package regardless of if it is merged.
8622 + # selective: exclude the package if it is merged
8623 + # recurse: go into the dependencies
8624 + # deep: go into the dependencies of already merged packages
8625 + # empty: pretend nothing is merged
8626 + # complete: completely account for all known dependencies
8627 + # remove: build graph for use in removing packages
8628 + myparams = set(["recurse"])
8629 +
8630 + if myaction == "remove":
8631 + myparams.add("remove")
8632 + myparams.add("complete")
8633 + return myparams
8634 +
8635 + if "--update" in myopts or \
8636 + "--newuse" in myopts or \
8637 + "--reinstall" in myopts or \
8638 + "--noreplace" in myopts:
8639 + myparams.add("selective")
8640 + if "--emptytree" in myopts:
8641 + myparams.add("empty")
8642 + myparams.discard("selective")
8643 + if "--nodeps" in myopts:
8644 + myparams.discard("recurse")
8645 + if "--deep" in myopts:
8646 + myparams.add("deep")
8647 + if "--complete-graph" in myopts:
8648 + myparams.add("complete")
8649 + return myparams
8650 +
8651
8652 Copied: main/branches/prefix/pym/_emerge/create_world_atom.py (from rev 13672, main/trunk/pym/_emerge/create_world_atom.py)
8653 ===================================================================
8654 --- main/branches/prefix/pym/_emerge/create_world_atom.py (rev 0)
8655 +++ main/branches/prefix/pym/_emerge/create_world_atom.py 2009-06-27 14:07:14 UTC (rev 13710)
8656 @@ -0,0 +1,92 @@
8657 +# for an explanation on this logic, see pym/_emerge/__init__.py
8658 +import os
8659 +import sys
8660 +if os.environ.__contains__("PORTAGE_PYTHONPATH"):
8661 + sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
8662 +else:
8663 + sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
8664 +import portage
8665 +
8666 +def create_world_atom(pkg, args_set, root_config):
8667 + """Create a new atom for the world file if one does not exist. If the
8668 + argument atom is precise enough to identify a specific slot then a slot
8669 + atom will be returned. Atoms that are in the system set may also be stored
8670 + in world since system atoms can only match one slot while world atoms can
8671 + be greedy with respect to slots. Unslotted system packages will not be
8672 + stored in world."""
8673 +
8674 + arg_atom = args_set.findAtomForPackage(pkg)
8675 + if not arg_atom:
8676 + return None
8677 + cp = portage.dep_getkey(arg_atom)
8678 + new_world_atom = cp
8679 + sets = root_config.sets
8680 + portdb = root_config.trees["porttree"].dbapi
8681 + vardb = root_config.trees["vartree"].dbapi
8682 + available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
8683 + for cpv in portdb.match(cp))
8684 + slotted = len(available_slots) > 1 or \
8685 + (len(available_slots) == 1 and "0" not in available_slots)
8686 + if not slotted:
8687 + # check the vdb in case this is multislot
8688 + available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
8689 + for cpv in vardb.match(cp))
8690 + slotted = len(available_slots) > 1 or \
8691 + (len(available_slots) == 1 and "0" not in available_slots)
8692 + if slotted and arg_atom != cp:
8693 + # If the user gave a specific atom, store it as a
8694 + # slot atom in the world file.
8695 + slot_atom = pkg.slot_atom
8696 +
8697 + # For USE=multislot, there are a couple of cases to
8698 + # handle here:
8699 + #
8700 + # 1) SLOT="0", but the real SLOT spontaneously changed to some
8701 + # unknown value, so just record an unslotted atom.
8702 + #
8703 + # 2) SLOT comes from an installed package and there is no
8704 + # matching SLOT in the portage tree.
8705 + #
8706 + # Make sure that the slot atom is available in either the
8707 + # portdb or the vardb, since otherwise the user certainly
8708 + # doesn't want the SLOT atom recorded in the world file
8709 + # (case 1 above). If it's only available in the vardb,
8710 + # the user may be trying to prevent a USE=multislot
8711 + # package from being removed by --depclean (case 2 above).
8712 +
8713 + mydb = portdb
8714 + if not portdb.match(slot_atom):
8715 + # SLOT seems to come from an installed multislot package
8716 + mydb = vardb
8717 + # If there is no installed package matching the SLOT atom,
8718 + # it probably changed SLOT spontaneously due to USE=multislot,
8719 + # so just record an unslotted atom.
8720 + if vardb.match(slot_atom):
8721 + # Now verify that the argument is precise
8722 + # enough to identify a specific slot.
8723 + matches = mydb.match(arg_atom)
8724 + matched_slots = set()
8725 + for cpv in matches:
8726 + matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
8727 + if len(matched_slots) == 1:
8728 + new_world_atom = slot_atom
8729 +
8730 + if new_world_atom == sets["world"].findAtomForPackage(pkg):
8731 + # Both atoms would be identical, so there's nothing to add.
8732 + return None
8733 + if not slotted:
8734 + # Unlike world atoms, system atoms are not greedy for slots, so they
8735 + # can't be safely excluded from world if they are slotted.
8736 + system_atom = sets["system"].findAtomForPackage(pkg)
8737 + if system_atom:
8738 + if not portage.dep_getkey(system_atom).startswith("virtual/"):
8739 + return None
8740 + # System virtuals aren't safe to exclude from world since they can
8741 + # match multiple old-style virtuals but only one of them will be
8742 + # pulled in by update or depclean.
8743 + providers = portdb.mysettings.getvirtuals().get(
8744 + portage.dep_getkey(system_atom))
8745 + if providers and len(providers) == 1 and providers[0] == cp:
8746 + return None
8747 + return new_world_atom
8748 +
8749
8750 Copied: main/branches/prefix/pym/_emerge/depgraph.py (from rev 13672, main/trunk/pym/_emerge/depgraph.py)
8751 ===================================================================
8752 --- main/branches/prefix/pym/_emerge/depgraph.py (rev 0)
8753 +++ main/branches/prefix/pym/_emerge/depgraph.py 2009-06-27 14:07:14 UTC (rev 13710)
8754 @@ -0,0 +1,4981 @@
8755 +import gc
8756 +import os
8757 +import re
8758 +import sys
8759 +import textwrap
8760 +from itertools import chain, izip
8761 +
8762 +# for an explanation on this logic, see pym/_emerge/__init__.py
8763 +import os
8764 +import sys
8765 +if os.environ.__contains__("PORTAGE_PYTHONPATH"):
8766 + sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
8767 +else:
8768 + sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
8769 +import portage
8770 +
8771 +from portage import digraph
8772 +from portage.output import bold, blue, colorize, create_color_func, darkblue, \
8773 + darkgreen, green, nc_len, red, teal, turquoise, yellow
8774 +bad = create_color_func("BAD")
8775 +from portage.sets import SETPREFIX
8776 +from portage.sets.base import InternalPackageSet
8777 +from portage.util import cmp_sort_key, writemsg
8778 +
8779 +from _emerge.AtomArg import AtomArg
8780 +from _emerge.Blocker import Blocker
8781 +from _emerge.BlockerCache import BlockerCache
8782 +from _emerge.BlockerDepPriority import BlockerDepPriority
8783 +from _emerge.countdown import countdown
8784 +from _emerge.create_world_atom import create_world_atom
8785 +from _emerge.Dependency import Dependency
8786 +from _emerge.DependencyArg import DependencyArg
8787 +from _emerge.DepPriority import DepPriority
8788 +from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
8789 +from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
8790 +from _emerge.FakeVartree import FakeVartree
8791 +from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
8792 +from _emerge.format_size import format_size
8793 +from _emerge.is_valid_package_atom import is_valid_package_atom
8794 +from _emerge.Package import Package
8795 +from _emerge.PackageArg import PackageArg
8796 +from _emerge.PackageCounters import PackageCounters
8797 +from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
8798 +from _emerge.RepoDisplay import RepoDisplay
8799 +from _emerge.RootConfig import RootConfig
8800 +from _emerge.search import search
8801 +from _emerge.SetArg import SetArg
8802 +from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
8803 +from _emerge.UnmergeDepPriority import UnmergeDepPriority
8804 +from _emerge.visible import visible
8805 +
8806 +import portage.proxy.lazyimport
8807 +import portage.proxy as proxy
# Scheduler is imported lazily instead of via the direct import below;
# NOTE(review): presumably to break a circular import between the
# Scheduler and depgraph modules -- confirm before changing.
proxy.lazyimport.lazyimport(globals(),
	'_emerge.Scheduler:Scheduler',
)
#from _emerge.Scheduler import Scheduler
class depgraph(object):
	"""Builds a directed graph of packages and their dependencies,
	tracking blockers and slot conflicts along the way."""

	# Mapping of package type name -> tree name, shared with RootConfig.
	pkg_tree_map = RootConfig.pkg_tree_map

	# Metadata keys that carry dependency strings.
	_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]

	def __init__(self, settings, trees, myopts, myparams, spinner):
		"""Set up per-root package databases and graph bookkeeping.

		@param settings: global portage configuration
		@param myopts: parsed emerge command line options (dict-like)
		@param myparams: depgraph behavior parameters (e.g. "deep",
			"empty", "recurse")
		@param trees: mapping of root -> {"porttree", "bintree",
			"vartree", "root_config"}
		@param spinner: progress indicator updated during long loops
		"""
		self.settings = settings
		self.target_root = settings["ROOT"]
		self.myopts = myopts
		self.myparams = myparams
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.spinner = spinner
		self._running_root = trees["/"]["root_config"]
		self._opts_no_restart = Scheduler._opts_no_restart
		self.pkgsettings = {}
		# Maps slot atom to package for each Package added to the graph.
		self._slot_pkg_map = {}
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		self.mydbapi = {}
		self.trees = {}
		self._trees_orig = trees
		self.roots = {}
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		# to the graph.
		self._graph_trees = {}
		# All Package instances
		self._pkg_cache = {}
		for myroot in trees:
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				self.trees[myroot],
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			# The FakeVartree lets the graph simulate vdb state changes
			# without touching the real installed-package database.
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["root_config"],
					pkg_cache=self._pkg_cache)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)
			self._slot_pkg_map[myroot] = {}
			vardb = self.trees[myroot]["vartree"].dbapi
			preload_installed_pkgs = "--nodeps" not in self.myopts and \
				"--buildpkgonly" not in self.myopts
			# This fakedbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageVirtualDbapi(vardb.settings)
			if preload_installed_pkgs:
				for pkg in vardb:
					self.spinner.update()
					# This triggers metadata updates via FakeVartree.
					vardb.aux_get(pkg.cpv, [])
					fakedb.cpv_inject(pkg)

				# Now that the vardb state is cached in our FakeVartree,
				# we won't be needing the real vartree cache for awhile.
				# To make some room on the heap, clear the vardbapi
				# caches.
				trees[myroot]["vartree"].dbapi._clear_cache()
				gc.collect()

			self.mydbapi[myroot] = fakedb
			# graph_tree / filtered_tree are minimal "tree" stand-ins:
			# dep_check() only needs an object with a .dbapi attribute.
			def graph_tree():
				pass
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# that have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			def filtered_tree():
				pass
			filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]

			dbs = []
			portdb = self.trees[myroot]["porttree"].dbapi
			bindb = self.trees[myroot]["bintree"].dbapi
			vardb = self.trees[myroot]["vartree"].dbapi
			# (db, pkg_type, built, installed, db_keys)
			if "--usepkgonly" not in self.myopts:
				db_keys = list(portdb._aux_cache_keys)
				dbs.append((portdb, "ebuild", False, False, db_keys))
			if "--usepkg" in self.myopts:
				db_keys = list(bindb._aux_cache_keys)
				dbs.append((bindb, "binary", True, False, db_keys))
			db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
			if "--usepkg" in self.myopts:
				self.trees[myroot]["bintree"].populate(
					"--getbinpkg" in self.myopts,
					"--getbinpkgonly" in self.myopts)
		del trees

		self.digraph=portage.digraph()
		# contains all sets added to the graph
		self._sets = {}
		# contains atoms given as arguments
		self._sets["args"] = InternalPackageSet()
		# contains all atoms from all sets added to the graph, including
		# atoms given as arguments
		self._set_atoms = InternalPackageSet()
		self._atom_arg_map = {}
		# contains all nodes pulled in by self._set_atoms
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		self._slot_collision_info = {}
		# Slot collision nodes are not allowed to block other packages since
		# blocker validation is only able to account for one package per slot.
		self._slot_collision_nodes = set()
		self._parent_atoms = {}
		self._slot_conflict_parent_atoms = set()
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_stack = []
		self._dep_disjunctive_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._required_set_names = set(["system", "world"])
		self._select_atoms = self._select_atoms_highest_available
		self._select_package = self._select_pkg_highest_available
		self._highest_pkg_cache = {}
8986 +
	def _show_slot_collision_notice(self):
		"""Show an informational message advising the user to mask one of the
		packages. In some cases it may be possible to resolve this
		automatically, but support for backtracking (removal nodes that have
		already been selected) will be required in order to handle all possible
		cases.

		Writes the conflict report to stderr; returns nothing.
		"""

		if not self._slot_collision_info:
			return

		self._show_merge_list()

		msg = []
		msg.append("\n!!! Multiple package instances within a single " + \
			"package slot have been pulled\n")
		msg.append("!!! into the dependency graph, resulting" + \
			" in a slot conflict:\n\n")
		indent = "  "
		# Max number of parents shown, to avoid flooding the display.
		max_parents = 3
		explanation_columns = 70
		explanations = 0
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():
			msg.append(str(slot_atom))
			msg.append("\n\n")

			for node in slot_nodes:
				msg.append(indent)
				msg.append(str(node))
				parent_atoms = self._parent_atoms.get(node)
				if parent_atoms:
					pruned_list = set()
					# Prefer conflict atoms over others.
					for parent_atom in parent_atoms:
						if len(pruned_list) >= max_parents:
							break
						if parent_atom in self._slot_conflict_parent_atoms:
							pruned_list.add(parent_atom)

					# If this package was pulled in by conflict atoms then
					# show those alone since those are the most interesting.
					if not pruned_list:
						# When generating the pruned list, prefer instances
						# of DependencyArg over instances of Package.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
								break
							parent, atom = parent_atom
							if isinstance(parent, DependencyArg):
								pruned_list.add(parent_atom)
						# Prefer Packages instances that themselves have been
						# pulled into collision slots.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
								break
							parent, atom = parent_atom
							if isinstance(parent, Package) and \
								(parent.slot_atom, parent.root) \
								in self._slot_collision_info:
								pruned_list.add(parent_atom)
						# Fill any remaining display slots arbitrarily.
						for parent_atom in parent_atoms:
							if len(pruned_list) >= max_parents:
								break
							pruned_list.add(parent_atom)
					omitted_parents = len(parent_atoms) - len(pruned_list)
					parent_atoms = pruned_list
					msg.append(" pulled in by\n")
					for parent_atom in parent_atoms:
						parent, atom = parent_atom
						msg.append(2*indent)
						if isinstance(parent,
							(PackageArg, AtomArg)):
							# For PackageArg and AtomArg types, it's
							# redundant to display the atom attribute.
							msg.append(str(parent))
						else:
							# Display the specific atom from SetArg or
							# Package types.
							msg.append("%s required by %s" % (atom, parent))
						msg.append("\n")
					if omitted_parents:
						msg.append(2*indent)
						msg.append("(and %d more)\n" % omitted_parents)
				else:
					msg.append(" (no parents)\n")
				msg.append("\n")
			explanation = self._slot_conflict_explanation(slot_nodes)
			if explanation:
				explanations += 1
				msg.append(indent + "Explanation:\n\n")
				for line in textwrap.wrap(explanation, explanation_columns):
					msg.append(2*indent + line + "\n")
				msg.append("\n")
		msg.append("\n")
		sys.stderr.write("".join(msg))
		sys.stderr.flush()

		explanations_for_all = explanations == len(self._slot_collision_info)

		# If every conflict already got a targeted explanation (or the
		# user asked for quiet output), skip the generic advice below.
		if explanations_for_all or "--quiet" in self.myopts:
			return

		msg = []
		msg.append("It may be possible to solve this problem ")
		msg.append("by using package.mask to prevent one of ")
		msg.append("those packages from being selected. ")
		msg.append("However, it is also possible that conflicting ")
		msg.append("dependencies exist such that they are impossible to ")
		msg.append("satisfy simultaneously. If such a conflict exists in ")
		msg.append("the dependencies of two different packages, then those ")
		msg.append("packages can not be installed simultaneously.")

		# formatter is a Python 2 stdlib module, used here for simple
		# word wrapping of the flowing advice text.
		from formatter import AbstractFormatter, DumbWriter
		f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
		for x in msg:
			f.add_flowing_data(x)
		f.end_paragraph(1)

		msg = []
		msg.append("For more information, see MASKED PACKAGES ")
		msg.append("section in the emerge man page or refer ")
		msg.append("to the Gentoo Handbook.")
		for x in msg:
			f.add_flowing_data(x)
		f.end_paragraph(1)
		f.writer.flush()
9115 +
9116 + def _slot_conflict_explanation(self, slot_nodes):
9117 + """
9118 + When a slot conflict occurs due to USE deps, there are a few
9119 + different cases to consider:
9120 +
9121 + 1) New USE are correctly set but --newuse wasn't requested so an
9122 + installed package with incorrect USE happened to get pulled
9123 + into graph before the new one.
9124 +
9125 + 2) New USE are incorrectly set but an installed package has correct
9126 + USE so it got pulled into the graph, and a new instance also got
9127 + pulled in due to --newuse or an upgrade.
9128 +
9129 + 3) Multiple USE deps exist that can't be satisfied simultaneously,
9130 + and multiple package instances got pulled into the same slot to
9131 + satisfy the conflicting deps.
9132 +
9133 + Currently, explanations and suggested courses of action are generated
9134 + for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
9135 + """
9136 +
9137 + if len(slot_nodes) != 2:
9138 + # Suggestions are only implemented for
9139 + # conflicts between two packages.
9140 + return None
9141 +
9142 + all_conflict_atoms = self._slot_conflict_parent_atoms
9143 + matched_node = None
9144 + matched_atoms = None
9145 + unmatched_node = None
9146 + for node in slot_nodes:
9147 + parent_atoms = self._parent_atoms.get(node)
9148 + if not parent_atoms:
9149 + # Normally, there are always parent atoms. If there are
9150 + # none then something unexpected is happening and there's
9151 + # currently no suggestion for this case.
9152 + return None
9153 + conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
9154 + for parent_atom in conflict_atoms:
9155 + parent, atom = parent_atom
9156 + if not atom.use:
9157 + # Suggestions are currently only implemented for cases
9158 + # in which all conflict atoms have USE deps.
9159 + return None
9160 + if conflict_atoms:
9161 + if matched_node is not None:
9162 + # If conflict atoms match multiple nodes
9163 + # then there's no suggestion.
9164 + return None
9165 + matched_node = node
9166 + matched_atoms = conflict_atoms
9167 + else:
9168 + if unmatched_node is not None:
9169 + # Neither node is matched by conflict atoms, and
9170 + # there is no suggestion for this case.
9171 + return None
9172 + unmatched_node = node
9173 +
9174 + if matched_node is None or unmatched_node is None:
9175 + # This shouldn't happen.
9176 + return None
9177 +
9178 + if unmatched_node.installed and not matched_node.installed and \
9179 + unmatched_node.cpv == matched_node.cpv:
9180 + # If the conflicting packages are the same version then
9181 + # --newuse should be all that's needed. If they are different
9182 + # versions then there's some other problem.
9183 + return "New USE are correctly set, but --newuse wasn't" + \
9184 + " requested, so an installed package with incorrect USE " + \
9185 + "happened to get pulled into the dependency graph. " + \
9186 + "In order to solve " + \
9187 + "this, either specify the --newuse option or explicitly " + \
9188 + " reinstall '%s'." % matched_node.slot_atom
9189 +
9190 + if matched_node.installed and not unmatched_node.installed:
9191 + atoms = sorted(set(atom for parent, atom in matched_atoms))
9192 + explanation = ("New USE for '%s' are incorrectly set. " + \
9193 + "In order to solve this, adjust USE to satisfy '%s'") % \
9194 + (matched_node.slot_atom, atoms[0])
9195 + if len(atoms) > 1:
9196 + for atom in atoms[1:-1]:
9197 + explanation += ", '%s'" % (atom,)
9198 + if len(atoms) > 2:
9199 + explanation += ","
9200 + explanation += " and '%s'" % (atoms[-1],)
9201 + explanation += "."
9202 + return explanation
9203 +
9204 + return None
9205 +
	def _process_slot_conflicts(self):
		"""
		Process slot conflict data to identify specific atoms which
		lead to conflict. These atoms only match a subset of the
		packages that have been pulled into a given slot.

		Side effects: extends self._parent_atoms for each conflicting
		package with every parent atom that matches it, and records the
		non-matching atoms in self._slot_conflict_parent_atoms.
		"""
		for (slot_atom, root), slot_nodes \
			in self._slot_collision_info.iteritems():

			# Union of the parent atoms of every package in this slot.
			all_parent_atoms = set()
			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if not parent_atoms:
					continue
				all_parent_atoms.update(parent_atoms)

			for pkg in slot_nodes:
				parent_atoms = self._parent_atoms.get(pkg)
				if parent_atoms is None:
					parent_atoms = set()
					self._parent_atoms[pkg] = parent_atoms
				for parent_atom in all_parent_atoms:
					if parent_atom in parent_atoms:
						continue
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					parent, atom = parent_atom
					atom_set = InternalPackageSet(
						initial_atoms=(atom,))
					if atom_set.findAtomForPackage(pkg):
						parent_atoms.add(parent_atom)
					else:
						# This atom cannot be satisfied by pkg, so it is
						# part of the conflict.
						self._slot_conflict_parent_atoms.add(parent_atom)
9239 +
9240 + def _reinstall_for_flags(self, forced_flags,
9241 + orig_use, orig_iuse, cur_use, cur_iuse):
9242 + """Return a set of flags that trigger reinstallation, or None if there
9243 + are no such flags."""
9244 + if "--newuse" in self.myopts:
9245 + flags = set(orig_iuse.symmetric_difference(
9246 + cur_iuse).difference(forced_flags))
9247 + flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
9248 + cur_iuse.intersection(cur_use)))
9249 + if flags:
9250 + return flags
9251 + elif "changed-use" == self.myopts.get("--reinstall"):
9252 + flags = orig_iuse.intersection(orig_use).symmetric_difference(
9253 + cur_iuse.intersection(cur_use))
9254 + if flags:
9255 + return flags
9256 + return None
9257 +
9258 + def _create_graph(self, allow_unsatisfied=False):
9259 + dep_stack = self._dep_stack
9260 + dep_disjunctive_stack = self._dep_disjunctive_stack
9261 + while dep_stack or dep_disjunctive_stack:
9262 + self.spinner.update()
9263 + while dep_stack:
9264 + dep = dep_stack.pop()
9265 + if isinstance(dep, Package):
9266 + if not self._add_pkg_deps(dep,
9267 + allow_unsatisfied=allow_unsatisfied):
9268 + return 0
9269 + continue
9270 + if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
9271 + return 0
9272 + if dep_disjunctive_stack:
9273 + if not self._pop_disjunction(allow_unsatisfied):
9274 + return 0
9275 + return 1
9276 +
	def _add_dep(self, dep, allow_unsatisfied=False):
		"""Resolve a single Dependency into a graph node.

		@param dep: a Dependency instance (may be a blocker)
		@param allow_unsatisfied: when True, queue unsatisfiable deps in
			self._unsatisfied_deps instead of failing
		@return: 1 on success (including deliberately ignored deps),
			0 when the dependency cannot be satisfied
		"""
		debug = "--debug" in self.myopts
		buildpkgonly = "--buildpkgonly" in self.myopts
		nodeps = "--nodeps" in self.myopts
		empty = "empty" in self.myparams
		deep = "deep" in self.myparams
		update = "--update" in self.myopts and dep.depth <= 1
		if dep.blocker:
			if not buildpkgonly and \
				not nodeps and \
				dep.parent not in self._slot_collision_nodes:
				if dep.parent.onlydeps:
					# It's safe to ignore blockers if the
					# parent is an --onlydeps node.
					return 1
				# The blocker applies to the root where
				# the parent is or will be installed.
				blocker = Blocker(atom=dep.atom,
					eapi=dep.parent.metadata["EAPI"],
					root=dep.parent.root)
				self._blocker_parents.add(blocker, dep.parent)
			return 1
		dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
			onlydeps=dep.onlydeps)
		if not dep_pkg:
			if dep.priority.optional:
				# This could be an unnecessary build-time dep
				# pulled in by --with-bdeps=y.
				return 1
			if allow_unsatisfied:
				self._unsatisfied_deps.append(dep)
				return 1
			self._unsatisfied_deps_for_display.append(
				((dep.root, dep.atom), {"myparent":dep.parent}))
			return 0
		# In some cases, dep_check will return deps that shouldn't
		# be processed any further, so they are identified and
		# discarded here. Try to discard as few as possible since
		# discarded dependencies reduce the amount of information
		# available for optimization of merge order.
		if dep.priority.satisfied and \
			not dep_pkg.installed and \
			not (existing_node or empty or deep or update):
			myarg = None
			if dep.root == self.target_root:
				try:
					# Only deps that correspond to a command line
					# argument are kept in this case.
					myarg = self._iter_atoms_for_pkg(dep_pkg).next()
				except StopIteration:
					pass
				except portage.exception.InvalidDependString:
					if not dep_pkg.installed:
						# This shouldn't happen since the package
						# should have been masked.
						raise
			if not myarg:
				self._ignored_deps.append(dep)
				return 1

		if not self._add_pkg(dep_pkg, dep):
			return 0
		return 1
9338 +
	def _add_pkg(self, pkg, dep):
		"""Add a Package node to the dependency graph, reusing an
		existing node for the same slot when possible and recording a
		slot conflict otherwise.

		@param pkg: the Package instance to add
		@param dep: the Dependency that pulled pkg in, or None for a
			directly requested package
		@return: 1 on success, 0 on a fatal error (e.g. invalid PROVIDE)
		"""
		myparent = None
		priority = None
		depth = 0
		if dep is None:
			dep = Dependency()
		else:
			myparent = dep.parent
			priority = dep.priority
			depth = dep.depth
		if priority is None:
			priority = DepPriority()
		"""
		Fills the digraph with nodes comprised of packages to merge.
		mybigkey is the package spec of the package to merge.
		myparent is the package depending on mybigkey ( or None )
		addme = Should we add this package to the digraph or are we just looking at it's deps?
			Think --onlydeps, we need to ignore packages in that case.
		#stuff to add:
		#SLOT-aware emerge
		#IUSE-aware emerge -> USE DEP aware depgraph
		#"no downgrade" emerge
		"""
		# Ensure that the dependencies of the same package
		# are never processed more than once.
		previously_added = pkg in self.digraph

		# select the correct /var database that we'll be checking against
		vardbapi = self.trees[pkg.root]["vartree"].dbapi
		pkgsettings = self.pkgsettings[pkg.root]

		arg_atoms = None
		if True:
			try:
				arg_atoms = list(self._iter_atoms_for_pkg(pkg))
			except portage.exception.InvalidDependString, e:
				if not pkg.installed:
					show_invalid_depstring_notice(
						pkg, pkg.metadata["PROVIDE"], str(e))
					return 0
				del e

		if not pkg.onlydeps:
			if not pkg.installed and \
				"empty" not in self.myparams and \
				vardbapi.match(pkg.slot_atom):
				# Increase the priority of dependencies on packages that
				# are being rebuilt. This optimizes merge order so that
				# dependencies are rebuilt/updated as soon as possible,
				# which is needed especially when emerge is called by
				# revdep-rebuild since dependencies may be affected by ABI
				# breakage that has rendered them useless. Don't adjust
				# priority here when in "empty" mode since all packages
				# are being merged in that case.
				priority.rebuild = True

			existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
			slot_collision = False
			if existing_node:
				existing_node_matches = pkg.cpv == existing_node.cpv
				if existing_node_matches and \
					pkg != existing_node and \
					dep.atom is not None:
					# Use package set for matching since it will match via
					# PROVIDE when necessary, while match_from_list does not.
					atom_set = InternalPackageSet(initial_atoms=[dep.atom])
					if not atom_set.findAtomForPackage(existing_node):
						existing_node_matches = False
				if existing_node_matches:
					# The existing node can be reused.
					if arg_atoms:
						for parent_atom in arg_atoms:
							parent, atom = parent_atom
							self.digraph.add(existing_node, parent,
								priority=priority)
							self._add_parent_atom(existing_node, parent_atom)
					# If a direct circular dependency is not an unsatisfied
					# buildtime dependency then drop it here since otherwise
					# it can skew the merge order calculation in an unwanted
					# way.
					if existing_node != myparent or \
						(priority.buildtime and not priority.satisfied):
						self.digraph.addnode(existing_node, myparent,
							priority=priority)
						if dep.atom is not None and dep.parent is not None:
							self._add_parent_atom(existing_node,
								(dep.parent, dep.atom))
					return 1
				else:

					# A slot collision has occurred. Sometimes this coincides
					# with unresolvable blockers, so the slot collision will be
					# shown later if there are no unresolvable blockers.
					self._add_slot_conflict(pkg)
					slot_collision = True

			if slot_collision:
				# Now add this node to the graph so that self.display()
				# can show use flags and --tree portage.output. This node is
				# only being partially added to the graph. It must not be
				# allowed to interfere with the other nodes that have been
				# added. Do not overwrite data for existing nodes in
				# self.mydbapi since that data will be used for blocker
				# validation.
				# Even though the graph is now invalid, continue to process
				# dependencies so that things like --fetchonly can still
				# function despite collisions.
				pass
			elif not previously_added:
				self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
				self.mydbapi[pkg.root].cpv_inject(pkg)
				self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()

			if not pkg.installed:
				# Allow this package to satisfy old-style virtuals in case it
				# doesn't already. Any pre-existing providers will be preferred
				# over this one.
				try:
					pkgsettings.setinst(pkg.cpv, pkg.metadata)
					# For consistency, also update the global virtuals.
					settings = self.roots[pkg.root].settings
					settings.unlock()
					settings.setinst(pkg.cpv, pkg.metadata)
					settings.lock()
				except portage.exception.InvalidDependString, e:
					show_invalid_depstring_notice(
						pkg, pkg.metadata["PROVIDE"], str(e))
					del e
					return 0

		if arg_atoms:
			self._set_nodes.add(pkg)

		# Do this even when addme is False (--onlydeps) so that the
		# parent/child relationship is always known in case
		# self._show_slot_collision_notice() needs to be called later.
		self.digraph.add(pkg, myparent, priority=priority)
		if dep.atom is not None and dep.parent is not None:
			self._add_parent_atom(pkg, (dep.parent, dep.atom))

		if arg_atoms:
			for parent_atom in arg_atoms:
				parent, atom = parent_atom
				self.digraph.add(pkg, parent, priority=priority)
				self._add_parent_atom(pkg, parent_atom)

		""" This section determines whether we go deeper into dependencies or not.
		    We want to go deeper on a few occasions:
		    Installing package A, we need to make sure package A's deps are met.
		    emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
		    If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
		"""
		dep_stack = self._dep_stack
		if "recurse" not in self.myparams:
			return 1
		elif pkg.installed and \
			"deep" not in self.myparams:
			# Deps of already-installed packages are only followed with
			# --deep; otherwise park them on the ignored list.
			dep_stack = self._ignored_deps

		self.spinner.update()

		if arg_atoms:
			# Packages requested directly on the command line restart
			# depth counting.
			depth = 0
		pkg.depth = depth
		if not previously_added:
			dep_stack.append(pkg)
		return 1
9506 +
9507 + def _add_parent_atom(self, pkg, parent_atom):
9508 + parent_atoms = self._parent_atoms.get(pkg)
9509 + if parent_atoms is None:
9510 + parent_atoms = set()
9511 + self._parent_atoms[pkg] = parent_atoms
9512 + parent_atoms.add(parent_atom)
9513 +
9514 + def _add_slot_conflict(self, pkg):
9515 + self._slot_collision_nodes.add(pkg)
9516 + slot_key = (pkg.slot_atom, pkg.root)
9517 + slot_nodes = self._slot_collision_info.get(slot_key)
9518 + if slot_nodes is None:
9519 + slot_nodes = set()
9520 + slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
9521 + self._slot_collision_info[slot_key] = slot_nodes
9522 + slot_nodes.add(pkg)
9523 +
	def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
		"""Expand pkg's DEPEND/RDEPEND/PDEPEND strings and queue the
		resulting dependencies for graph processing.

		@param pkg: the Package whose dependencies should be added
		@param allow_unsatisfied: forwarded to _add_pkg_dep_string()
		@return: 1 on success, 0 on invalid or ambiguous dependencies
		"""

		mytype = pkg.type_name
		myroot = pkg.root
		mykey = pkg.cpv
		metadata = pkg.metadata
		myuse = pkg.use.enabled
		jbigkey = pkg
		depth = pkg.depth + 1
		removal_action = "remove" in self.myparams

		edepend={}
		depkeys = ["DEPEND","RDEPEND","PDEPEND"]
		for k in depkeys:
			edepend[k] = metadata[k]

		# With --buildpkgonly (and no deep/empty traversal), runtime
		# deps are irrelevant since nothing gets merged to the root.
		if not pkg.built and \
			"--buildpkgonly" in self.myopts and \
			"deep" not in self.myparams and \
			"empty" not in self.myparams:
			edepend["RDEPEND"] = ""
			edepend["PDEPEND"] = ""
		bdeps_optional = False

		if pkg.built and not removal_action:
			if self.myopts.get("--with-bdeps", "n") == "y":
				# Pull in build time deps as requested, but marked them as
				# "optional" since they are not strictly required. This allows
				# more freedom in the merge order calculation for solving
				# circular dependencies. Don't convert to PDEPEND since that
				# could make --with-bdeps=y less effective if it is used to
				# adjust merge order to prevent built_with_use() calls from
				# failing.
				bdeps_optional = True
			else:
				# built packages do not have build time dependencies.
				edepend["DEPEND"] = ""

		if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
			edepend["DEPEND"] = ""

		# Build-time deps are normally satisfied from "/" rather than
		# the target root; --root-deps overrides that.
		bdeps_root = "/"
		root_deps = self.myopts.get("--root-deps")
		if root_deps is not None:
			if root_deps is True:
				bdeps_root = myroot
			elif root_deps == "rdeps":
				edepend["DEPEND"] = ""

		deps = (
			(bdeps_root, edepend["DEPEND"],
				self._priority(buildtime=(not bdeps_optional),
				optional=bdeps_optional)),
			(myroot, edepend["RDEPEND"], self._priority(runtime=True)),
			(myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
		)

		debug = "--debug" in self.myopts
		strict = mytype != "installed"
		try:
			if not strict:
				# Installed packages may carry dep strings that no
				# longer parse strictly; relax the global parser flag
				# and restore it in the finally clause below.
				portage.dep._dep_check_strict = False

			for dep_root, dep_string, dep_priority in deps:
				if not dep_string:
					continue
				if debug:
					print
					print "Parent: ", jbigkey
					print "Depstring:", dep_string
					print "Priority:", dep_priority

				try:

					dep_string = portage.dep.paren_normalize(
						portage.dep.use_reduce(
						portage.dep.paren_reduce(dep_string),
						uselist=pkg.use.enabled))

					dep_string = list(self._queue_disjunctive_deps(
						pkg, dep_root, dep_priority, dep_string))

				except portage.exception.InvalidDependString, e:
					if pkg.installed:
						del e
						continue
					show_invalid_depstring_notice(pkg, dep_string, str(e))
					return 0

				if not dep_string:
					continue

				dep_string = portage.dep.paren_enclose(dep_string)

				if not self._add_pkg_dep_string(
					pkg, dep_root, dep_priority, dep_string,
					allow_unsatisfied):
					return 0

		except portage.exception.AmbiguousPackageName, e:
			pkgs = e.args[0]
			portage.writemsg("\n\n!!! An atom in the dependencies " + \
				"is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
			for cpv in pkgs:
				portage.writemsg(" %s\n" % cpv, noiselevel=-1)
			portage.writemsg("\n", noiselevel=-1)
			if mytype == "binary":
				portage.writemsg(
					"!!! This binary package cannot be installed: '%s'\n" % \
					mykey, noiselevel=-1)
			elif mytype == "ebuild":
				portdb = self.roots[myroot].trees["porttree"].dbapi
				myebuild, mylocation = portdb.findname2(mykey)
				portage.writemsg("!!! This ebuild cannot be installed: " + \
					"'%s'\n" % myebuild, noiselevel=-1)
			portage.writemsg("!!! Please notify the package maintainer " + \
				"that atoms must be fully-qualified.\n", noiselevel=-1)
			return 0
		finally:
			# Always restore strict dep checking for subsequent callers.
			portage.dep._dep_check_strict = True
		return 1
9645 +
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
    allow_unsatisfied):
    # Resolve one dependency string of pkg into concrete atoms and add a
    # Dependency edge to the graph for each one.
    # Returns 1 on success, 0 on a fatal resolution failure.
    depth = pkg.depth + 1
    debug = "--debug" in self.myopts
    # Installed packages get lenient (non-strict) dep-string parsing.
    strict = pkg.type_name != "installed"

    if debug:
        print
        print "Parent: ", pkg
        print "Depstring:", dep_string
        print "Priority:", dep_priority

    try:
        selected_atoms = self._select_atoms(dep_root,
            dep_string, myuse=pkg.use.enabled, parent=pkg,
            strict=strict, priority=dep_priority)
    except portage.exception.InvalidDependString, e:
        show_invalid_depstring_notice(pkg, dep_string, str(e))
        del e
        # A broken dep string from an already-installed package is
        # reported but treated as non-fatal.
        if pkg.installed:
            return 1
        return 0

    if debug:
        print "Candidates:", selected_atoms

    vardb = self.roots[dep_root].trees["vartree"].dbapi

    for atom in selected_atoms:
        try:

            atom = portage.dep.Atom(atom)

            mypriority = dep_priority.copy()
            # Mark the dep as already satisfied when a matching package
            # is installed; this only influences merge ordering.
            if not atom.blocker and vardb.match(atom):
                mypriority.satisfied = True

            if not self._add_dep(Dependency(atom=atom,
                blocker=atom.blocker, depth=depth, parent=pkg,
                priority=mypriority, root=dep_root),
                allow_unsatisfied=allow_unsatisfied):
                return 0

        except portage.exception.InvalidAtom, e:
            show_invalid_depstring_notice(
                pkg, dep_string, str(e))
            del e
            # Invalid atoms are fatal only for packages that are not
            # already installed.
            if not pkg.installed:
                return 0

    if debug:
        print "Exiting...", pkg

    return 1
9700 +
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
    """
    Queue disjunctive (virtual and ||) deps in self._dep_disjunctive_stack.
    Yields non-disjunctive deps. Raises InvalidDependString when
    necessary.
    """
    pos = 0
    while pos < len(dep_struct):
        node = dep_struct[pos]
        if isinstance(node, list):
            # Recurse into nested groups, passing through their
            # non-disjunctive members.
            for flattened in self._queue_disjunctive_deps(
                pkg, dep_root, dep_priority, node):
                yield flattened
        elif node == "||":
            # Defer the whole "|| ( ... )" choice; it consumes the
            # following element as well.
            self._queue_disjunction(pkg, dep_root, dep_priority,
                [ node, dep_struct[ pos + 1 ] ] )
            pos += 1
        else:
            try:
                node = portage.dep.Atom(node)
            except portage.exception.InvalidAtom:
                if not pkg.installed:
                    raise portage.exception.InvalidDependString(
                        "invalid atom: '%s'" % node)
            else:
                # Note: Eventually this will check for PROPERTIES=virtual
                # or whatever other metadata gets implemented for this
                # purpose.
                if node.cp.startswith('virtual/'):
                    self._queue_disjunction( pkg, dep_root,
                        dep_priority, [ str(node) ] )
                else:
                    yield str(node)
        pos += 1
9735 +
def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
    """Defer a disjunctive dependency for later processing by
    _pop_disjunction()."""
    entry = (pkg, dep_root, dep_priority, dep_struct)
    self._dep_disjunctive_stack.append(entry)
9739 +
def _pop_disjunction(self, allow_unsatisfied):
    """
    Take the most recently queued disjunctive dep off
    self._dep_disjunctive_stack and feed it back through
    _add_pkg_dep_string() so that it populates self._dep_stack.
    """
    entry = self._dep_disjunctive_stack.pop()
    pkg, dep_root, dep_priority, dep_struct = entry
    dep_string = portage.dep.paren_enclose(dep_struct)
    success = self._add_pkg_dep_string(
        pkg, dep_root, dep_priority, dep_string, allow_unsatisfied)
    if success:
        return 1
    return 0
9752 +
def _priority(self, **kwargs):
    """Build a dep priority object appropriate for the current mode:
    UnmergeDepPriority during removal actions, DepPriority otherwise."""
    if "remove" in self.myparams:
        return UnmergeDepPriority(**kwargs)
    return DepPriority(**kwargs)
9759 +
def _dep_expand(self, root_config, atom_without_category):
    """
    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @rtype: list
    @returns: a list of atoms containing categories (possibly empty)
    """
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    cat, atom_pn = portage.catsplit(null_cp)

    # Probe every configured database for categories that contain a
    # package with this name.
    categories = set()
    for db, pkg_type, built, installed, db_keys in \
        self._filtered_trees[root_config.root]["dbs"]:
        categories.update(candidate_cat
            for candidate_cat in db.categories
            if db.cp_list("%s/%s" % (candidate_cat, atom_pn)))

    return [insert_category_into_atom(atom_without_category, candidate_cat)
        for candidate_cat in categories]
9785 +
def _have_new_virt(self, root, atom_cp):
    """Return True when any configured database for root carries a
    package under atom_cp (i.e. a new-style virtual provider exists)."""
    for db, pkg_type, built, installed, db_keys in \
        self._filtered_trees[root]["dbs"]:
        if db.cp_list(atom_cp):
            return True
    return False
9794 +
def _iter_atoms_for_pkg(self, pkg):
    """Yield (arg, atom) pairs for every argument atom that pkg matches,
    skipping atoms that are better served by a higher slot or by a
    new-style virtual provider."""
    # TODO: add multiple $ROOT support
    if pkg.root != self.target_root:
        return
    atom_arg_map = self._atom_arg_map
    root_config = self.roots[pkg.root]
    for atom in self._set_atoms.iterAtomsForPackage(pkg):
        atom_cp = portage.dep_getkey(atom)
        # Ignore old-style virtual matches once a new-style virtual
        # provider exists for the same cp.
        if atom_cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom_cp):
            continue
        candidates = root_config.visible_pkgs.match_pkgs(atom)
        candidates.reverse() # descending order
        higher_slot = None
        for candidate in candidates:
            if candidate.cp != atom_cp:
                continue
            if candidate <= pkg:
                # Descending order: nothing above pkg remains,
                # so there is no higher slot to find.
                break
            if candidate.slot_atom != pkg.slot_atom:
                higher_slot = candidate
                break
        if higher_slot is not None:
            continue
        for arg in atom_arg_map[(atom, pkg.root)]:
            if isinstance(arg, PackageArg) and arg.package != pkg:
                continue
            yield arg, atom
9826 +
def select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds sets, and deps, create the
    appropriate depgraph and return a favorite list."""
    debug = "--debug" in self.myopts
    root_config = self.roots[self.target_root]
    sets = root_config.sets
    getSetAtoms = root_config.setconfig.getSetAtoms
    myfavorites=[]
    myroot = self.target_root
    dbs = self._filtered_trees[myroot]["dbs"]
    vardb = self.trees[myroot]["vartree"].dbapi
    real_vardb = self._trees_orig[myroot]["vartree"].dbapi
    portdb = self.trees[myroot]["porttree"].dbapi
    bindb = self.trees[myroot]["bintree"].dbapi
    pkgsettings = self.pkgsettings[myroot]
    args = []
    onlydeps = "--onlydeps" in self.myopts
    lookup_owners = []
    # Classify each argument: .tbz2 binary package, .ebuild file,
    # absolute path (queued for owner lookup), set name, or atom.
    for x in myfiles:
        ext = os.path.splitext(x)[1]
        if ext==".tbz2":
            if not os.path.exists(x):
                # Try the conventional PKGDIR locations before giving up.
                if os.path.exists(
                    os.path.join(pkgsettings["PKGDIR"], "All", x)):
                    x = os.path.join(pkgsettings["PKGDIR"], "All", x)
                elif os.path.exists(
                    os.path.join(pkgsettings["PKGDIR"], x)):
                    x = os.path.join(pkgsettings["PKGDIR"], x)
                else:
                    print "\n\n!!! Binary package '"+str(x)+"' does not exist."
                    print "!!! Please ensure the tbz2 exists as specified.\n"
                    return 0, myfavorites
            mytbz2=portage.xpak.tbz2(x)
            mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
            # The tbz2 must live at the path the binary tree expects.
            if os.path.realpath(x) != \
                os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
                print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
                return 0, myfavorites
            db_keys = list(bindb._aux_cache_keys)
            metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
            pkg = Package(type_name="binary", root_config=root_config,
                cpv=mykey, built=True, metadata=metadata,
                onlydeps=onlydeps)
            self._pkg_cache[pkg] = pkg
            args.append(PackageArg(arg=x, package=pkg,
                root_config=root_config))
        elif ext==".ebuild":
            ebuild_path = portage.util.normalize_path(os.path.abspath(x))
            pkgdir = os.path.dirname(ebuild_path)
            tree_root = os.path.dirname(os.path.dirname(pkgdir))
            cp = pkgdir[len(tree_root)+1:]
            e = portage.exception.PackageNotFound(
                ("%s is not in a valid portage tree " + \
                "hierarchy or does not exist") % x)
            if not portage.isvalidatom(cp):
                raise e
            cat = portage.catsplit(cp)[0]
            # Strip the ".ebuild" suffix (7 chars) to form the cpv.
            mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
            if not portage.isvalidatom("="+mykey):
                raise e
            ebuild_path = portdb.findname(mykey)
            if ebuild_path:
                # The given file must be the one portdb would select.
                if ebuild_path != os.path.join(os.path.realpath(tree_root),
                    cp, os.path.basename(ebuild_path)):
                    print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
                    return 0, myfavorites
                if mykey not in portdb.xmatch(
                    "match-visible", portage.dep_getkey(mykey)):
                    print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
                    print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
                    print colorize("BAD", "*** page for details.")
                    countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
                        "Continuing...")
            else:
                raise portage.exception.PackageNotFound(
                    "%s is not in a valid portage tree hierarchy or does not exist" % x)
            db_keys = list(portdb._aux_cache_keys)
            metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
            pkg = Package(type_name="ebuild", root_config=root_config,
                cpv=mykey, metadata=metadata, onlydeps=onlydeps)
            pkgsettings.setcpv(pkg)
            pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
            pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
            self._pkg_cache[pkg] = pkg
            args.append(PackageArg(arg=x, package=pkg,
                root_config=root_config))
        elif x.startswith(os.path.sep):
            if not x.startswith(myroot):
                portage.writemsg(("\n\n!!! '%s' does not start with" + \
                    " $ROOT.\n") % x, noiselevel=-1)
                return 0, []
            # Queue these up since it's most efficient to handle
            # multiple files in a single iter_owners() call.
            lookup_owners.append(x)
        else:
            if x in ("system", "world"):
                x = SETPREFIX + x
            if x.startswith(SETPREFIX):
                s = x[len(SETPREFIX):]
                if s not in sets:
                    raise portage.exception.PackageSetNotFound(s)
                if s in self._sets:
                    continue
                # Recursively expand sets so that containment tests in
                # self._get_parent_sets() properly match atoms in nested
                # sets (like if world contains system).
                expanded_set = InternalPackageSet(
                    initial_atoms=getSetAtoms(s))
                self._sets[s] = expanded_set
                args.append(SetArg(arg=x, set=expanded_set,
                    root_config=root_config))
                continue
            if not is_valid_package_atom(x):
                portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
                    noiselevel=-1)
                portage.writemsg("!!! Please check ebuild(5) for full details.\n")
                portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
                return (0,[])
            # Don't expand categories or old-style virtuals here unless
            # necessary. Expansion of old-style virtuals here causes at
            # least the following problems:
            # 1) It's more difficult to determine which set(s) an atom
            # came from, if any.
            # 2) It takes away freedom from the resolver to choose other
            # possible expansions when necessary.
            if "/" in x:
                args.append(AtomArg(arg=x, atom=x,
                    root_config=root_config))
                continue
            expanded_atoms = self._dep_expand(root_config, x)
            installed_cp_set = set()
            for atom in expanded_atoms:
                atom_cp = portage.dep_getkey(atom)
                if vardb.cp_list(atom_cp):
                    installed_cp_set.add(atom_cp)

            if len(installed_cp_set) > 1:
                # Prefer the single non-virtual cp if exactly one exists.
                non_virtual_cps = set()
                for atom_cp in installed_cp_set:
                    if not atom_cp.startswith("virtual/"):
                        non_virtual_cps.add(atom_cp)
                if len(non_virtual_cps) == 1:
                    installed_cp_set = non_virtual_cps

            if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
                # Disambiguate in favor of the installed package's cp.
                installed_cp = iter(installed_cp_set).next()
                expanded_atoms = [atom for atom in expanded_atoms \
                    if portage.dep_getkey(atom) == installed_cp]

            if len(expanded_atoms) > 1:
                print
                print
                ambiguous_package_name(x, expanded_atoms, root_config,
                    self.spinner, self.myopts)
                return False, myfavorites
            if expanded_atoms:
                atom = expanded_atoms[0]
            else:
                null_atom = insert_category_into_atom(x, "null")
                null_cp = portage.dep_getkey(null_atom)
                cat, atom_pn = portage.catsplit(null_cp)
                virts_p = root_config.settings.get_virts_p().get(atom_pn)
                if virts_p:
                    # Allow the depgraph to choose which virtual.
                    atom = insert_category_into_atom(x, "virtual")
                else:
                    atom = insert_category_into_atom(x, "null")

            args.append(AtomArg(arg=x, atom=atom,
                root_config=root_config))

    if lookup_owners:
        relative_paths = []
        search_for_multiple = False
        if len(lookup_owners) > 1:
            search_for_multiple = True

        for x in lookup_owners:
            if not search_for_multiple and os.path.isdir(x):
                search_for_multiple = True
            relative_paths.append(x[len(myroot):])

        owners = set()
        for pkg, relative_path in \
            real_vardb._owners.iter_owners(relative_paths):
            owners.add(pkg.mycpv)
            if not search_for_multiple:
                break

        if not owners:
            portage.writemsg(("\n\n!!! '%s' is not claimed " + \
                "by any package.\n") % lookup_owners[0], noiselevel=-1)
            return 0, []

        for cpv in owners:
            slot = vardb.aux_get(cpv, ["SLOT"])[0]
            if not slot:
                # portage now masks packages with missing slot, but it's
                # possible that one was installed by an older version
                atom = portage.cpv_getkey(cpv)
            else:
                atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
            args.append(AtomArg(arg=atom, atom=atom,
                root_config=root_config))

    if "--update" in self.myopts:
        # In some cases, the greedy slots behavior can pull in a slot that
        # the user would want to uninstall due to it being blocked by a
        # newer version in a different slot. Therefore, it's necessary to
        # detect and discard any that should be uninstalled. Each time
        # that arguments are updated, package selections are repeated in
        # order to ensure consistency with the current arguments:
        #
        # 1) Initialize args
        # 2) Select packages and generate initial greedy atoms
        # 3) Update args with greedy atoms
        # 4) Select packages and generate greedy atoms again, while
        # accounting for any blockers between selected packages
        # 5) Update args with revised greedy atoms

        self._set_args(args)
        greedy_args = []
        for arg in args:
            greedy_args.append(arg)
            if not isinstance(arg, AtomArg):
                continue
            for atom in self._greedy_slots(arg.root_config, arg.atom):
                greedy_args.append(
                    AtomArg(arg=arg.arg, atom=atom,
                        root_config=arg.root_config))

        self._set_args(greedy_args)
        del greedy_args

        # Revise greedy atoms, accounting for any blockers
        # between selected packages.
        revised_greedy_args = []
        for arg in args:
            revised_greedy_args.append(arg)
            if not isinstance(arg, AtomArg):
                continue
            for atom in self._greedy_slots(arg.root_config, arg.atom,
                blocker_lookahead=True):
                revised_greedy_args.append(
                    AtomArg(arg=arg.arg, atom=atom,
                        root_config=arg.root_config))
        args = revised_greedy_args
        del revised_greedy_args

    self._set_args(args)

    # Favorites are recorded for the world file update later on.
    myfavorites = set(myfavorites)
    for arg in args:
        if isinstance(arg, (AtomArg, PackageArg)):
            myfavorites.add(arg.atom)
        elif isinstance(arg, SetArg):
            myfavorites.add(arg.arg)
    myfavorites = list(myfavorites)

    pprovideddict = pkgsettings.pprovideddict
    if debug:
        portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
    args.reverse()
    while args:
        arg = args.pop()
        for atom in arg.set:
            self.spinner.update()
            dep = Dependency(atom=atom, onlydeps=onlydeps,
                root=myroot, parent=arg)
            atom_cp = portage.dep_getkey(atom)
            try:
                pprovided = pprovideddict.get(portage.dep_getkey(atom))
                if pprovided and portage.match_from_list(atom, pprovided):
                    # A provided package has been specified on the command line.
                    self._pprovided_args.append((arg, atom))
                    continue
                if isinstance(arg, PackageArg):
                    if not self._add_pkg(arg.package, dep) or \
                        not self._create_graph():
                        sys.stderr.write(("\n\n!!! Problem resolving " + \
                            "dependencies for %s\n") % arg.arg)
                        return 0, myfavorites
                    continue
                if debug:
                    portage.writemsg(" Arg: %s\n Atom: %s\n" % \
                        (arg, atom), noiselevel=-1)
                pkg, existing_node = self._select_package(
                    myroot, atom, onlydeps=onlydeps)
                if not pkg:
                    # Unsatisfied atoms from system/world sets are
                    # tolerated (recorded); others are fatal.
                    if not (isinstance(arg, SetArg) and \
                        arg.name in ("system", "world")):
                        self._unsatisfied_deps_for_display.append(
                            ((myroot, atom), {}))
                        return 0, myfavorites
                    self._missing_args.append((arg, atom))
                    continue
                if atom_cp != pkg.cp:
                    # For old-style virtuals, we need to repeat the
                    # package.provided check against the selected package.
                    expanded_atom = atom.replace(atom_cp, pkg.cp)
                    pprovided = pprovideddict.get(pkg.cp)
                    if pprovided and \
                        portage.match_from_list(expanded_atom, pprovided):
                        # A provided package has been
                        # specified on the command line.
                        self._pprovided_args.append((arg, atom))
                        continue
                if pkg.installed and "selective" not in self.myparams:
                    self._unsatisfied_deps_for_display.append(
                        ((myroot, atom), {}))
                    # Previous behavior was to bail out in this case, but
                    # since the dep is satisfied by the installed package,
                    # it's more friendly to continue building the graph
                    # and just show a warning message. Therefore, only bail
                    # out here if the atom is not from either the system or
                    # world set.
                    if not (isinstance(arg, SetArg) and \
                        arg.name in ("system", "world")):
                        return 0, myfavorites

                # Add the selected package to the graph as soon as possible
                # so that later dep_check() calls can use it as feedback
                # for making more consistent atom selections.
                if not self._add_pkg(pkg, dep):
                    if isinstance(arg, SetArg):
                        sys.stderr.write(("\n\n!!! Problem resolving " + \
                            "dependencies for %s from %s\n") % \
                            (atom, arg.arg))
                    else:
                        sys.stderr.write(("\n\n!!! Problem resolving " + \
                            "dependencies for %s\n") % atom)
                    return 0, myfavorites

            except portage.exception.MissingSignature, e:
                portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
                portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
                portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
                portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
                portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
                return 0, myfavorites
            except portage.exception.InvalidSignature, e:
                portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
                portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
                portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
                portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
                portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
                return 0, myfavorites
            except SystemExit, e:
                raise # Needed else can't exit
            except Exception, e:
                print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
                print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
                raise

    # Now that the root packages have been added to the graph,
    # process the dependencies.
    if not self._create_graph():
        return 0, myfavorites

    missing=0
    if "--usepkgonly" in self.myopts:
        for xs in self.digraph.all_nodes():
            if not isinstance(xs, Package):
                continue
            if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
                if missing == 0:
                    print
                missing += 1
                print "Missing binary for:",xs[2]

    try:
        self.altlist()
    except self._unknown_internal_error:
        return False, myfavorites

    # We're true here unless we are missing binaries.
    return (not missing,myfavorites)
10206 +
def _set_args(self, args):
    """
    Rebuild the "args" package set from the given argument objects.
    Safe to call repeatedly; each call also invalidates the package
    selection caches, since arguments influence package selections.
    """
    # Collect atoms from atom/package arguments into the "args" set.
    args_set = self._sets["args"]
    args_set.clear()
    for arg in args:
        if isinstance(arg, (AtomArg, PackageArg)):
            atom = arg.atom
            if atom not in args_set:
                args_set.add(atom)

    # Refresh the flattened union of all set atoms.
    self._set_atoms.clear()
    self._set_atoms.update(chain(*self._sets.itervalues()))

    # Map (atom, root) -> list of argument objects referencing it.
    atom_arg_map = self._atom_arg_map
    atom_arg_map.clear()
    for arg in args:
        for atom in arg.set:
            atom_key = (atom, arg.root_config.root)
            refs = atom_arg_map.get(atom_key)
            if refs is None:
                atom_arg_map[atom_key] = refs = []
            if arg not in refs:
                refs.append(arg)

    # Invalidate cached package selections, since
    # arguments influence package selections.
    self._highest_pkg_cache.clear()
    for trees in self._filtered_trees.itervalues():
        trees["porttree"].dbapi._clear_cache()
10243 +
def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
    """
    Return a list of slot atoms corresponding to installed slots that
    differ from the slot of the highest visible match. When
    blocker_lookahead is True, slot atoms that would trigger a blocker
    conflict are automatically discarded, potentially allowing automatic
    uninstallation of older slots when appropriate.
    """
    highest_pkg, in_graph = self._select_package(root_config.root, atom)
    if highest_pkg is None:
        return []
    vardb = root_config.trees["vartree"].dbapi
    # Collect the SLOTs of installed matches for the same cp.
    slots = set()
    for cpv in vardb.match(atom):
        # don't mix new virtuals with old virtuals
        if portage.cpv_getkey(cpv) == highest_pkg.cp:
            slots.add(vardb.aux_get(cpv, ["SLOT"])[0])

    slots.add(highest_pkg.metadata["SLOT"])
    if len(slots) == 1:
        return []
    greedy_pkgs = []
    slots.remove(highest_pkg.metadata["SLOT"])
    # For each remaining installed slot, select the best visible
    # package in that slot, keeping only those older than highest_pkg.
    while slots:
        slot = slots.pop()
        slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
        pkg, in_graph = self._select_package(root_config.root, slot_atom)
        if pkg is not None and \
            pkg.cp == highest_pkg.cp and pkg < highest_pkg:
            greedy_pkgs.append(pkg)
    if not greedy_pkgs:
        return []
    if not blocker_lookahead:
        return [pkg.slot_atom for pkg in greedy_pkgs]

    # Gather blocker atoms declared by each candidate (and by
    # highest_pkg) so conflicting slots can be filtered out below.
    blockers = {}
    blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
    for pkg in greedy_pkgs + [highest_pkg]:
        dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
        try:
            atoms = self._select_atoms(
                pkg.root, dep_str, pkg.use.enabled,
                parent=pkg, strict=True)
        except portage.exception.InvalidDependString:
            continue
        blocker_atoms = (x for x in atoms if x.blocker)
        blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)

    if highest_pkg not in blockers:
        return []

    # filter packages with invalid deps
    greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]

    # filter packages that conflict with highest_pkg
    greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
        (blockers[highest_pkg].findAtomForPackage(pkg) or \
        blockers[pkg].findAtomForPackage(highest_pkg))]

    if not greedy_pkgs:
        return []

    # If two packages conflict, discard the lower version.
    discard_pkgs = set()
    greedy_pkgs.sort(reverse=True)
    for i in xrange(len(greedy_pkgs) - 1):
        pkg1 = greedy_pkgs[i]
        if pkg1 in discard_pkgs:
            continue
        for j in xrange(i + 1, len(greedy_pkgs)):
            pkg2 = greedy_pkgs[j]
            if pkg2 in discard_pkgs:
                continue
            if blockers[pkg1].findAtomForPackage(pkg2) or \
                blockers[pkg2].findAtomForPackage(pkg1):
                # pkg1 > pkg2
                discard_pkgs.add(pkg2)

    return [pkg.slot_atom for pkg in greedy_pkgs \
        if pkg not in discard_pkgs]
10324 +
def _select_atoms_from_graph(self, *pargs, **kwargs):
    """
    Select atoms while preferring packages that have already been
    added to the graph, or installed packages that are not yet
    scheduled for replacement.
    """
    merged_kwargs = dict(kwargs)
    merged_kwargs["trees"] = self._graph_trees
    return self._select_atoms_highest_available(*pargs, **merged_kwargs)
10333 +
def _select_atoms_highest_available(self, root, depstring,
    myuse=None, parent=None, strict=True, trees=None, priority=None):
    """Run dep_check() on depstring and return the selected atoms.

    This will raise InvalidDependString if necessary. If trees is
    None then self._filtered_trees is used.
    """
    # Fix: drop the vestigial "if True:" wrapper that only added a dead
    # indentation level around the try/finally; behavior is unchanged.
    pkgsettings = self.pkgsettings[root]
    if trees is None:
        trees = self._filtered_trees
    if not getattr(priority, "buildtime", False):
        # The parent should only be passed to dep_check() for buildtime
        # dependencies since that's the only case when it's appropriate
        # to trigger the circular dependency avoidance code which uses it.
        # It's important not to trigger the same circular dependency
        # avoidance code for runtime dependencies since it's not needed
        # and it can promote an incorrect package choice.
        parent = None
    try:
        if parent is not None:
            # dep_check() reads the parent from the trees mapping.
            trees[root]["parent"] = parent
        if not strict:
            portage.dep._dep_check_strict = False
        mycheck = portage.dep_check(depstring, None,
            pkgsettings, myuse=myuse,
            myroot=root, trees=trees)
    finally:
        if parent is not None:
            trees[root].pop("parent")
        # NOTE(review): strict mode is unconditionally re-enabled here,
        # mirroring the global toggle convention used elsewhere in this
        # module.
        portage.dep._dep_check_strict = True
    if not mycheck[0]:
        raise portage.exception.InvalidDependString(mycheck[1])
    selected_atoms = mycheck[1]
    return selected_atoms
10366 +
10367 + def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
10368 + atom = portage.dep.Atom(atom)
10369 + atom_set = InternalPackageSet(initial_atoms=(atom,))
10370 + atom_without_use = atom
10371 + if atom.use:
10372 + atom_without_use = portage.dep.remove_slot(atom)
10373 + if atom.slot:
10374 + atom_without_use += ":" + atom.slot
10375 + atom_without_use = portage.dep.Atom(atom_without_use)
10376 + xinfo = '"%s"' % atom
10377 + if arg:
10378 + xinfo='"%s"' % arg
10379 + # Discard null/ from failed cpv_expand category expansion.
10380 + xinfo = xinfo.replace("null/", "")
10381 + masked_packages = []
10382 + missing_use = []
10383 + masked_pkg_instances = set()
10384 + missing_licenses = []
10385 + have_eapi_mask = False
10386 + pkgsettings = self.pkgsettings[root]
10387 + implicit_iuse = pkgsettings._get_implicit_iuse()
10388 + root_config = self.roots[root]
10389 + portdb = self.roots[root].trees["porttree"].dbapi
10390 + dbs = self._filtered_trees[root]["dbs"]
10391 + for db, pkg_type, built, installed, db_keys in dbs:
10392 + if installed:
10393 + continue
10394 + match = db.match
10395 + if hasattr(db, "xmatch"):
10396 + cpv_list = db.xmatch("match-all", atom_without_use)
10397 + else:
10398 + cpv_list = db.match(atom_without_use)
10399 + # descending order
10400 + cpv_list.reverse()
10401 + for cpv in cpv_list:
10402 + metadata, mreasons = get_mask_info(root_config, cpv,
10403 + pkgsettings, db, pkg_type, built, installed, db_keys)
10404 + if metadata is not None:
10405 + pkg = Package(built=built, cpv=cpv,
10406 + installed=installed, metadata=metadata,
10407 + root_config=root_config)
10408 + if pkg.cp != atom.cp:
10409 + # A cpv can be returned from dbapi.match() as an
10410 + # old-style virtual match even in cases when the
10411 + # package does not actually PROVIDE the virtual.
10412 + # Filter out any such false matches here.
10413 + if not atom_set.findAtomForPackage(pkg):
10414 + continue
10415 + if mreasons:
10416 + masked_pkg_instances.add(pkg)
10417 + if atom.use:
10418 + missing_use.append(pkg)
10419 + if not mreasons:
10420 + continue
10421 + masked_packages.append(
10422 + (root_config, pkgsettings, cpv, metadata, mreasons))
10423 +
10424 + missing_use_reasons = []
10425 + missing_iuse_reasons = []
10426 + for pkg in missing_use:
10427 + use = pkg.use.enabled
10428 + iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
10429 + iuse_re = re.compile("^(%s)$" % "|".join(iuse))
10430 + missing_iuse = []
10431 + for x in atom.use.required:
10432 + if iuse_re.match(x) is None:
10433 + missing_iuse.append(x)
10434 + mreasons = []
10435 + if missing_iuse:
10436 + mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
10437 + missing_iuse_reasons.append((pkg, mreasons))
10438 + else:
10439 + need_enable = sorted(atom.use.enabled.difference(use))
10440 + need_disable = sorted(atom.use.disabled.intersection(use))
10441 + if need_enable or need_disable:
10442 + changes = []
10443 + changes.extend(colorize("red", "+" + x) \
10444 + for x in need_enable)
10445 + changes.extend(colorize("blue", "-" + x) \
10446 + for x in need_disable)
10447 + mreasons.append("Change USE: %s" % " ".join(changes))
10448 + missing_use_reasons.append((pkg, mreasons))
10449 +
10450 + unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
10451 + in missing_use_reasons if pkg not in masked_pkg_instances]
10452 +
10453 + unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
10454 + in missing_iuse_reasons if pkg not in masked_pkg_instances]
10455 +
10456 + show_missing_use = False
10457 + if unmasked_use_reasons:
10458 + # Only show the latest version.
10459 + show_missing_use = unmasked_use_reasons[:1]
10460 + elif unmasked_iuse_reasons:
10461 + if missing_use_reasons:
10462 + # All packages with required IUSE are masked,
10463 + # so display a normal masking message.
10464 + pass
10465 + else:
10466 + show_missing_use = unmasked_iuse_reasons
10467 +
10468 + if show_missing_use:
10469 + print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
10470 + print "!!! One of the following packages is required to complete your request:"
10471 + for pkg, mreasons in show_missing_use:
10472 + print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
10473 +
10474 + elif masked_packages:
10475 + print "\n!!! " + \
10476 + colorize("BAD", "All ebuilds that could satisfy ") + \
10477 + colorize("INFORM", xinfo) + \
10478 + colorize("BAD", " have been masked.")
10479 + print "!!! One of the following masked packages is required to complete your request:"
10480 + have_eapi_mask = show_masked_packages(masked_packages)
10481 + if have_eapi_mask:
10482 + print
10483 + msg = ("The current version of portage supports " + \
10484 + "EAPI '%s'. You must upgrade to a newer version" + \
10485 + " of portage before EAPI masked packages can" + \
10486 + " be installed.") % portage.const.EAPI
10487 + from textwrap import wrap
10488 + for line in wrap(msg, 75):
10489 + print line
10490 + print
10491 + show_mask_docs()
10492 + else:
10493 + print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
10494 +
10495 + # Show parent nodes and the argument that pulled them in.
10496 + traversed_nodes = set()
10497 + node = myparent
10498 + msg = []
10499 + while node is not None:
10500 + traversed_nodes.add(node)
10501 + msg.append('(dependency required by "%s" [%s])' % \
10502 + (colorize('INFORM', str(node.cpv)), node.type_name))
10503 + # When traversing to parents, prefer arguments over packages
10504 + # since arguments are root nodes. Never traverse the same
10505 + # package twice, in order to prevent an infinite loop.
10506 + selected_parent = None
10507 + for parent in self.digraph.parent_nodes(node):
10508 + if isinstance(parent, DependencyArg):
10509 + msg.append('(dependency required by "%s" [argument])' % \
10510 + (colorize('INFORM', str(parent))))
10511 + selected_parent = None
10512 + break
10513 + if parent not in traversed_nodes:
10514 + selected_parent = parent
10515 + node = selected_parent
10516 + for line in msg:
10517 + print line
10518 +
10519 + print
10520 +
10521 + def _select_pkg_highest_available(self, root, atom, onlydeps=False):
10522 + cache_key = (root, atom, onlydeps)
10523 + ret = self._highest_pkg_cache.get(cache_key)
10524 + if ret is not None:
10525 + pkg, existing = ret
10526 + if pkg and not existing:
10527 + existing = self._slot_pkg_map[root].get(pkg.slot_atom)
10528 + if existing and existing == pkg:
10529 + # Update the cache to reflect that the
10530 + # package has been added to the graph.
10531 + ret = pkg, pkg
10532 + self._highest_pkg_cache[cache_key] = ret
10533 + return ret
10534 + ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
10535 + self._highest_pkg_cache[cache_key] = ret
10536 + pkg, existing = ret
10537 + if pkg is not None:
10538 + settings = pkg.root_config.settings
10539 + if visible(settings, pkg) and not (pkg.installed and \
10540 + settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
10541 + pkg.root_config.visible_pkgs.cpv_inject(pkg)
10542 + return ret
10543 +
def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
    """
    Uncached implementation behind _select_pkg_highest_available.

    Scans the configured dbs for the given root (each dbs entry is a
    (db, pkg_type, built, installed, db_keys) tuple) for the highest
    visible package matching atom, honoring options such as
    --usepkgonly, --noreplace, --newuse and --reinstall.

    Returns a (package, existing_node) pair; existing_node is the
    package already occupying the matching slot in the graph, if any.
    Returns (None, None) when nothing matches.
    """
    root_config = self.roots[root]
    pkgsettings = self.pkgsettings[root]
    dbs = self._filtered_trees[root]["dbs"]
    vardb = self.roots[root].trees["vartree"].dbapi
    portdb = self.roots[root].trees["porttree"].dbapi
    # List of acceptable packages, ordered by type preference.
    matched_packages = []
    highest_version = None
    if not isinstance(atom, portage.dep.Atom):
        atom = portage.dep.Atom(atom)
    atom_cp = atom.cp
    atom_set = InternalPackageSet(initial_atoms=(atom,))
    existing_node = None
    myeb = None
    usepkgonly = "--usepkgonly" in self.myopts
    empty = "empty" in self.myparams
    selective = "selective" in self.myparams
    reinstall = False
    noreplace = "--noreplace" in self.myopts
    # Behavior of the "selective" parameter depends on
    # whether or not a package matches an argument atom.
    # If an installed package provides an old-style
    # virtual that is no longer provided by an available
    # package, the installed package may match an argument
    # atom even though none of the available packages do.
    # Therefore, "selective" logic does not consider
    # whether or not an installed package matches an
    # argument atom. It only considers whether or not
    # available packages match argument atoms, which is
    # represented by the found_available_arg flag.
    found_available_arg = False
    # First pass looks for a node already in the graph; second pass
    # considers everything else.
    for find_existing_node in True, False:
        if existing_node:
            break
        for db, pkg_type, built, installed, db_keys in dbs:
            if existing_node:
                break
            if installed and not find_existing_node:
                want_reinstall = reinstall or empty or \
                    (found_available_arg and not selective)
                if want_reinstall and matched_packages:
                    continue
            if hasattr(db, "xmatch"):
                cpv_list = db.xmatch("match-all", atom)
            else:
                cpv_list = db.match(atom)

            # USE=multislot can make an installed package appear as if
            # it doesn't satisfy a slot dependency. Rebuilding the ebuild
            # won't do any good as long as USE=multislot is enabled since
            # the newly built package still won't have the expected slot.
            # Therefore, assume that such SLOT dependencies are already
            # satisfied rather than forcing a rebuild.
            if installed and not cpv_list and atom.slot:
                for cpv in db.match(atom.cp):
                    slot_available = False
                    for other_db, other_type, other_built, \
                        other_installed, other_keys in dbs:
                        try:
                            if atom.slot == \
                                other_db.aux_get(cpv, ["SLOT"])[0]:
                                slot_available = True
                                break
                        except KeyError:
                            pass
                    if not slot_available:
                        continue
                    inst_pkg = self._pkg(cpv, "installed",
                        root_config, installed=installed)
                    # Remove the slot from the atom and verify that
                    # the package matches the resulting atom.
                    atom_without_slot = portage.dep.remove_slot(atom)
                    if atom.use:
                        atom_without_slot += str(atom.use)
                    atom_without_slot = portage.dep.Atom(atom_without_slot)
                    if portage.match_from_list(
                        atom_without_slot, [inst_pkg]):
                        cpv_list = [inst_pkg.cpv]
                    break

            if not cpv_list:
                continue
            pkg_status = "merge"
            if installed or onlydeps:
                pkg_status = "nomerge"
            # descending order
            cpv_list.reverse()
            for cpv in cpv_list:
                # Make --noreplace take precedence over --newuse.
                if not installed and noreplace and \
                    cpv in vardb.match(atom):
                    # If the installed version is masked, it may
                    # be necessary to look at lower versions,
                    # in case there is a visible downgrade.
                    continue
                reinstall_for_flags = None
                cache_key = (pkg_type, root, cpv, pkg_status)
                calculated_use = True
                pkg = self._pkg_cache.get(cache_key)
                if pkg is None:
                    calculated_use = False
                    try:
                        metadata = izip(db_keys, db.aux_get(cpv, db_keys))
                    except KeyError:
                        continue
                    pkg = Package(built=built, cpv=cpv,
                        installed=installed, metadata=metadata,
                        onlydeps=onlydeps, root_config=root_config,
                        type_name=pkg_type)
                    metadata = pkg.metadata
                    if not built:
                        metadata['CHOST'] = pkgsettings.get('CHOST', '')
                    if not built and ("?" in metadata["LICENSE"] or \
                        "?" in metadata["PROVIDE"]):
                        # This is avoided whenever possible because
                        # it's expensive. It only needs to be done here
                        # if it has an effect on visibility.
                        pkgsettings.setcpv(pkg)
                        metadata["USE"] = pkgsettings["PORTAGE_USE"]
                        calculated_use = True
                    self._pkg_cache[pkg] = pkg

                if not installed or (built and matched_packages):
                    # Only enforce visibility on installed packages
                    # if there is at least one other visible package
                    # available. By filtering installed masked packages
                    # here, packages that have been masked since they
                    # were installed can be automatically downgraded
                    # to an unmasked version.
                    try:
                        if not visible(pkgsettings, pkg):
                            continue
                    except portage.exception.InvalidDependString:
                        if not installed:
                            continue

                    # Enable upgrade or downgrade to a version
                    # with visible KEYWORDS when the installed
                    # version is masked by KEYWORDS, but never
                    # reinstall the same exact version only due
                    # to a KEYWORDS mask.
                    if built and matched_packages:

                        different_version = None
                        for avail_pkg in matched_packages:
                            if not portage.dep.cpvequal(
                                pkg.cpv, avail_pkg.cpv):
                                different_version = avail_pkg
                                break
                        if different_version is not None:

                            if installed and \
                                pkgsettings._getMissingKeywords(
                                pkg.cpv, pkg.metadata):
                                continue

                            # If the ebuild no longer exists or its
                            # keywords have been dropped, reject built
                            # instances (installed or binary).
                            # If --usepkgonly is enabled, assume that
                            # the ebuild status should be ignored.
                            if not usepkgonly:
                                try:
                                    pkg_eb = self._pkg(
                                        pkg.cpv, "ebuild", root_config)
                                except portage.exception.PackageNotFound:
                                    continue
                                else:
                                    if not visible(pkgsettings, pkg_eb):
                                        continue

                if not pkg.built and not calculated_use:
                    # This is avoided whenever possible because
                    # it's expensive.
                    pkgsettings.setcpv(pkg)
                    pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]

                if pkg.cp != atom.cp:
                    # A cpv can be returned from dbapi.match() as an
                    # old-style virtual match even in cases when the
                    # package does not actually PROVIDE the virtual.
                    # Filter out any such false matches here.
                    if not atom_set.findAtomForPackage(pkg):
                        continue

                myarg = None
                if root == self.target_root:
                    try:
                        # Ebuild USE must have been calculated prior
                        # to this point, in case atoms have USE deps.
                        myarg = self._iter_atoms_for_pkg(pkg).next()
                    except StopIteration:
                        pass
                    except portage.exception.InvalidDependString:
                        if not installed:
                            # masked by corruption
                            continue
                if not installed and myarg:
                    found_available_arg = True

                if atom.use and not pkg.built:
                    use = pkg.use.enabled
                    if atom.use.enabled.difference(use):
                        continue
                    if atom.use.disabled.intersection(use):
                        continue
                if pkg.cp == atom_cp:
                    if highest_version is None:
                        highest_version = pkg
                    elif pkg > highest_version:
                        highest_version = pkg
                # At this point, we've found the highest visible
                # match from the current repo. Any lower versions
                # from this repo are ignored, so the loop
                # will always end with a break statement below
                # this point.
                if find_existing_node:
                    e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
                    if not e_pkg:
                        break
                    if portage.dep.match_from_list(atom, [e_pkg]):
                        if highest_version and \
                            e_pkg.cp == atom_cp and \
                            e_pkg < highest_version and \
                            e_pkg.slot_atom != highest_version.slot_atom:
                            # There is a higher version available in a
                            # different slot, so this existing node is
                            # irrelevant.
                            pass
                        else:
                            matched_packages.append(e_pkg)
                            existing_node = e_pkg
                    break
                # Compare built package to current config and
                # reject the built package if necessary.
                if built and not installed and \
                    ("--newuse" in self.myopts or \
                    "--reinstall" in self.myopts):
                    iuses = pkg.iuse.all
                    old_use = pkg.use.enabled
                    if myeb:
                        pkgsettings.setcpv(myeb)
                    else:
                        pkgsettings.setcpv(pkg)
                    now_use = pkgsettings["PORTAGE_USE"].split()
                    forced_flags = set()
                    forced_flags.update(pkgsettings.useforce)
                    forced_flags.update(pkgsettings.usemask)
                    cur_iuse = iuses
                    if myeb and not usepkgonly:
                        cur_iuse = myeb.iuse.all
                    if self._reinstall_for_flags(forced_flags,
                        old_use, iuses,
                        now_use, cur_iuse):
                        break
                # Compare current config to installed package
                # and do not reinstall if possible.
                if not installed and \
                    ("--newuse" in self.myopts or \
                    "--reinstall" in self.myopts) and \
                    cpv in vardb.match(atom):
                    pkgsettings.setcpv(pkg)
                    forced_flags = set()
                    forced_flags.update(pkgsettings.useforce)
                    forced_flags.update(pkgsettings.usemask)
                    old_use = vardb.aux_get(cpv, ["USE"])[0].split()
                    old_iuse = set(filter_iuse_defaults(
                        vardb.aux_get(cpv, ["IUSE"])[0].split()))
                    cur_use = pkg.use.enabled
                    cur_iuse = pkg.iuse.all
                    reinstall_for_flags = \
                        self._reinstall_for_flags(
                        forced_flags, old_use, old_iuse,
                        cur_use, cur_iuse)
                    if reinstall_for_flags:
                        reinstall = True
                if not built:
                    myeb = pkg
                matched_packages.append(pkg)
                if reinstall_for_flags:
                    self._reinstall_nodes[pkg] = \
                        reinstall_for_flags
                break

    if not matched_packages:
        return None, None

    if "--debug" in self.myopts:
        for pkg in matched_packages:
            portage.writemsg("%s %s\n" % \
                ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)

    # Filter out any old-style virtual matches if they are
    # mixed with new-style virtual matches.
    cp = portage.dep_getkey(atom)
    if len(matched_packages) > 1 and \
        "virtual" == portage.catsplit(cp)[0]:
        for pkg in matched_packages:
            if pkg.cp != cp:
                continue
            # Got a new-style virtual, so filter
            # out any old-style virtuals.
            matched_packages = [pkg for pkg in matched_packages \
                if pkg.cp == cp]
            break

    if len(matched_packages) > 1:
        bestmatch = portage.best(
            [pkg.cpv for pkg in matched_packages])
        matched_packages = [pkg for pkg in matched_packages \
            if portage.dep.cpvequal(pkg.cpv, bestmatch)]

    # ordered by type preference ("ebuild" type is the last resort)
    return matched_packages[-1], existing_node
10859 +
10860 + def _select_pkg_from_graph(self, root, atom, onlydeps=False):
10861 + """
10862 + Select packages that have already been added to the graph or
10863 + those that are installed and have not been scheduled for
10864 + replacement.
10865 + """
10866 + graph_db = self._graph_trees[root]["porttree"].dbapi
10867 + matches = graph_db.match_pkgs(atom)
10868 + if not matches:
10869 + return None, None
10870 + pkg = matches[-1] # highest match
10871 + in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
10872 + return pkg, in_graph
10873 +
def _complete_graph(self):
    """
    Add any deep dependencies of required sets (args, system, world) that
    have not been pulled into the graph yet. This ensures that the graph
    is consistent such that initially satisfied deep dependencies are not
    broken in the new graph. Initially unsatisfied dependencies are
    irrelevant since we only want to avoid breaking dependencies that are
    initially satisfied.

    Since this method can consume enough time to disturb users, it is
    currently only enabled by the --complete-graph option.

    Returns 1 on success and 0 if the graph could not be completed.
    """
    if "--buildpkgonly" in self.myopts or \
        "recurse" not in self.myparams:
        return 1

    if "complete" not in self.myparams:
        # Skip this to avoid consuming enough time to disturb users.
        return 1

    # Put the depgraph into a mode that causes it to only
    # select packages that have already been added to the
    # graph or those that are installed and have not been
    # scheduled for replacement. Also, toggle the "deep"
    # parameter so that all dependencies are traversed and
    # accounted for.
    self._select_atoms = self._select_atoms_from_graph
    self._select_package = self._select_pkg_from_graph
    already_deep = "deep" in self.myparams
    if not already_deep:
        self.myparams.add("deep")

    for root in self.roots:
        required_set_names = self._required_set_names.copy()
        if root == self.target_root and \
            (already_deep or "empty" in self.myparams):
            required_set_names.difference_update(self._sets)
        if not required_set_names and not self._ignored_deps:
            continue
        root_config = self.roots[root]
        setconfig = root_config.setconfig
        args = []
        # Reuse existing SetArg instances when available.
        for arg in self.digraph.root_nodes():
            if not isinstance(arg, SetArg):
                continue
            if arg.root_config != root_config:
                continue
            if arg.name in required_set_names:
                args.append(arg)
                required_set_names.remove(arg.name)
        # Create new SetArg instances only when necessary.
        for s in required_set_names:
            expanded_set = InternalPackageSet(
                initial_atoms=setconfig.getSetAtoms(s))
            atom = SETPREFIX + s
            args.append(SetArg(arg=atom, set=expanded_set,
                root_config=root_config))
        vardb = root_config.trees["vartree"].dbapi
        # Queue every atom of every required set for (re)traversal.
        for arg in args:
            for atom in arg.set:
                self._dep_stack.append(
                    Dependency(atom=atom, root=root, parent=arg))
        if self._ignored_deps:
            self._dep_stack.extend(self._ignored_deps)
            self._ignored_deps = []
        if not self._create_graph(allow_unsatisfied=True):
            return 0
        # Check the unsatisfied deps to see if any initially satisfied deps
        # will become unsatisfied due to an upgrade. Initially unsatisfied
        # deps are irrelevant since we only want to avoid breaking deps
        # that are initially satisfied.
        while self._unsatisfied_deps:
            dep = self._unsatisfied_deps.pop()
            matches = vardb.match_pkgs(dep.atom)
            if not matches:
                self._initially_unsatisfied_deps.append(dep)
                continue
            # A scheduled installation broke a deep dependency.
            # Add the installed package to the graph so that it
            # will be appropriately reported as a slot collision
            # (possibly solvable via backtracking).
            pkg = matches[-1] # highest match
            if not self._add_pkg(pkg, dep):
                return 0
            if not self._create_graph(allow_unsatisfied=True):
                return 0
    return 1
10962 +
def _pkg(self, cpv, type_name, root_config, installed=False):
    """
    Return a Package instance for cpv, reusing a cached instance
    when one exists. Raises portage.exception.PackageNotFound when
    the underlying dbapi has no usable entry for cpv (package does
    not exist or is corrupt).
    """
    if installed:
        operation = "nomerge"
    else:
        operation = "merge"
    cached = self._pkg_cache.get(
        (type_name, root_config.root, cpv, operation))
    if cached is not None:
        return cached
    tree_type = self.pkg_tree_map[type_name]
    db = root_config.trees[tree_type].dbapi
    aux_keys = list(self._trees_orig[root_config.root][
        tree_type].dbapi._aux_cache_keys)
    try:
        metadata = izip(aux_keys, db.aux_get(cpv, aux_keys))
    except KeyError:
        raise portage.exception.PackageNotFound(cpv)
    pkg = Package(cpv=cpv, metadata=metadata,
        root_config=root_config, installed=installed)
    if type_name == "ebuild":
        # Ebuild USE and CHOST depend on the current configuration,
        # so they are recalculated here instead of taken from the tree.
        settings = self.pkgsettings[root_config.root]
        settings.setcpv(pkg)
        pkg.metadata["USE"] = settings["PORTAGE_USE"]
        pkg.metadata['CHOST'] = settings.get('CHOST', '')
    self._pkg_cache[pkg] = pkg
    return pkg
10993 +
def validate_blockers(self):
    """Remove any blockers from the digraph that do not match any of the
    packages within the graph. If necessary, create hard deps to ensure
    correct merge order such that mutually blocking packages are never
    installed simultaneously.

    Returns True on success, False if an invalid dependency string was
    encountered (after showing a notice to the user)."""

    if "--buildpkgonly" in self.myopts or \
        "--nodeps" in self.myopts:
        return True

    # NOTE: the "deep" condition below is disabled (always True), so
    # blockers are unconditionally pulled in from all installed packages.
    #if "deep" in self.myparams:
    if True:
        # Pull in blockers from all installed packages that haven't already
        # been pulled into the depgraph. This is not enabled by default
        # due to the performance penalty that is incurred by all the
        # additional dep_check calls that are required.

        dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
        for myroot in self.trees:
            vardb = self.trees[myroot]["vartree"].dbapi
            portdb = self.trees[myroot]["porttree"].dbapi
            pkgsettings = self.pkgsettings[myroot]
            final_db = self.mydbapi[myroot]

            blocker_cache = BlockerCache(myroot, vardb)
            # Track cache entries with no corresponding installed
            # package, so they can be purged below.
            stale_cache = set(blocker_cache)
            for pkg in vardb:
                cpv = pkg.cpv
                stale_cache.discard(cpv)
                pkg_in_graph = self.digraph.contains(pkg)

                # Check for masked installed packages. Only warn about
                # packages that are in the graph in order to avoid warning
                # about those that will be automatically uninstalled during
                # the merge process or by --depclean.
                if pkg in final_db:
                    if pkg_in_graph and not visible(pkgsettings, pkg):
                        self._masked_installed.add(pkg)

                blocker_atoms = None
                blockers = None
                if pkg_in_graph:
                    blockers = []
                    try:
                        blockers.extend(
                            self._blocker_parents.child_nodes(pkg))
                    except KeyError:
                        pass
                    try:
                        blockers.extend(
                            self._irrelevant_blockers.child_nodes(pkg))
                    except KeyError:
                        pass
                if blockers is not None:
                    blockers = set(str(blocker.atom) \
                        for blocker in blockers)

                # If this node has any blockers, create a "nomerge"
                # node for it so that they can be enforced.
                self.spinner.update()
                blocker_data = blocker_cache.get(cpv)
                # A stale COUNTER invalidates the cached blocker data.
                if blocker_data is not None and \
                    blocker_data.counter != long(pkg.metadata["COUNTER"]):
                    blocker_data = None

                # If blocker data from the graph is available, use
                # it to validate the cache and update the cache if
                # it seems invalid.
                if blocker_data is not None and \
                    blockers is not None:
                    if not blockers.symmetric_difference(
                        blocker_data.atoms):
                        continue
                    blocker_data = None

                if blocker_data is None and \
                    blockers is not None:
                    # Re-use the blockers from the graph.
                    blocker_atoms = sorted(blockers)
                    counter = long(pkg.metadata["COUNTER"])
                    blocker_data = \
                        blocker_cache.BlockerData(counter, blocker_atoms)
                    blocker_cache[pkg.cpv] = blocker_data
                    continue

                if blocker_data:
                    blocker_atoms = blocker_data.atoms
                else:
                    # Use aux_get() to trigger FakeVartree global
                    # updates on *DEPEND when appropriate.
                    depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
                    # It is crucial to pass in final_db here in order to
                    # optimize dep_check calls by eliminating atoms via
                    # dep_wordreduce and dep_eval calls.
                    try:
                        portage.dep._dep_check_strict = False
                        try:
                            success, atoms = portage.dep_check(depstr,
                                final_db, pkgsettings, myuse=pkg.use.enabled,
                                trees=self._graph_trees, myroot=myroot)
                        except Exception, e:
                            if isinstance(e, SystemExit):
                                raise
                            # This is helpful, for example, if a ValueError
                            # is thrown from cpv_expand due to multiple
                            # matches (this can happen if an atom lacks a
                            # category).
                            show_invalid_depstring_notice(
                                pkg, depstr, str(e))
                            del e
                            raise
                    finally:
                        portage.dep._dep_check_strict = True
                    if not success:
                        replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
                        if replacement_pkg and \
                            replacement_pkg[0].operation == "merge":
                            # This package is being replaced anyway, so
                            # ignore invalid dependencies so as not to
                            # annoy the user too much (otherwise they'd be
                            # forced to manually unmerge it first).
                            continue
                        show_invalid_depstring_notice(pkg, depstr, atoms)
                        return False
                    blocker_atoms = [myatom for myatom in atoms \
                        if myatom.startswith("!")]
                    blocker_atoms.sort()
                    counter = long(pkg.metadata["COUNTER"])
                    blocker_cache[cpv] = \
                        blocker_cache.BlockerData(counter, blocker_atoms)
                if blocker_atoms:
                    try:
                        for atom in blocker_atoms:
                            blocker = Blocker(atom=portage.dep.Atom(atom),
                                eapi=pkg.metadata["EAPI"], root=myroot)
                            self._blocker_parents.add(blocker, pkg)
                    except portage.exception.InvalidAtom, e:
                        depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
                        show_invalid_depstring_notice(
                            pkg, depstr, "Invalid Atom: %s" % (e,))
                        return False
            for cpv in stale_cache:
                del blocker_cache[cpv]
            blocker_cache.flush()
            del blocker_cache

    # Discard any "uninstall" tasks scheduled by previous calls
    # to this method, since those tasks may not make sense given
    # the current graph state.
    previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
    if previous_uninstall_tasks:
        self._blocker_uninstalls = digraph()
        self.digraph.difference_update(previous_uninstall_tasks)

    for blocker in self._blocker_parents.leaf_nodes():
        self.spinner.update()
        root_config = self.roots[blocker.root]
        virtuals = root_config.settings.getvirtuals()
        myroot = blocker.root
        initial_db = self.trees[myroot]["vartree"].dbapi
        final_db = self.mydbapi[myroot]

        provider_virtual = False
        if blocker.cp in virtuals and \
            not self._have_new_virt(blocker.root, blocker.cp):
            provider_virtual = True

        # Use this to check PROVIDE for each matched package
        # when necessary.
        atom_set = InternalPackageSet(
            initial_atoms=[blocker.atom])

        if provider_virtual:
            atoms = []
            for provider_entry in virtuals[blocker.cp]:
                provider_cp = \
                    portage.dep_getkey(provider_entry)
                atoms.append(blocker.atom.replace(
                    blocker.cp, provider_cp))
        else:
            atoms = [blocker.atom]

        blocked_initial = set()
        for atom in atoms:
            for pkg in initial_db.match_pkgs(atom):
                if atom_set.findAtomForPackage(pkg):
                    blocked_initial.add(pkg)

        blocked_final = set()
        for atom in atoms:
            for pkg in final_db.match_pkgs(atom):
                if atom_set.findAtomForPackage(pkg):
                    blocked_final.add(pkg)

        if not blocked_initial and not blocked_final:
            parent_pkgs = self._blocker_parents.parent_nodes(blocker)
            self._blocker_parents.remove(blocker)
            # Discard any parents that don't have any more blockers.
            for pkg in parent_pkgs:
                self._irrelevant_blockers.add(blocker, pkg)
                if not self._blocker_parents.child_nodes(pkg):
                    self._blocker_parents.remove(pkg)
            continue
        for parent in self._blocker_parents.parent_nodes(blocker):
            unresolved_blocks = False
            depends_on_order = set()
            for pkg in blocked_initial:
                if pkg.slot_atom == parent.slot_atom:
                    # TODO: Support blocks within slots in cases where it
                    # might make sense. For example, a new version might
                    # require that the old version be uninstalled at build
                    # time.
                    continue
                if parent.installed:
                    # Two currently installed packages conflict with
                    # each other. Ignore this case since the damage
                    # is already done and this would be likely to
                    # confuse users if displayed like a normal blocker.
                    continue

                self._blocked_pkgs.add(pkg, blocker)

                if parent.operation == "merge":
                    # Maybe the blocked package can be replaced or simply
                    # unmerged to resolve this block.
                    depends_on_order.add((pkg, parent))
                    continue
                # None of the above blocker resolution techniques apply,
                # so apparently this one is unresolvable.
                unresolved_blocks = True
            for pkg in blocked_final:
                if pkg.slot_atom == parent.slot_atom:
                    # TODO: Support blocks within slots.
                    continue
                if parent.operation == "nomerge" and \
                    pkg.operation == "nomerge":
                    # This blocker will be handled the next time that a
                    # merge of either package is triggered.
                    continue

                self._blocked_pkgs.add(pkg, blocker)

                # Maybe the blocking package can be
                # unmerged to resolve this block.
                if parent.operation == "merge" and pkg.installed:
                    depends_on_order.add((pkg, parent))
                    continue
                elif parent.operation == "nomerge":
                    depends_on_order.add((parent, pkg))
                    continue
                # None of the above blocker resolution techniques apply,
                # so apparently this one is unresolvable.
                unresolved_blocks = True

            # Make sure we don't unmerge any packages that have been pulled
            # into the graph.
            if not unresolved_blocks and depends_on_order:
                for inst_pkg, inst_task in depends_on_order:
                    if self.digraph.contains(inst_pkg) and \
                        self.digraph.parent_nodes(inst_pkg):
                        unresolved_blocks = True
                        break

            if not unresolved_blocks and depends_on_order:
                for inst_pkg, inst_task in depends_on_order:
                    uninst_task = Package(built=inst_pkg.built,
                        cpv=inst_pkg.cpv, installed=inst_pkg.installed,
                        metadata=inst_pkg.metadata,
                        operation="uninstall",
                        root_config=inst_pkg.root_config,
                        type_name=inst_pkg.type_name)
                    self._pkg_cache[uninst_task] = uninst_task
                    # Enforce correct merge order with a hard dep.
                    self.digraph.addnode(uninst_task, inst_task,
                        priority=BlockerDepPriority.instance)
                    # Count references to this blocker so that it can be
                    # invalidated after nodes referencing it have been
                    # merged.
                    self._blocker_uninstalls.addnode(uninst_task, blocker)
            if not unresolved_blocks and not depends_on_order:
                self._irrelevant_blockers.add(blocker, parent)
                self._blocker_parents.remove_edge(blocker, parent)
                if not self._blocker_parents.parent_nodes(blocker):
                    self._blocker_parents.remove(blocker)
                if not self._blocker_parents.child_nodes(parent):
                    self._blocker_parents.remove(parent)
            if unresolved_blocks:
                self._unsolvable_blockers.add(blocker, parent)

    return True
11284 +
11285 + def _accept_blocker_conflicts(self):
11286 + acceptable = False
11287 + for x in ("--buildpkgonly", "--fetchonly",
11288 + "--fetch-all-uri", "--nodeps"):
11289 + if x in self.myopts:
11290 + acceptable = True
11291 + break
11292 + return acceptable
11293 +
11294 + def _merge_order_bias(self, mygraph):
11295 + """
11296 + For optimal leaf node selection, promote deep system runtime deps and
11297 + order nodes from highest to lowest overall reference count.
11298 + """
11299 +
11300 + node_info = {}
11301 + for node in mygraph.order:
11302 + node_info[node] = len(mygraph.parent_nodes(node))
11303 + deep_system_deps = _find_deep_system_runtime_deps(mygraph)
11304 +
11305 + def cmp_merge_preference(node1, node2):
11306 +
11307 + if node1.operation == 'uninstall':
11308 + if node2.operation == 'uninstall':
11309 + return 0
11310 + return 1
11311 +
11312 + if node2.operation == 'uninstall':
11313 + if node1.operation == 'uninstall':
11314 + return 0
11315 + return -1
11316 +
11317 + node1_sys = node1 in deep_system_deps
11318 + node2_sys = node2 in deep_system_deps
11319 + if node1_sys != node2_sys:
11320 + if node1_sys:
11321 + return -1
11322 + return 1
11323 +
11324 + return node_info[node2] - node_info[node1]
11325 +
11326 + mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
11327 +
11328 + def altlist(self, reversed=False):
11329 +
11330 + while self._serialized_tasks_cache is None:
11331 + self._resolve_conflicts()
11332 + try:
11333 + self._serialized_tasks_cache, self._scheduler_graph = \
11334 + self._serialize_tasks()
11335 + except self._serialize_tasks_retry:
11336 + pass
11337 +
11338 + retlist = self._serialized_tasks_cache[:]
11339 + if reversed:
11340 + retlist.reverse()
11341 + return retlist
11342 +
11343 + def schedulerGraph(self):
11344 + """
11345 + The scheduler graph is identical to the normal one except that
11346 + uninstall edges are reversed in specific cases that require
11347 + conflicting packages to be temporarily installed simultaneously.
11348 + This is intended for use by the Scheduler in its parallelization
11349 + logic. It ensures that temporary simultaneous installation of
11350 + conflicting packages is avoided when appropriate (especially for
11351 + !!atom blockers), but allowed in specific cases that require it.
11352 +
11353 + Note that this method calls break_refs() which alters the state of
11354 + internal Package instances such that this depgraph instance should
11355 + not be used to perform any more calculations.
11356 + """
11357 + if self._scheduler_graph is None:
11358 + self.altlist()
11359 + self.break_refs(self._scheduler_graph.order)
11360 + return self._scheduler_graph
11361 +
11362 + def break_refs(self, nodes):
11363 + """
11364 + Take a mergelist like that returned from self.altlist() and
11365 + break any references that lead back to the depgraph. This is
11366 + useful if you want to hold references to packages without
11367 + also holding the depgraph on the heap.
11368 + """
11369 + for node in nodes:
11370 + if hasattr(node, "root_config"):
11371 + # The FakeVartree references the _package_cache which
11372 + # references the depgraph. So that Package instances don't
11373 + # hold the depgraph and FakeVartree on the heap, replace
11374 + # the RootConfig that references the FakeVartree with the
11375 + # original RootConfig instance which references the actual
11376 + # vartree.
11377 + node.root_config = \
11378 + self._trees_orig[node.root_config.root]["root_config"]
11379 +
11380 + def _resolve_conflicts(self):
11381 + if not self._complete_graph():
11382 + raise self._unknown_internal_error()
11383 +
11384 + if not self.validate_blockers():
11385 + raise self._unknown_internal_error()
11386 +
11387 + if self._slot_collision_info:
11388 + self._process_slot_conflicts()
11389 +
11390 + def _serialize_tasks(self):
11391 +
11392 + if "--debug" in self.myopts:
11393 + writemsg("\ndigraph:\n\n", noiselevel=-1)
11394 + self.digraph.debug_print()
11395 + writemsg("\n", noiselevel=-1)
11396 +
11397 + scheduler_graph = self.digraph.copy()
11398 +
11399 + if '--nodeps' in self.myopts:
11400 + # Preserve the package order given on the command line.
11401 + return ([node for node in scheduler_graph \
11402 + if isinstance(node, Package) \
11403 + and node.operation == 'merge'], scheduler_graph)
11404 +
11405 + mygraph=self.digraph.copy()
11406 + # Prune "nomerge" root nodes if nothing depends on them, since
11407 + # otherwise they slow down merge order calculation. Don't remove
11408 + # non-root nodes since they help optimize merge order in some cases
11409 + # such as revdep-rebuild.
11410 + removed_nodes = set()
11411 + while True:
11412 + for node in mygraph.root_nodes():
11413 + if not isinstance(node, Package) or \
11414 + node.installed or node.onlydeps:
11415 + removed_nodes.add(node)
11416 + if removed_nodes:
11417 + self.spinner.update()
11418 + mygraph.difference_update(removed_nodes)
11419 + if not removed_nodes:
11420 + break
11421 + removed_nodes.clear()
11422 + self._merge_order_bias(mygraph)
11423 + def cmp_circular_bias(n1, n2):
11424 + """
11425 + RDEPEND is stronger than PDEPEND and this function
11426 + measures such a strength bias within a circular
11427 + dependency relationship.
11428 + """
11429 + n1_n2_medium = n2 in mygraph.child_nodes(n1,
11430 + ignore_priority=priority_range.ignore_medium_soft)
11431 + n2_n1_medium = n1 in mygraph.child_nodes(n2,
11432 + ignore_priority=priority_range.ignore_medium_soft)
11433 + if n1_n2_medium == n2_n1_medium:
11434 + return 0
11435 + elif n1_n2_medium:
11436 + return 1
11437 + return -1
11438 + myblocker_uninstalls = self._blocker_uninstalls.copy()
11439 + retlist=[]
11440 + # Contains uninstall tasks that have been scheduled to
11441 + # occur after overlapping blockers have been installed.
11442 + scheduled_uninstalls = set()
11443 + # Contains any Uninstall tasks that have been ignored
11444 + # in order to avoid the circular deps code path. These
11445 + # correspond to blocker conflicts that could not be
11446 + # resolved.
11447 + ignored_uninstall_tasks = set()
11448 + have_uninstall_task = False
11449 + complete = "complete" in self.myparams
11450 + asap_nodes = []
11451 +
11452 + def get_nodes(**kwargs):
11453 + """
11454 + Returns leaf nodes excluding Uninstall instances
11455 + since those should be executed as late as possible.
11456 + """
11457 + return [node for node in mygraph.leaf_nodes(**kwargs) \
11458 + if isinstance(node, Package) and \
11459 + (node.operation != "uninstall" or \
11460 + node in scheduled_uninstalls)]
11461 +
11462 + # sys-apps/portage needs special treatment if ROOT="/"
11463 + running_root = self._running_root.root
11464 + from portage.const import PORTAGE_PACKAGE_ATOM
11465 + runtime_deps = InternalPackageSet(
11466 + initial_atoms=[PORTAGE_PACKAGE_ATOM])
11467 + running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
11468 + PORTAGE_PACKAGE_ATOM)
11469 + replacement_portage = self.mydbapi[running_root].match_pkgs(
11470 + PORTAGE_PACKAGE_ATOM)
11471 +
11472 + if running_portage:
11473 + running_portage = running_portage[0]
11474 + else:
11475 + running_portage = None
11476 +
11477 + if replacement_portage:
11478 + replacement_portage = replacement_portage[0]
11479 + else:
11480 + replacement_portage = None
11481 +
11482 + if replacement_portage == running_portage:
11483 + replacement_portage = None
11484 +
11485 + if replacement_portage is not None:
11486 + # update from running_portage to replacement_portage asap
11487 + asap_nodes.append(replacement_portage)
11488 +
11489 + if running_portage is not None:
11490 + try:
11491 + portage_rdepend = self._select_atoms_highest_available(
11492 + running_root, running_portage.metadata["RDEPEND"],
11493 + myuse=running_portage.use.enabled,
11494 + parent=running_portage, strict=False)
11495 + except portage.exception.InvalidDependString, e:
11496 + portage.writemsg("!!! Invalid RDEPEND in " + \
11497 + "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
11498 + (running_root, running_portage.cpv, e), noiselevel=-1)
11499 + del e
11500 + portage_rdepend = []
11501 + runtime_deps.update(atom for atom in portage_rdepend \
11502 + if not atom.startswith("!"))
11503 +
11504 + def gather_deps(ignore_priority, mergeable_nodes,
11505 + selected_nodes, node):
11506 + """
11507 + Recursively gather a group of nodes that RDEPEND on
11508 + each other. This ensures that they are merged as a group
11509 + and get their RDEPENDs satisfied as soon as possible.
11510 + """
11511 + if node in selected_nodes:
11512 + return True
11513 + if node not in mergeable_nodes:
11514 + return False
11515 + if node == replacement_portage and \
11516 + mygraph.child_nodes(node,
11517 + ignore_priority=priority_range.ignore_medium_soft):
11518 + # Make sure that portage always has all of its
11519 + # RDEPENDs installed first.
11520 + return False
11521 + selected_nodes.add(node)
11522 + for child in mygraph.child_nodes(node,
11523 + ignore_priority=ignore_priority):
11524 + if not gather_deps(ignore_priority,
11525 + mergeable_nodes, selected_nodes, child):
11526 + return False
11527 + return True
11528 +
11529 + def ignore_uninst_or_med(priority):
11530 + if priority is BlockerDepPriority.instance:
11531 + return True
11532 + return priority_range.ignore_medium(priority)
11533 +
11534 + def ignore_uninst_or_med_soft(priority):
11535 + if priority is BlockerDepPriority.instance:
11536 + return True
11537 + return priority_range.ignore_medium_soft(priority)
11538 +
11539 + tree_mode = "--tree" in self.myopts
11540 + # Tracks whether or not the current iteration should prefer asap_nodes
11541 + # if available. This is set to False when the previous iteration
11542 + # failed to select any nodes. It is reset whenever nodes are
11543 + # successfully selected.
11544 + prefer_asap = True
11545 +
11546 + # Controls whether or not the current iteration should drop edges that
11547 + # are "satisfied" by installed packages, in order to solve circular
11548 + # dependencies. The deep runtime dependencies of installed packages are
11549 + # not checked in this case (bug #199856), so it must be avoided
11550 + # whenever possible.
11551 + drop_satisfied = False
11552 +
11553 + # State of variables for successive iterations that loosen the
11554 + # criteria for node selection.
11555 + #
11556 + # iteration prefer_asap drop_satisfied
11557 + # 1 True False
11558 + # 2 False False
11559 + # 3 False True
11560 + #
11561 + # If no nodes are selected on the last iteration, it is due to
11562 + # unresolved blockers or circular dependencies.
11563 +
11564 + while not mygraph.empty():
11565 + self.spinner.update()
11566 + selected_nodes = None
11567 + ignore_priority = None
11568 + if drop_satisfied or (prefer_asap and asap_nodes):
11569 + priority_range = DepPrioritySatisfiedRange
11570 + else:
11571 + priority_range = DepPriorityNormalRange
11572 + if prefer_asap and asap_nodes:
11573 + # ASAP nodes are merged before their soft deps. Go ahead and
11574 + # select root nodes here if necessary, since it's typical for
11575 + # the parent to have been removed from the graph already.
11576 + asap_nodes = [node for node in asap_nodes \
11577 + if mygraph.contains(node)]
11578 + for node in asap_nodes:
11579 + if not mygraph.child_nodes(node,
11580 + ignore_priority=priority_range.ignore_soft):
11581 + selected_nodes = [node]
11582 + asap_nodes.remove(node)
11583 + break
11584 + if not selected_nodes and \
11585 + not (prefer_asap and asap_nodes):
11586 + for i in xrange(priority_range.NONE,
11587 + priority_range.MEDIUM_SOFT + 1):
11588 + ignore_priority = priority_range.ignore_priority[i]
11589 + nodes = get_nodes(ignore_priority=ignore_priority)
11590 + if nodes:
11591 + # If there is a mix of uninstall nodes with other
11592 + # types, save the uninstall nodes for later since
11593 + # sometimes a merge node will render an uninstall
11594 + # node unnecessary (due to occupying the same slot),
11595 + # and we want to avoid executing a separate uninstall
11596 + # task in that case.
11597 + if len(nodes) > 1:
11598 + good_uninstalls = []
11599 + with_some_uninstalls_excluded = []
11600 + for node in nodes:
11601 + if node.operation == "uninstall":
11602 + slot_node = self.mydbapi[node.root
11603 + ].match_pkgs(node.slot_atom)
11604 + if slot_node and \
11605 + slot_node[0].operation == "merge":
11606 + continue
11607 + good_uninstalls.append(node)
11608 + with_some_uninstalls_excluded.append(node)
11609 + if good_uninstalls:
11610 + nodes = good_uninstalls
11611 + elif with_some_uninstalls_excluded:
11612 + nodes = with_some_uninstalls_excluded
11613 + else:
11614 + nodes = nodes
11615 +
11616 + if ignore_priority is None and not tree_mode:
11617 + # Greedily pop all of these nodes since no
11618 + # relationship has been ignored. This optimization
11619 + # destroys --tree output, so it's disabled in tree
11620 + # mode.
11621 + selected_nodes = nodes
11622 + else:
11623 + # For optimal merge order:
11624 + # * Only pop one node.
11625 + # * Removing a root node (node without a parent)
11626 + # will not produce a leaf node, so avoid it.
11627 + # * It's normal for a selected uninstall to be a
11628 + # root node, so don't check them for parents.
11629 + for node in nodes:
11630 + if node.operation == "uninstall" or \
11631 + mygraph.parent_nodes(node):
11632 + selected_nodes = [node]
11633 + break
11634 +
11635 + if selected_nodes:
11636 + break
11637 +
11638 + if not selected_nodes:
11639 + nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
11640 + if nodes:
11641 + mergeable_nodes = set(nodes)
11642 + if prefer_asap and asap_nodes:
11643 + nodes = asap_nodes
11644 + for i in xrange(priority_range.SOFT,
11645 + priority_range.MEDIUM_SOFT + 1):
11646 + ignore_priority = priority_range.ignore_priority[i]
11647 + for node in nodes:
11648 + if not mygraph.parent_nodes(node):
11649 + continue
11650 + selected_nodes = set()
11651 + if gather_deps(ignore_priority,
11652 + mergeable_nodes, selected_nodes, node):
11653 + break
11654 + else:
11655 + selected_nodes = None
11656 + if selected_nodes:
11657 + break
11658 +
11659 + if prefer_asap and asap_nodes and not selected_nodes:
11660 + # We failed to find any asap nodes to merge, so ignore
11661 + # them for the next iteration.
11662 + prefer_asap = False
11663 + continue
11664 +
11665 + if selected_nodes and ignore_priority is not None:
11666 + # Try to merge ignored medium_soft deps as soon as possible
11667 + # if they're not satisfied by installed packages.
11668 + for node in selected_nodes:
11669 + children = set(mygraph.child_nodes(node))
11670 + soft = children.difference(
11671 + mygraph.child_nodes(node,
11672 + ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
11673 + medium_soft = children.difference(
11674 + mygraph.child_nodes(node,
11675 + ignore_priority = \
11676 + DepPrioritySatisfiedRange.ignore_medium_soft))
11677 + medium_soft.difference_update(soft)
11678 + for child in medium_soft:
11679 + if child in selected_nodes:
11680 + continue
11681 + if child in asap_nodes:
11682 + continue
11683 + asap_nodes.append(child)
11684 +
11685 + if selected_nodes and len(selected_nodes) > 1:
11686 + if not isinstance(selected_nodes, list):
11687 + selected_nodes = list(selected_nodes)
11688 + selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
11689 +
11690 + if not selected_nodes and not myblocker_uninstalls.is_empty():
11691 + # An Uninstall task needs to be executed in order to
11692 + # avoid conflict if possible.
11693 +
11694 + if drop_satisfied:
11695 + priority_range = DepPrioritySatisfiedRange
11696 + else:
11697 + priority_range = DepPriorityNormalRange
11698 +
11699 + mergeable_nodes = get_nodes(
11700 + ignore_priority=ignore_uninst_or_med)
11701 +
11702 + min_parent_deps = None
11703 + uninst_task = None
11704 + for task in myblocker_uninstalls.leaf_nodes():
11705 + # Do some sanity checks so that system or world packages
11706 + # don't get uninstalled inappropriately here (only really
11707 + # necessary when --complete-graph has not been enabled).
11708 +
11709 + if task in ignored_uninstall_tasks:
11710 + continue
11711 +
11712 + if task in scheduled_uninstalls:
11713 + # It's been scheduled but it hasn't
11714 + # been executed yet due to dependence
11715 + # on installation of blocking packages.
11716 + continue
11717 +
11718 + root_config = self.roots[task.root]
11719 + inst_pkg = self._pkg_cache[
11720 + ("installed", task.root, task.cpv, "nomerge")]
11721 +
11722 + if self.digraph.contains(inst_pkg):
11723 + continue
11724 +
11725 + forbid_overlap = False
11726 + heuristic_overlap = False
11727 + for blocker in myblocker_uninstalls.parent_nodes(task):
11728 + if blocker.eapi in ("0", "1"):
11729 + heuristic_overlap = True
11730 + elif blocker.atom.blocker.overlap.forbid:
11731 + forbid_overlap = True
11732 + break
11733 + if forbid_overlap and running_root == task.root:
11734 + continue
11735 +
11736 + if heuristic_overlap and running_root == task.root:
11737 + # Never uninstall sys-apps/portage or its essential
11738 + # dependencies, except through replacement.
11739 + try:
11740 + runtime_dep_atoms = \
11741 + list(runtime_deps.iterAtomsForPackage(task))
11742 + except portage.exception.InvalidDependString, e:
11743 + portage.writemsg("!!! Invalid PROVIDE in " + \
11744 + "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
11745 + (task.root, task.cpv, e), noiselevel=-1)
11746 + del e
11747 + continue
11748 +
11749 + # Don't uninstall a runtime dep if it appears
11750 + # to be the only suitable one installed.
11751 + skip = False
11752 + vardb = root_config.trees["vartree"].dbapi
11753 + for atom in runtime_dep_atoms:
11754 + other_version = None
11755 + for pkg in vardb.match_pkgs(atom):
11756 + if pkg.cpv == task.cpv and \
11757 + pkg.metadata["COUNTER"] == \
11758 + task.metadata["COUNTER"]:
11759 + continue
11760 + other_version = pkg
11761 + break
11762 + if other_version is None:
11763 + skip = True
11764 + break
11765 + if skip:
11766 + continue
11767 +
11768 + # For packages in the system set, don't take
11769 + # any chances. If the conflict can't be resolved
11770 + # by a normal replacement operation then abort.
11771 + skip = False
11772 + try:
11773 + for atom in root_config.sets[
11774 + "system"].iterAtomsForPackage(task):
11775 + skip = True
11776 + break
11777 + except portage.exception.InvalidDependString, e:
11778 + portage.writemsg("!!! Invalid PROVIDE in " + \
11779 + "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
11780 + (task.root, task.cpv, e), noiselevel=-1)
11781 + del e
11782 + skip = True
11783 + if skip:
11784 + continue
11785 +
11786 + # Note that the world check isn't always
11787 + # necessary since self._complete_graph() will
11788 + # add all packages from the system and world sets to the
11789 + # graph. This just allows unresolved conflicts to be
11790 + # detected as early as possible, which makes it possible
11791 + # to avoid calling self._complete_graph() when it is
11792 + # unnecessary due to blockers triggering an abort.
11793 + if not complete:
11794 + # For packages in the world set, go ahead and uninstall
11795 + # when necessary, as long as the atom will be satisfied
11796 + # in the final state.
11797 + graph_db = self.mydbapi[task.root]
11798 + skip = False
11799 + try:
11800 + for atom in root_config.sets[
11801 + "world"].iterAtomsForPackage(task):
11802 + satisfied = False
11803 + for pkg in graph_db.match_pkgs(atom):
11804 + if pkg == inst_pkg:
11805 + continue
11806 + satisfied = True
11807 + break
11808 + if not satisfied:
11809 + skip = True
11810 + self._blocked_world_pkgs[inst_pkg] = atom
11811 + break
11812 + except portage.exception.InvalidDependString, e:
11813 + portage.writemsg("!!! Invalid PROVIDE in " + \
11814 + "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
11815 + (task.root, task.cpv, e), noiselevel=-1)
11816 + del e
11817 + skip = True
11818 + if skip:
11819 + continue
11820 +
11821 + # Check the deps of parent nodes to ensure that
11822 + # the chosen task produces a leaf node. Maybe
11823 + # this can be optimized some more to make the
11824 + # best possible choice, but the current algorithm
11825 + # is simple and should be near optimal for most
11826 + # common cases.
11827 + mergeable_parent = False
11828 + parent_deps = set()
11829 + for parent in mygraph.parent_nodes(task):
11830 + parent_deps.update(mygraph.child_nodes(parent,
11831 + ignore_priority=priority_range.ignore_medium_soft))
11832 + if parent in mergeable_nodes and \
11833 + gather_deps(ignore_uninst_or_med_soft,
11834 + mergeable_nodes, set(), parent):
11835 + mergeable_parent = True
11836 +
11837 + if not mergeable_parent:
11838 + continue
11839 +
11840 + parent_deps.remove(task)
11841 + if min_parent_deps is None or \
11842 + len(parent_deps) < min_parent_deps:
11843 + min_parent_deps = len(parent_deps)
11844 + uninst_task = task
11845 +
11846 + if uninst_task is not None:
11847 + # The uninstall is performed only after blocking
11848 + # packages have been merged on top of it. File
11849 + # collisions between blocking packages are detected
11850 + # and removed from the list of files to be uninstalled.
11851 + scheduled_uninstalls.add(uninst_task)
11852 + parent_nodes = mygraph.parent_nodes(uninst_task)
11853 +
11854 + # Reverse the parent -> uninstall edges since we want
11855 + # to do the uninstall after blocking packages have
11856 + # been merged on top of it.
11857 + mygraph.remove(uninst_task)
11858 + for blocked_pkg in parent_nodes:
11859 + mygraph.add(blocked_pkg, uninst_task,
11860 + priority=BlockerDepPriority.instance)
11861 + scheduler_graph.remove_edge(uninst_task, blocked_pkg)
11862 + scheduler_graph.add(blocked_pkg, uninst_task,
11863 + priority=BlockerDepPriority.instance)
11864 +
11865 + # Reset the state variables for leaf node selection and
11866 + # continue trying to select leaf nodes.
11867 + prefer_asap = True
11868 + drop_satisfied = False
11869 + continue
11870 +
11871 + if not selected_nodes:
11872 + # Only select root nodes as a last resort. This case should
11873 + # only trigger when the graph is nearly empty and the only
11874 + # remaining nodes are isolated (no parents or children). Since
11875 + # the nodes must be isolated, ignore_priority is not needed.
11876 + selected_nodes = get_nodes()
11877 +
11878 + if not selected_nodes and not drop_satisfied:
11879 + drop_satisfied = True
11880 + continue
11881 +
11882 + if not selected_nodes and not myblocker_uninstalls.is_empty():
11883 + # If possible, drop an uninstall task here in order to avoid
11884 + # the circular deps code path. The corresponding blocker will
11885 + # still be counted as an unresolved conflict.
11886 + uninst_task = None
11887 + for node in myblocker_uninstalls.leaf_nodes():
11888 + try:
11889 + mygraph.remove(node)
11890 + except KeyError:
11891 + pass
11892 + else:
11893 + uninst_task = node
11894 + ignored_uninstall_tasks.add(node)
11895 + break
11896 +
11897 + if uninst_task is not None:
11898 + # Reset the state variables for leaf node selection and
11899 + # continue trying to select leaf nodes.
11900 + prefer_asap = True
11901 + drop_satisfied = False
11902 + continue
11903 +
11904 + if not selected_nodes:
11905 + self._circular_deps_for_display = mygraph
11906 + raise self._unknown_internal_error()
11907 +
11908 + # At this point, we've succeeded in selecting one or more nodes, so
11909 + # reset state variables for leaf node selection.
11910 + prefer_asap = True
11911 + drop_satisfied = False
11912 +
11913 + mygraph.difference_update(selected_nodes)
11914 +
11915 + for node in selected_nodes:
11916 + if isinstance(node, Package) and \
11917 + node.operation == "nomerge":
11918 + continue
11919 +
11920 + # Handle interactions between blockers
11921 + # and uninstallation tasks.
11922 + solved_blockers = set()
11923 + uninst_task = None
11924 + if isinstance(node, Package) and \
11925 + "uninstall" == node.operation:
11926 + have_uninstall_task = True
11927 + uninst_task = node
11928 + else:
11929 + vardb = self.trees[node.root]["vartree"].dbapi
11930 + previous_cpv = vardb.match(node.slot_atom)
11931 + if previous_cpv:
11932 + # The package will be replaced by this one, so remove
11933 + # the corresponding Uninstall task if necessary.
11934 + previous_cpv = previous_cpv[0]
11935 + uninst_task = \
11936 + ("installed", node.root, previous_cpv, "uninstall")
11937 + try:
11938 + mygraph.remove(uninst_task)
11939 + except KeyError:
11940 + pass
11941 +
11942 + if uninst_task is not None and \
11943 + uninst_task not in ignored_uninstall_tasks and \
11944 + myblocker_uninstalls.contains(uninst_task):
11945 + blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
11946 + myblocker_uninstalls.remove(uninst_task)
11947 + # Discard any blockers that this Uninstall solves.
11948 + for blocker in blocker_nodes:
11949 + if not myblocker_uninstalls.child_nodes(blocker):
11950 + myblocker_uninstalls.remove(blocker)
11951 + solved_blockers.add(blocker)
11952 +
11953 + retlist.append(node)
11954 +
11955 + if (isinstance(node, Package) and \
11956 + "uninstall" == node.operation) or \
11957 + (uninst_task is not None and \
11958 + uninst_task in scheduled_uninstalls):
11959 + # Include satisfied blockers in the merge list
11960 + # since the user might be interested and also
11961 + # it serves as an indicator that blocking packages
11962 + # will be temporarily installed simultaneously.
11963 + for blocker in solved_blockers:
11964 + retlist.append(Blocker(atom=blocker.atom,
11965 + root=blocker.root, eapi=blocker.eapi,
11966 + satisfied=True))
11967 +
11968 + unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
11969 + for node in myblocker_uninstalls.root_nodes():
11970 + unsolvable_blockers.add(node)
11971 +
11972 + for blocker in unsolvable_blockers:
11973 + retlist.append(blocker)
11974 +
11975 + # If any Uninstall tasks need to be executed in order
11976 + # to avoid a conflict, complete the graph with any
11977 + # dependencies that may have been initially
11978 + # neglected (to ensure that unsafe Uninstall tasks
11979 + # are properly identified and blocked from execution).
11980 + if have_uninstall_task and \
11981 + not complete and \
11982 + not unsolvable_blockers:
11983 + self.myparams.add("complete")
11984 + raise self._serialize_tasks_retry("")
11985 +
11986 + if unsolvable_blockers and \
11987 + not self._accept_blocker_conflicts():
11988 + self._unsatisfied_blockers_for_display = unsolvable_blockers
11989 + self._serialized_tasks_cache = retlist[:]
11990 + self._scheduler_graph = scheduler_graph
11991 + raise self._unknown_internal_error()
11992 +
11993 + if self._slot_collision_info and \
11994 + not self._accept_blocker_conflicts():
11995 + self._serialized_tasks_cache = retlist[:]
11996 + self._scheduler_graph = scheduler_graph
11997 + raise self._unknown_internal_error()
11998 +
11999 + return retlist, scheduler_graph
12000 +
12001 + def _show_circular_deps(self, mygraph):
12002 + # No leaf nodes are available, so we have a circular
12003 + # dependency panic situation. Reduce the noise level to a
12004 + # minimum via repeated elimination of root nodes since they
12005 + # have no parents and thus can not be part of a cycle.
12006 + while True:
12007 + root_nodes = mygraph.root_nodes(
12008 + ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
12009 + if not root_nodes:
12010 + break
12011 + mygraph.difference_update(root_nodes)
12012 + # Display the USE flags that are enabled on nodes that are part
12013 + # of dependency cycles in case that helps the user decide to
12014 + # disable some of them.
12015 + display_order = []
12016 + tempgraph = mygraph.copy()
12017 + while not tempgraph.empty():
12018 + nodes = tempgraph.leaf_nodes()
12019 + if not nodes:
12020 + node = tempgraph.order[0]
12021 + else:
12022 + node = nodes[0]
12023 + display_order.append(node)
12024 + tempgraph.remove(node)
12025 + display_order.reverse()
12026 + self.myopts.pop("--quiet", None)
12027 + self.myopts.pop("--verbose", None)
12028 + self.myopts["--tree"] = True
12029 + portage.writemsg("\n\n", noiselevel=-1)
12030 + self.display(display_order)
12031 + prefix = colorize("BAD", " * ")
12032 + portage.writemsg("\n", noiselevel=-1)
12033 + portage.writemsg(prefix + "Error: circular dependencies:\n",
12034 + noiselevel=-1)
12035 + portage.writemsg("\n", noiselevel=-1)
12036 + mygraph.debug_print()
12037 + portage.writemsg("\n", noiselevel=-1)
12038 + portage.writemsg(prefix + "Note that circular dependencies " + \
12039 + "can often be avoided by temporarily\n", noiselevel=-1)
12040 + portage.writemsg(prefix + "disabling USE flags that trigger " + \
12041 + "optional dependencies.\n", noiselevel=-1)
12042 +
12043 + def _show_merge_list(self):
12044 + if self._serialized_tasks_cache is not None and \
12045 + not (self._displayed_list and \
12046 + (self._displayed_list == self._serialized_tasks_cache or \
12047 + self._displayed_list == \
12048 + list(reversed(self._serialized_tasks_cache)))):
12049 + display_list = self._serialized_tasks_cache[:]
12050 + if "--tree" in self.myopts:
12051 + display_list.reverse()
12052 + self.display(display_list)
12053 +
12054 + def _show_unsatisfied_blockers(self, blockers):
12055 + self._show_merge_list()
12056 + msg = "Error: The above package list contains " + \
12057 + "packages which cannot be installed " + \
12058 + "at the same time on the same system."
12059 + prefix = colorize("BAD", " * ")
12060 + from textwrap import wrap
12061 + portage.writemsg("\n", noiselevel=-1)
12062 + for line in wrap(msg, 70):
12063 + portage.writemsg(prefix + line + "\n", noiselevel=-1)
12064 +
12065 + # Display the conflicting packages along with the packages
12066 + # that pulled them in. This is helpful for troubleshooting
12067 + # cases in which blockers don't solve automatically and
12068 + # the reasons are not apparent from the normal merge list
12069 + # display.
12070 +
12071 + conflict_pkgs = {}
12072 + for blocker in blockers:
12073 + for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
12074 + self._blocker_parents.parent_nodes(blocker)):
12075 + parent_atoms = self._parent_atoms.get(pkg)
12076 + if not parent_atoms:
12077 + atom = self._blocked_world_pkgs.get(pkg)
12078 + if atom is not None:
12079 + parent_atoms = set([("@world", atom)])
12080 + if parent_atoms:
12081 + conflict_pkgs[pkg] = parent_atoms
12082 +
12083 + if conflict_pkgs:
12084 + # Reduce noise by pruning packages that are only
12085 + # pulled in by other conflict packages.
12086 + pruned_pkgs = set()
12087 + for pkg, parent_atoms in conflict_pkgs.iteritems():
12088 + relevant_parent = False
12089 + for parent, atom in parent_atoms:
12090 + if parent not in conflict_pkgs:
12091 + relevant_parent = True
12092 + break
12093 + if not relevant_parent:
12094 + pruned_pkgs.add(pkg)
12095 + for pkg in pruned_pkgs:
12096 + del conflict_pkgs[pkg]
12097 +
12098 + if conflict_pkgs:
12099 + msg = []
12100 + msg.append("\n")
12101 + indent = " "
12102 + # Max number of parents shown, to avoid flooding the display.
12103 + max_parents = 3
12104 + for pkg, parent_atoms in conflict_pkgs.iteritems():
12105 +
12106 + pruned_list = set()
12107 +
12108 + # Prefer packages that are not directly involved in a conflict.
12109 + for parent_atom in parent_atoms:
12110 + if len(pruned_list) >= max_parents:
12111 + break
12112 + parent, atom = parent_atom
12113 + if parent not in conflict_pkgs:
12114 + pruned_list.add(parent_atom)
12115 +
12116 + for parent_atom in parent_atoms:
12117 + if len(pruned_list) >= max_parents:
12118 + break
12119 + pruned_list.add(parent_atom)
12120 +
12121 + omitted_parents = len(parent_atoms) - len(pruned_list)
12122 + msg.append(indent + "%s pulled in by\n" % pkg)
12123 +
12124 + for parent_atom in pruned_list:
12125 + parent, atom = parent_atom
12126 + msg.append(2*indent)
12127 + if isinstance(parent,
12128 + (PackageArg, AtomArg)):
12129 + # For PackageArg and AtomArg types, it's
12130 + # redundant to display the atom attribute.
12131 + msg.append(str(parent))
12132 + else:
12133 + # Display the specific atom from SetArg or
12134 + # Package types.
12135 + msg.append("%s required by %s" % (atom, parent))
12136 + msg.append("\n")
12137 +
12138 + if omitted_parents:
12139 + msg.append(2*indent)
12140 + msg.append("(and %d more)\n" % omitted_parents)
12141 +
12142 + msg.append("\n")
12143 +
12144 + sys.stderr.write("".join(msg))
12145 + sys.stderr.flush()
12146 +
12147 + if "--quiet" not in self.myopts:
12148 + show_blocker_docs_link()
12149 +
12150 + def display(self, mylist, favorites=[], verbosity=None):
12151 +
12152 + # This is used to prevent display_problems() from
12153 + # redundantly displaying this exact same merge list
12154 + # again via _show_merge_list().
12155 + self._displayed_list = mylist
12156 +
12157 + if verbosity is None:
12158 + verbosity = ("--quiet" in self.myopts and 1 or \
12159 + "--verbose" in self.myopts and 3 or 2)
12160 + favorites_set = InternalPackageSet(favorites)
12161 + oneshot = "--oneshot" in self.myopts or \
12162 + "--onlydeps" in self.myopts
12163 + columns = "--columns" in self.myopts
12164 + changelogs=[]
12165 + p=[]
12166 + blockers = []
12167 +
12168 + counters = PackageCounters()
12169 +
12170 + if verbosity == 1 and "--verbose" not in self.myopts:
12171 + def create_use_string(*args):
12172 + return ""
12173 + else:
12174 + def create_use_string(name, cur_iuse, iuse_forced, cur_use,
12175 + old_iuse, old_use,
12176 + is_new, reinst_flags,
12177 + all_flags=(verbosity == 3 or "--quiet" in self.myopts),
12178 + alphabetical=("--alphabetical" in self.myopts)):
12179 + enabled = []
12180 + if alphabetical:
12181 + disabled = enabled
12182 + removed = enabled
12183 + else:
12184 + disabled = []
12185 + removed = []
12186 + cur_iuse = set(cur_iuse)
12187 + enabled_flags = cur_iuse.intersection(cur_use)
12188 + removed_iuse = set(old_iuse).difference(cur_iuse)
12189 + any_iuse = cur_iuse.union(old_iuse)
12190 + any_iuse = list(any_iuse)
12191 + any_iuse.sort()
12192 + for flag in any_iuse:
12193 + flag_str = None
12194 + isEnabled = False
12195 + reinst_flag = reinst_flags and flag in reinst_flags
12196 + if flag in enabled_flags:
12197 + isEnabled = True
12198 + if is_new or flag in old_use and \
12199 + (all_flags or reinst_flag):
12200 + flag_str = red(flag)
12201 + elif flag not in old_iuse:
12202 + flag_str = yellow(flag) + "%*"
12203 + elif flag not in old_use:
12204 + flag_str = green(flag) + "*"
12205 + elif flag in removed_iuse:
12206 + if all_flags or reinst_flag:
12207 + flag_str = yellow("-" + flag) + "%"
12208 + if flag in old_use:
12209 + flag_str += "*"
12210 + flag_str = "(" + flag_str + ")"
12211 + removed.append(flag_str)
12212 + continue
12213 + else:
12214 + if is_new or flag in old_iuse and \
12215 + flag not in old_use and \
12216 + (all_flags or reinst_flag):
12217 + flag_str = blue("-" + flag)
12218 + elif flag not in old_iuse:
12219 + flag_str = yellow("-" + flag)
12220 + if flag not in iuse_forced:
12221 + flag_str += "%"
12222 + elif flag in old_use:
12223 + flag_str = green("-" + flag) + "*"
12224 + if flag_str:
12225 + if flag in iuse_forced:
12226 + flag_str = "(" + flag_str + ")"
12227 + if isEnabled:
12228 + enabled.append(flag_str)
12229 + else:
12230 + disabled.append(flag_str)
12231 +
12232 + if alphabetical:
12233 + ret = " ".join(enabled)
12234 + else:
12235 + ret = " ".join(enabled + disabled + removed)
12236 + if ret:
12237 + ret = '%s="%s" ' % (name, ret)
12238 + return ret
12239 +
12240 + repo_display = RepoDisplay(self.roots)
12241 +
12242 + tree_nodes = []
12243 + display_list = []
12244 + mygraph = self.digraph.copy()
12245 +
12246 + # If there are any Uninstall instances, add the corresponding
12247 + # blockers to the digraph (useful for --tree display).
12248 +
12249 + executed_uninstalls = set(node for node in mylist \
12250 + if isinstance(node, Package) and node.operation == "unmerge")
12251 +
12252 + for uninstall in self._blocker_uninstalls.leaf_nodes():
12253 + uninstall_parents = \
12254 + self._blocker_uninstalls.parent_nodes(uninstall)
12255 + if not uninstall_parents:
12256 + continue
12257 +
12258 + # Remove the corresponding "nomerge" node and substitute
12259 + # the Uninstall node.
12260 + inst_pkg = self._pkg_cache[
12261 + ("installed", uninstall.root, uninstall.cpv, "nomerge")]
12262 + try:
12263 + mygraph.remove(inst_pkg)
12264 + except KeyError:
12265 + pass
12266 +
12267 + try:
12268 + inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
12269 + except KeyError:
12270 + inst_pkg_blockers = []
12271 +
12272 + # Break the Package -> Uninstall edges.
12273 + mygraph.remove(uninstall)
12274 +
12275 + # Resolution of a package's blockers
12276 + # depends on its own uninstallation.
12277 + for blocker in inst_pkg_blockers:
12278 + mygraph.add(uninstall, blocker)
12279 +
12280 + # Expand Package -> Uninstall edges into
12281 + # Package -> Blocker -> Uninstall edges.
12282 + for blocker in uninstall_parents:
12283 + mygraph.add(uninstall, blocker)
12284 + for parent in self._blocker_parents.parent_nodes(blocker):
12285 + if parent != inst_pkg:
12286 + mygraph.add(blocker, parent)
12287 +
12288 + # If the uninstall task did not need to be executed because
12289 + # of an upgrade, display Blocker -> Upgrade edges since the
12290 + # corresponding Blocker -> Uninstall edges will not be shown.
12291 + upgrade_node = \
12292 + self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
12293 + if upgrade_node is not None and \
12294 + uninstall not in executed_uninstalls:
12295 + for blocker in uninstall_parents:
12296 + mygraph.add(upgrade_node, blocker)
12297 +
12298 + unsatisfied_blockers = []
12299 + i = 0
12300 + depth = 0
12301 + shown_edges = set()
12302 + for x in mylist:
12303 + if isinstance(x, Blocker) and not x.satisfied:
12304 + unsatisfied_blockers.append(x)
12305 + continue
12306 + graph_key = x
12307 + if "--tree" in self.myopts:
12308 + depth = len(tree_nodes)
12309 + while depth and graph_key not in \
12310 + mygraph.child_nodes(tree_nodes[depth-1]):
12311 + depth -= 1
12312 + if depth:
12313 + tree_nodes = tree_nodes[:depth]
12314 + tree_nodes.append(graph_key)
12315 + display_list.append((x, depth, True))
12316 + shown_edges.add((graph_key, tree_nodes[depth-1]))
12317 + else:
12318 + traversed_nodes = set() # prevent endless circles
12319 + traversed_nodes.add(graph_key)
12320 + def add_parents(current_node, ordered):
12321 + parent_nodes = None
12322 + # Do not traverse to parents if this node is an
12323 + # argument or a direct member of a set that has
12324 + # been specified as an argument (system or world).
12325 + if current_node not in self._set_nodes:
12326 + parent_nodes = mygraph.parent_nodes(current_node)
12327 + if parent_nodes:
12328 + child_nodes = set(mygraph.child_nodes(current_node))
12329 + selected_parent = None
12330 + # First, try to avoid a direct cycle.
12331 + for node in parent_nodes:
12332 + if not isinstance(node, (Blocker, Package)):
12333 + continue
12334 + if node not in traversed_nodes and \
12335 + node not in child_nodes:
12336 + edge = (current_node, node)
12337 + if edge in shown_edges:
12338 + continue
12339 + selected_parent = node
12340 + break
12341 + if not selected_parent:
12342 + # A direct cycle is unavoidable.
12343 + for node in parent_nodes:
12344 + if not isinstance(node, (Blocker, Package)):
12345 + continue
12346 + if node not in traversed_nodes:
12347 + edge = (current_node, node)
12348 + if edge in shown_edges:
12349 + continue
12350 + selected_parent = node
12351 + break
12352 + if selected_parent:
12353 + shown_edges.add((current_node, selected_parent))
12354 + traversed_nodes.add(selected_parent)
12355 + add_parents(selected_parent, False)
12356 + display_list.append((current_node,
12357 + len(tree_nodes), ordered))
12358 + tree_nodes.append(current_node)
12359 + tree_nodes = []
12360 + add_parents(graph_key, True)
12361 + else:
12362 + display_list.append((x, depth, True))
12363 + mylist = display_list
12364 + for x in unsatisfied_blockers:
12365 + mylist.append((x, 0, True))
12366 +
12367 + last_merge_depth = 0
12368 + for i in xrange(len(mylist)-1,-1,-1):
12369 + graph_key, depth, ordered = mylist[i]
12370 + if not ordered and depth == 0 and i > 0 \
12371 + and graph_key == mylist[i-1][0] and \
12372 + mylist[i-1][1] == 0:
12373 + # An ordered node got a consecutive duplicate when the tree was
12374 + # being filled in.
12375 + del mylist[i]
12376 + continue
12377 + if ordered and graph_key[-1] != "nomerge":
12378 + last_merge_depth = depth
12379 + continue
12380 + if depth >= last_merge_depth or \
12381 + i < len(mylist) - 1 and \
12382 + depth >= mylist[i+1][1]:
12383 + del mylist[i]
12384 +
12385 + from portage import flatten
12386 + from portage.dep import use_reduce, paren_reduce
12387 + # files to fetch list - avoids counting the same file twice
12388 + # in size display (verbose mode)
12389 + myfetchlist=[]
12390 +
12391 + # Use this set to detect when all the "repoadd" strings are "[0]"
12392 + # and disable the entire repo display in this case.
12393 + repoadd_set = set()
12394 +
12395 + for mylist_index in xrange(len(mylist)):
12396 + x, depth, ordered = mylist[mylist_index]
12397 + pkg_type = x[0]
12398 + myroot = x[1]
12399 + pkg_key = x[2]
12400 + portdb = self.trees[myroot]["porttree"].dbapi
12401 + bindb = self.trees[myroot]["bintree"].dbapi
12402 + vardb = self.trees[myroot]["vartree"].dbapi
12403 + vartree = self.trees[myroot]["vartree"]
12404 + pkgsettings = self.pkgsettings[myroot]
12405 +
12406 + fetch=" "
12407 + indent = " " * depth
12408 +
12409 + if isinstance(x, Blocker):
12410 + if x.satisfied:
12411 + blocker_style = "PKG_BLOCKER_SATISFIED"
12412 + addl = "%s %s " % (colorize(blocker_style, "b"), fetch)
12413 + else:
12414 + blocker_style = "PKG_BLOCKER"
12415 + addl = "%s %s " % (colorize(blocker_style, "B"), fetch)
12416 + if ordered:
12417 + counters.blocks += 1
12418 + if x.satisfied:
12419 + counters.blocks_satisfied += 1
12420 + resolved = portage.key_expand(
12421 + str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
12422 + if "--columns" in self.myopts and "--quiet" in self.myopts:
12423 + addl += " " + colorize(blocker_style, resolved)
12424 + else:
12425 + addl = "[%s %s] %s%s" % \
12426 + (colorize(blocker_style, "blocks"),
12427 + addl, indent, colorize(blocker_style, resolved))
12428 + block_parents = self._blocker_parents.parent_nodes(x)
12429 + block_parents = set([pnode[2] for pnode in block_parents])
12430 + block_parents = ", ".join(block_parents)
12431 + if resolved!=x[2]:
12432 + addl += colorize(blocker_style,
12433 + " (\"%s\" is blocking %s)") % \
12434 + (str(x.atom).lstrip("!"), block_parents)
12435 + else:
12436 + addl += colorize(blocker_style,
12437 + " (is blocking %s)") % block_parents
12438 + if isinstance(x, Blocker) and x.satisfied:
12439 + if columns:
12440 + continue
12441 + p.append(addl)
12442 + else:
12443 + blockers.append(addl)
12444 + else:
12445 + pkg_status = x[3]
12446 + pkg_merge = ordered and pkg_status == "merge"
12447 + if not pkg_merge and pkg_status == "merge":
12448 + pkg_status = "nomerge"
12449 + built = pkg_type != "ebuild"
12450 + installed = pkg_type == "installed"
12451 + pkg = x
12452 + metadata = pkg.metadata
12453 + ebuild_path = None
12454 + repo_name = metadata["repository"]
12455 + if pkg_type == "ebuild":
12456 + ebuild_path = portdb.findname(pkg_key)
12457 + if not ebuild_path: # shouldn't happen
12458 + raise portage.exception.PackageNotFound(pkg_key)
12459 + repo_path_real = os.path.dirname(os.path.dirname(
12460 + os.path.dirname(ebuild_path)))
12461 + else:
12462 + repo_path_real = portdb.getRepositoryPath(repo_name)
12463 + pkg_use = list(pkg.use.enabled)
12464 + try:
12465 + restrict = flatten(use_reduce(paren_reduce(
12466 + pkg.metadata["RESTRICT"]), uselist=pkg_use))
12467 + except portage.exception.InvalidDependString, e:
12468 + if not pkg.installed:
12469 + show_invalid_depstring_notice(x,
12470 + pkg.metadata["RESTRICT"], str(e))
12471 + del e
12472 + return 1
12473 + restrict = []
12474 + if "ebuild" == pkg_type and x[3] != "nomerge" and \
12475 + "fetch" in restrict:
12476 + fetch = red("F")
12477 + if ordered:
12478 + counters.restrict_fetch += 1
12479 + if portdb.fetch_check(pkg_key, pkg_use):
12480 + fetch = green("f")
12481 + if ordered:
12482 + counters.restrict_fetch_satisfied += 1
12483 +
12484 + # We need to use "--emptytree" testing here rather than "empty" param testing because "empty"
12485 + # param is used for -u, where you still *do* want to see when something is being upgraded.
12486 + myoldbest = []
12487 + myinslotlist = None
12488 + installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
12489 + if vardb.cpv_exists(pkg_key):
12490 + addl=" "+yellow("R")+fetch+" "
12491 + if ordered:
12492 + if pkg_merge:
12493 + counters.reinst += 1
12494 + elif pkg_status == "uninstall":
12495 + counters.uninst += 1
12496 + # filter out old-style virtual matches
12497 + elif installed_versions and \
12498 + portage.cpv_getkey(installed_versions[0]) == \
12499 + portage.cpv_getkey(pkg_key):
12500 + myinslotlist = vardb.match(pkg.slot_atom)
12501 + # If this is the first install of a new-style virtual, we
12502 + # need to filter out old-style virtual matches.
12503 + if myinslotlist and \
12504 + portage.cpv_getkey(myinslotlist[0]) != \
12505 + portage.cpv_getkey(pkg_key):
12506 + myinslotlist = None
12507 + if myinslotlist:
12508 + myoldbest = myinslotlist[:]
12509 + addl = " " + fetch
12510 + if not portage.dep.cpvequal(pkg_key,
12511 + portage.best([pkg_key] + myoldbest)):
12512 + # Downgrade in slot
12513 + addl += turquoise("U")+blue("D")
12514 + if ordered:
12515 + counters.downgrades += 1
12516 + else:
12517 + # Update in slot
12518 + addl += turquoise("U") + " "
12519 + if ordered:
12520 + counters.upgrades += 1
12521 + else:
12522 + # New slot, mark it new.
12523 + addl = " " + green("NS") + fetch + " "
12524 + myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
12525 + if ordered:
12526 + counters.newslot += 1
12527 +
12528 + if "--changelog" in self.myopts:
12529 + inst_matches = vardb.match(pkg.slot_atom)
12530 + if inst_matches:
12531 + changelogs.extend(self.calc_changelog(
12532 + portdb.findname(pkg_key),
12533 + inst_matches[0], pkg_key))
12534 + else:
12535 + addl = " " + green("N") + " " + fetch + " "
12536 + if ordered:
12537 + counters.new += 1
12538 +
12539 + verboseadd = ""
12540 + repoadd = None
12541 +
12542 + if True:
12543 + # USE flag display
12544 + forced_flags = set()
12545 + pkgsettings.setcpv(pkg) # for package.use.{mask,force}
12546 + forced_flags.update(pkgsettings.useforce)
12547 + forced_flags.update(pkgsettings.usemask)
12548 +
12549 + cur_use = [flag for flag in pkg.use.enabled \
12550 + if flag in pkg.iuse.all]
12551 + cur_iuse = sorted(pkg.iuse.all)
12552 +
12553 + if myoldbest and myinslotlist:
12554 + previous_cpv = myoldbest[0]
12555 + else:
12556 + previous_cpv = pkg.cpv
12557 + if vardb.cpv_exists(previous_cpv):
12558 + old_iuse, old_use = vardb.aux_get(
12559 + previous_cpv, ["IUSE", "USE"])
12560 + old_iuse = list(set(
12561 + filter_iuse_defaults(old_iuse.split())))
12562 + old_iuse.sort()
12563 + old_use = old_use.split()
12564 + is_new = False
12565 + else:
12566 + old_iuse = []
12567 + old_use = []
12568 + is_new = True
12569 +
12570 + old_use = [flag for flag in old_use if flag in old_iuse]
12571 +
12572 + use_expand = pkgsettings["USE_EXPAND"].lower().split()
12573 + use_expand.sort()
12574 + use_expand.reverse()
12575 + use_expand_hidden = \
12576 + pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
12577 +
12578 + def map_to_use_expand(myvals, forcedFlags=False,
12579 + removeHidden=True):
12580 + ret = {}
12581 + forced = {}
12582 + for exp in use_expand:
12583 + ret[exp] = []
12584 + forced[exp] = set()
12585 + for val in myvals[:]:
12586 + if val.startswith(exp.lower()+"_"):
12587 + if val in forced_flags:
12588 + forced[exp].add(val[len(exp)+1:])
12589 + ret[exp].append(val[len(exp)+1:])
12590 + myvals.remove(val)
12591 + ret["USE"] = myvals
12592 + forced["USE"] = [val for val in myvals \
12593 + if val in forced_flags]
12594 + if removeHidden:
12595 + for exp in use_expand_hidden:
12596 + ret.pop(exp, None)
12597 + if forcedFlags:
12598 + return ret, forced
12599 + return ret
12600 +
12601 + # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
12602 + # are the only thing that triggered reinstallation.
12603 + reinst_flags_map = {}
12604 + reinstall_for_flags = self._reinstall_nodes.get(pkg)
12605 + reinst_expand_map = None
12606 + if reinstall_for_flags:
12607 + reinst_flags_map = map_to_use_expand(
12608 + list(reinstall_for_flags), removeHidden=False)
12609 + for k in list(reinst_flags_map):
12610 + if not reinst_flags_map[k]:
12611 + del reinst_flags_map[k]
12612 + if not reinst_flags_map.get("USE"):
12613 + reinst_expand_map = reinst_flags_map.copy()
12614 + reinst_expand_map.pop("USE", None)
12615 + if reinst_expand_map and \
12616 + not set(reinst_expand_map).difference(
12617 + use_expand_hidden):
12618 + use_expand_hidden = \
12619 + set(use_expand_hidden).difference(
12620 + reinst_expand_map)
12621 +
12622 + cur_iuse_map, iuse_forced = \
12623 + map_to_use_expand(cur_iuse, forcedFlags=True)
12624 + cur_use_map = map_to_use_expand(cur_use)
12625 + old_iuse_map = map_to_use_expand(old_iuse)
12626 + old_use_map = map_to_use_expand(old_use)
12627 +
12628 + use_expand.sort()
12629 + use_expand.insert(0, "USE")
12630 +
12631 + for key in use_expand:
12632 + if key in use_expand_hidden:
12633 + continue
12634 + verboseadd += create_use_string(key.upper(),
12635 + cur_iuse_map[key], iuse_forced[key],
12636 + cur_use_map[key], old_iuse_map[key],
12637 + old_use_map[key], is_new,
12638 + reinst_flags_map.get(key))
12639 +
12640 + if verbosity == 3:
12641 + # size verbose
12642 + mysize=0
12643 + if pkg_type == "ebuild" and pkg_merge:
12644 + try:
12645 + myfilesdict = portdb.getfetchsizes(pkg_key,
12646 + useflags=pkg_use, debug=self.edebug)
12647 + except portage.exception.InvalidDependString, e:
12648 + src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
12649 + show_invalid_depstring_notice(x, src_uri, str(e))
12650 + del e
12651 + return 1
12652 + if myfilesdict is None:
12653 + myfilesdict="[empty/missing/bad digest]"
12654 + else:
12655 + for myfetchfile in myfilesdict:
12656 + if myfetchfile not in myfetchlist:
12657 + mysize+=myfilesdict[myfetchfile]
12658 + myfetchlist.append(myfetchfile)
12659 + if ordered:
12660 + counters.totalsize += mysize
12661 + verboseadd += format_size(mysize)
12662 +
12663 + # overlay verbose
12664 + # assign index for a previous version in the same slot
12665 + has_previous = False
12666 + repo_name_prev = None
12667 + slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
12668 + metadata["SLOT"])
12669 + slot_matches = vardb.match(slot_atom)
12670 + if slot_matches:
12671 + has_previous = True
12672 + repo_name_prev = vardb.aux_get(slot_matches[0],
12673 + ["repository"])[0]
12674 +
12675 + # now use the data to generate output
12676 + if pkg.installed or not has_previous:
12677 + repoadd = repo_display.repoStr(repo_path_real)
12678 + else:
12679 + repo_path_prev = None
12680 + if repo_name_prev:
12681 + repo_path_prev = portdb.getRepositoryPath(
12682 + repo_name_prev)
12683 + if repo_path_prev == repo_path_real:
12684 + repoadd = repo_display.repoStr(repo_path_real)
12685 + else:
12686 + repoadd = "%s=>%s" % (
12687 + repo_display.repoStr(repo_path_prev),
12688 + repo_display.repoStr(repo_path_real))
12689 + if repoadd:
12690 + repoadd_set.add(repoadd)
12691 +
12692 + xs = [portage.cpv_getkey(pkg_key)] + \
12693 + list(portage.catpkgsplit(pkg_key)[2:])
12694 + if xs[2] == "r0":
12695 + xs[2] = ""
12696 + else:
12697 + xs[2] = "-" + xs[2]
12698 +
12699 + mywidth = 130
12700 + if "COLUMNWIDTH" in self.settings:
12701 + try:
12702 + mywidth = int(self.settings["COLUMNWIDTH"])
12703 + except ValueError, e:
12704 + portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
12705 + portage.writemsg(
12706 + "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
12707 + self.settings["COLUMNWIDTH"], noiselevel=-1)
12708 + del e
12709 + oldlp = mywidth - 30
12710 + newlp = oldlp - 30
12711 +
12712 + # Convert myoldbest from a list to a string.
12713 + if not myoldbest:
12714 + myoldbest = ""
12715 + else:
12716 + for pos, key in enumerate(myoldbest):
12717 + key = portage.catpkgsplit(key)[2] + \
12718 + "-" + portage.catpkgsplit(key)[3]
12719 + if key[-3:] == "-r0":
12720 + key = key[:-3]
12721 + myoldbest[pos] = key
12722 + myoldbest = blue("["+", ".join(myoldbest)+"]")
12723 +
12724 + pkg_cp = xs[0]
12725 + root_config = self.roots[myroot]
12726 + system_set = root_config.sets["system"]
12727 + world_set = root_config.sets["world"]
12728 +
12729 + pkg_system = False
12730 + pkg_world = False
12731 + try:
12732 + pkg_system = system_set.findAtomForPackage(pkg)
12733 + pkg_world = world_set.findAtomForPackage(pkg)
12734 + if not (oneshot or pkg_world) and \
12735 + myroot == self.target_root and \
12736 + favorites_set.findAtomForPackage(pkg):
12737 + # Maybe it will be added to world now.
12738 + if create_world_atom(pkg, favorites_set, root_config):
12739 + pkg_world = True
12740 + except portage.exception.InvalidDependString:
12741 + # This is reported elsewhere if relevant.
12742 + pass
12743 +
12744 + def pkgprint(pkg_str):
12745 + if pkg_merge:
12746 + if pkg_system:
12747 + return colorize("PKG_MERGE_SYSTEM", pkg_str)
12748 + elif pkg_world:
12749 + return colorize("PKG_MERGE_WORLD", pkg_str)
12750 + else:
12751 + return colorize("PKG_MERGE", pkg_str)
12752 + elif pkg_status == "uninstall":
12753 + return colorize("PKG_UNINSTALL", pkg_str)
12754 + else:
12755 + if pkg_system:
12756 + return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
12757 + elif pkg_world:
12758 + return colorize("PKG_NOMERGE_WORLD", pkg_str)
12759 + else:
12760 + return colorize("PKG_NOMERGE", pkg_str)
12761 +
12762 + try:
12763 + properties = flatten(use_reduce(paren_reduce(
12764 + pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
12765 + except portage.exception.InvalidDependString, e:
12766 + if not pkg.installed:
12767 + show_invalid_depstring_notice(pkg,
12768 + pkg.metadata["PROPERTIES"], str(e))
12769 + del e
12770 + return 1
12771 + properties = []
12772 + interactive = "interactive" in properties
12773 + if interactive and pkg.operation == "merge":
12774 + addl = colorize("WARN", "I") + addl[1:]
12775 + if ordered:
12776 + counters.interactive += 1
12777 +
12778 + if x[1]!="/":
12779 + if myoldbest:
12780 + myoldbest +=" "
12781 + if "--columns" in self.myopts:
12782 + if "--quiet" in self.myopts:
12783 + myprint=addl+" "+indent+pkgprint(pkg_cp)
12784 + myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
12785 + myprint=myprint+myoldbest
12786 + myprint=myprint+darkgreen("to "+x[1])
12787 + verboseadd = None
12788 + else:
12789 + if not pkg_merge:
12790 + myprint = "[%s] %s%s" % \
12791 + (pkgprint(pkg_status.ljust(13)),
12792 + indent, pkgprint(pkg.cp))
12793 + else:
12794 + myprint = "[%s %s] %s%s" % \
12795 + (pkgprint(pkg.type_name), addl,
12796 + indent, pkgprint(pkg.cp))
12797 + if (newlp-nc_len(myprint)) > 0:
12798 + myprint=myprint+(" "*(newlp-nc_len(myprint)))
12799 + myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
12800 + if (oldlp-nc_len(myprint)) > 0:
12801 + myprint=myprint+" "*(oldlp-nc_len(myprint))
12802 + myprint=myprint+myoldbest
12803 + myprint += darkgreen("to " + pkg.root)
12804 + else:
12805 + if not pkg_merge:
12806 + myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
12807 + else:
12808 + myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
12809 + myprint += indent + pkgprint(pkg_key) + " " + \
12810 + myoldbest + darkgreen("to " + myroot)
12811 + else:
12812 + if "--columns" in self.myopts:
12813 + if "--quiet" in self.myopts:
12814 + myprint=addl+" "+indent+pkgprint(pkg_cp)
12815 + myprint=myprint+" "+green(xs[1]+xs[2])+" "
12816 + myprint=myprint+myoldbest
12817 + verboseadd = None
12818 + else:
12819 + if not pkg_merge:
12820 + myprint = "[%s] %s%s" % \
12821 + (pkgprint(pkg_status.ljust(13)),
12822 + indent, pkgprint(pkg.cp))
12823 + else:
12824 + myprint = "[%s %s] %s%s" % \
12825 + (pkgprint(pkg.type_name), addl,
12826 + indent, pkgprint(pkg.cp))
12827 + if (newlp-nc_len(myprint)) > 0:
12828 + myprint=myprint+(" "*(newlp-nc_len(myprint)))
12829 + myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
12830 + if (oldlp-nc_len(myprint)) > 0:
12831 + myprint=myprint+(" "*(oldlp-nc_len(myprint)))
12832 + myprint += myoldbest
12833 + else:
12834 + if not pkg_merge:
12835 + myprint = "[%s] %s%s %s" % \
12836 + (pkgprint(pkg_status.ljust(13)),
12837 + indent, pkgprint(pkg.cpv),
12838 + myoldbest)
12839 + else:
12840 + myprint = "[%s %s] %s%s %s" % \
12841 + (pkgprint(pkg_type), addl, indent,
12842 + pkgprint(pkg.cpv), myoldbest)
12843 +
12844 + if columns and pkg.operation == "uninstall":
12845 + continue
12846 + p.append((myprint, verboseadd, repoadd))
12847 +
12848 + if "--tree" not in self.myopts and \
12849 + "--quiet" not in self.myopts and \
12850 + not self._opts_no_restart.intersection(self.myopts) and \
12851 + pkg.root == self._running_root.root and \
12852 + portage.match_from_list(
12853 + portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
12854 + not vardb.cpv_exists(pkg.cpv) and \
12855 + "--quiet" not in self.myopts:
12856 + if mylist_index < len(mylist) - 1:
12857 + p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
12858 + p.append(colorize("WARN", " then resume the merge."))
12859 +
12860 + out = sys.stdout
12861 + show_repos = repoadd_set and repoadd_set != set(["0"])
12862 +
12863 + for x in p:
12864 + if isinstance(x, basestring):
12865 + out.write("%s\n" % (x,))
12866 + continue
12867 +
12868 + myprint, verboseadd, repoadd = x
12869 +
12870 + if verboseadd:
12871 + myprint += " " + verboseadd
12872 +
12873 + if show_repos and repoadd:
12874 + myprint += " " + teal("[%s]" % repoadd)
12875 +
12876 + out.write("%s\n" % (myprint,))
12877 +
12878 + for x in blockers:
12879 + print x
12880 +
12881 + if verbosity == 3:
12882 + print
12883 + print counters
12884 + if show_repos:
12885 + sys.stdout.write(str(repo_display))
12886 +
12887 + if "--changelog" in self.myopts:
12888 + print
12889 + for revision,text in changelogs:
12890 + print bold('*'+revision)
12891 + sys.stdout.write(text)
12892 +
12893 + sys.stdout.flush()
12894 + return os.EX_OK
12895 +
12896 + def display_problems(self):
12897 + """
12898 + Display problems with the dependency graph such as slot collisions.
12899 + This is called internally by display() to show the problems _after_
12900 + the merge list where it is most likely to be seen, but if display()
12901 + is not going to be called then this method should be called explicitly
12902 + to ensure that the user is notified of problems with the graph.
12903 +
12904 + All output goes to stderr, except for unsatisfied dependencies which
12905 + go to stdout for parsing by programs such as autounmask.
12906 + """
12907 +
12908 + # Note that show_masked_packages() sends its output to
12909 + # stdout, and some programs such as autounmask parse the
12910 + # output in cases when emerge bails out. However, when
12911 + # show_masked_packages() is called for installed packages
12912 + # here, the message is a warning that is more appropriate
12913 + # to send to stderr, so temporarily redirect stdout to
12914 + # stderr. TODO: Fix output code so there's a cleaner way
12915 + # to redirect everything to stderr.
12916 + sys.stdout.flush()
12917 + sys.stderr.flush()
12918 + stdout = sys.stdout
12919 + try:
12920 + sys.stdout = sys.stderr
12921 + self._display_problems()
12922 + finally:
12923 + sys.stdout = stdout
12924 + sys.stdout.flush()
12925 + sys.stderr.flush()
12926 +
12927 + # This goes to stdout for parsing by programs like autounmask.
12928 + for pargs, kwargs in self._unsatisfied_deps_for_display:
12929 + self._show_unsatisfied_dep(*pargs, **kwargs)
12930 +
12931 + def _display_problems(self):
12932 + if self._circular_deps_for_display is not None:
12933 + self._show_circular_deps(
12934 + self._circular_deps_for_display)
12935 +
12936 + # The user is only notified of a slot conflict if
12937 + # there are no unresolvable blocker conflicts.
12938 + if self._unsatisfied_blockers_for_display is not None:
12939 + self._show_unsatisfied_blockers(
12940 + self._unsatisfied_blockers_for_display)
12941 + else:
12942 + self._show_slot_collision_notice()
12943 +
12944 + # TODO: Add generic support for "set problem" handlers so that
12945 + # the below warnings aren't special cases for world only.
12946 +
12947 + if self._missing_args:
12948 + world_problems = False
12949 + if "world" in self._sets:
12950 + # Filter out indirect members of world (from nested sets)
12951 + # since only direct members of world are desired here.
12952 + world_set = self.roots[self.target_root].sets["world"]
12953 + for arg, atom in self._missing_args:
12954 + if arg.name == "world" and atom in world_set:
12955 + world_problems = True
12956 + break
12957 +
12958 + if world_problems:
12959 + sys.stderr.write("\n!!! Problems have been " + \
12960 + "detected with your world file\n")
12961 + sys.stderr.write("!!! Please run " + \
12962 + green("emaint --check world")+"\n\n")
12963 +
12964 + if self._missing_args:
12965 + sys.stderr.write("\n" + colorize("BAD", "!!!") + \
12966 + " Ebuilds for the following packages are either all\n")
12967 + sys.stderr.write(colorize("BAD", "!!!") + \
12968 + " masked or don't exist:\n")
12969 + sys.stderr.write(" ".join(str(atom) for arg, atom in \
12970 + self._missing_args) + "\n")
12971 +
12972 + if self._pprovided_args:
12973 + arg_refs = {}
12974 + for arg, atom in self._pprovided_args:
12975 + if isinstance(arg, SetArg):
12976 + parent = arg.name
12977 + arg_atom = (atom, atom)
12978 + else:
12979 + parent = "args"
12980 + arg_atom = (arg.arg, atom)
12981 + refs = arg_refs.setdefault(arg_atom, [])
12982 + if parent not in refs:
12983 + refs.append(parent)
12984 + msg = []
12985 + msg.append(bad("\nWARNING: "))
12986 + if len(self._pprovided_args) > 1:
12987 + msg.append("Requested packages will not be " + \
12988 + "merged because they are listed in\n")
12989 + else:
12990 + msg.append("A requested package will not be " + \
12991 + "merged because it is listed in\n")
12992 + msg.append("package.provided:\n\n")
12993 + problems_sets = set()
12994 + for (arg, atom), refs in arg_refs.iteritems():
12995 + ref_string = ""
12996 + if refs:
12997 + problems_sets.update(refs)
12998 + refs.sort()
12999 + ref_string = ", ".join(["'%s'" % name for name in refs])
13000 + ref_string = " pulled in by " + ref_string
13001 + msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
13002 + msg.append("\n")
13003 + if "world" in problems_sets:
13004 + msg.append("This problem can be solved in one of the following ways:\n\n")
13005 + msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
13006 + msg.append(" B) Uninstall offending packages (cleans them from world).\n")
13007 + msg.append(" C) Remove offending entries from package.provided.\n\n")
13008 + msg.append("The best course of action depends on the reason that an offending\n")
13009 + msg.append("package.provided entry exists.\n\n")
13010 + sys.stderr.write("".join(msg))
13011 +
13012 + masked_packages = []
13013 + for pkg in self._masked_installed:
13014 + root_config = pkg.root_config
13015 + pkgsettings = self.pkgsettings[pkg.root]
13016 + mreasons = get_masking_status(pkg, pkgsettings, root_config)
13017 + masked_packages.append((root_config, pkgsettings,
13018 + pkg.cpv, pkg.metadata, mreasons))
13019 + if masked_packages:
13020 + sys.stderr.write("\n" + colorize("BAD", "!!!") + \
13021 + " The following installed packages are masked:\n")
13022 + show_masked_packages(masked_packages)
13023 + show_mask_docs()
13024 + print
13025 +
13026 + def calc_changelog(self,ebuildpath,current,next):
13027 + if ebuildpath == None or not os.path.exists(ebuildpath):
13028 + return []
13029 + current = '-'.join(portage.catpkgsplit(current)[1:])
13030 + if current.endswith('-r0'):
13031 + current = current[:-3]
13032 + next = '-'.join(portage.catpkgsplit(next)[1:])
13033 + if next.endswith('-r0'):
13034 + next = next[:-3]
13035 + changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
13036 + try:
13037 + changelog = open(changelogpath).read()
13038 + except SystemExit, e:
13039 + raise # Needed else can't exit
13040 + except:
13041 + return []
13042 + divisions = self.find_changelog_tags(changelog)
13043 + #print 'XX from',current,'to',next
13044 + #for div,text in divisions: print 'XX',div
13045 + # skip entries for all revisions above the one we are about to emerge
13046 + for i in range(len(divisions)):
13047 + if divisions[i][0]==next:
13048 + divisions = divisions[i:]
13049 + break
13050 + # find out how many entries we are going to display
13051 + for i in range(len(divisions)):
13052 + if divisions[i][0]==current:
13053 + divisions = divisions[:i]
13054 + break
13055 + else:
13056 + # couldnt find the current revision in the list. display nothing
13057 + return []
13058 + return divisions
13059 +
13060 + def find_changelog_tags(self,changelog):
13061 + divs = []
13062 + release = None
13063 + while 1:
13064 + match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
13065 + if match is None:
13066 + if release is not None:
13067 + divs.append((release,changelog))
13068 + return divs
13069 + if release is not None:
13070 + divs.append((release,changelog[:match.start()]))
13071 + changelog = changelog[match.end():]
13072 + release = match.group(1)
13073 + if release.endswith('.ebuild'):
13074 + release = release[:-7]
13075 + if release.endswith('-r0'):
13076 + release = release[:-3]
13077 +
	def saveNomergeFavorites(self):
		"""Find atoms in favorites that are not in the mergelist and add them
		to the world file if necessary.

		Holds the world set's lock (when available) for the duration of
		the update; no-op for modes that must not modify the world file.
		"""
		for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
			"--oneshot", "--onlydeps", "--pretend"):
			if x in self.myopts:
				# None of these modes should modify the world file.
				return
		root_config = self.roots[self.target_root]
		world_set = root_config.sets["world"]

		world_locked = False
		if hasattr(world_set, "lock"):
			world_set.lock()
			world_locked = True

		if hasattr(world_set, "load"):
			world_set.load() # maybe it's changed on disk

		args_set = self._sets["args"]
		# NOTE(review): portdb is assigned but not used below — confirm
		# whether it can be removed.
		portdb = self.trees[self.target_root]["porttree"].dbapi
		added_favorites = set()
		for x in self._set_nodes:
			pkg_type, root, pkg_key, pkg_status = x
			if pkg_status != "nomerge":
				continue

			try:
				myfavkey = create_world_atom(x, args_set, root_config)
				if myfavkey:
					if myfavkey in added_favorites:
						continue
					added_favorites.add(myfavkey)
			except portage.exception.InvalidDependString, e:
				# Report the broken PROVIDE but keep processing the
				# remaining favorites.
				writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
					(pkg_key, str(e)), noiselevel=-1)
				writemsg("!!! see '%s'\n\n" % os.path.join(
					root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
				del e
		all_added = []
		# Also record nested sets (as set atoms) that are world
		# candidates and not already listed in the world file.
		for k in self._sets:
			if k in ("args", "world") or not root_config.sets[k].world_candidate:
				continue
			s = SETPREFIX + k
			if s in world_set:
				continue
			all_added.append(SETPREFIX + k)
		all_added.extend(added_favorites)
		all_added.sort()
		for a in all_added:
			print ">>> Recording %s in \"world\" favorites file..." % \
				colorize("INFORM", str(a))
		if all_added:
			world_set.update(all_added)

		if world_locked:
			world_set.unlock()
13134 +
	def loadResumeCommand(self, resume_data, skip_masked=True,
		skip_missing=True):
		"""
		Add a resume command to the graph and validate it in the process. This
		will raise a PackageNotFound exception if a package is not available.

		@param resume_data: the "resume" structure from mtimedb, expected
			to be a dict with a "mergelist" list and optional "favorites"
		@param skip_masked: when True, newly-masked packages become
			masked_tasks instead of display errors
		@param skip_missing: when True, silently drop mergelist entries
			whose package can no longer be found
		@rtype: bool
		@returns: False if the resume list is unusable, True on success;
			may raise UnsatisfiedResumeDep or PackageNotFound
		"""

		if not isinstance(resume_data, dict):
			return False

		mergelist = resume_data.get("mergelist")
		if not isinstance(mergelist, list):
			mergelist = []

		fakedb = self.mydbapi
		trees = self.trees
		serialized_tasks = []
		masked_tasks = []
		for x in mergelist:
			# Each valid entry is [pkg_type, root, cpv, action].
			if not (isinstance(x, list) and len(x) == 4):
				continue
			pkg_type, myroot, pkg_key, action = x
			if pkg_type not in self.pkg_tree_map:
				continue
			if action != "merge":
				continue
			tree_type = self.pkg_tree_map[pkg_type]
			mydb = trees[myroot][tree_type].dbapi
			db_keys = list(self._trees_orig[myroot][
				tree_type].dbapi._aux_cache_keys)
			try:
				metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
			except KeyError:
				# It does not exist or it is corrupt.
				if action == "uninstall":
					continue
				if skip_missing:
					# TODO: log these somewhere
					continue
				raise portage.exception.PackageNotFound(pkg_key)
			installed = action == "uninstall"
			built = pkg_type != "ebuild"
			root_config = self.roots[myroot]
			pkg = Package(built=built, cpv=pkg_key,
				installed=installed, metadata=metadata,
				operation=action, root_config=root_config,
				type_name=pkg_type)
			if pkg_type == "ebuild":
				# Recompute USE for ebuilds since configuration may have
				# changed since the resume list was saved.
				pkgsettings = self.pkgsettings[myroot]
				pkgsettings.setcpv(pkg)
				pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
				pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
			self._pkg_cache[pkg] = pkg

			root_config = self.roots[pkg.root]
			if "merge" == pkg.operation and \
				not visible(root_config.settings, pkg):
				# The package has become masked since the resume list
				# was saved.
				if skip_masked:
					masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
				else:
					self._unsatisfied_deps_for_display.append(
						((pkg.root, "="+pkg.cpv), {"myparent":None}))

			fakedb[myroot].cpv_inject(pkg)
			serialized_tasks.append(pkg)
			self.spinner.update()

		if self._unsatisfied_deps_for_display:
			return False

		if not serialized_tasks or "--nodeps" in self.myopts:
			self._serialized_tasks_cache = serialized_tasks
			self._scheduler_graph = self.digraph
		else:
			self._select_package = self._select_pkg_from_graph
			self.myparams.add("selective")
			# Always traverse deep dependencies in order to account for
			# potentially unsatisfied dependencies of installed packages.
			# This is necessary for correct --keep-going or --resume operation
			# in case a package from a group of circularly dependent packages
			# fails. In this case, a package which has recently been installed
			# may have an unsatisfied circular dependency (pulled in by
			# PDEPEND, for example). So, even though a package is already
			# installed, it may not have all of its dependencies satisfied, so
			# it may not be usable. If such a package is in the subgraph of
			# deep dependencies of a scheduled build, that build needs to
			# be cancelled. In order for this type of situation to be
			# recognized, deep traversal of dependencies is required.
			self.myparams.add("deep")

			favorites = resume_data.get("favorites")
			# NOTE(review): args_set is assigned but not used in this
			# branch — confirm whether it can be removed.
			args_set = self._sets["args"]
			if isinstance(favorites, list):
				args = self._load_favorites(favorites)
			else:
				args = []

			for task in serialized_tasks:
				if isinstance(task, Package) and \
					task.operation == "merge":
					if not self._add_pkg(task, None):
						return False

			# Packages for argument atoms need to be explicitly
			# added via _add_pkg() so that they are included in the
			# digraph (needed at least for --tree display).
			for arg in args:
				for atom in arg.set:
					pkg, existing_node = self._select_package(
						arg.root_config.root, atom)
					if existing_node is None and \
						pkg is not None:
						if not self._add_pkg(pkg, Dependency(atom=atom,
							root=pkg.root, parent=arg)):
							return False

			# Allow unsatisfied deps here to avoid showing a masking
			# message for an unsatisfied dep that isn't necessarily
			# masked.
			if not self._create_graph(allow_unsatisfied=True):
				return False

			unsatisfied_deps = []
			for dep in self._unsatisfied_deps:
				if not isinstance(dep.parent, Package):
					continue
				if dep.parent.operation == "merge":
					unsatisfied_deps.append(dep)
					continue

				# For unsatisfied deps of installed packages, only account for
				# them if they are in the subgraph of dependencies of a package
				# which is scheduled to be installed.
				unsatisfied_install = False
				traversed = set()
				dep_stack = self.digraph.parent_nodes(dep.parent)
				while dep_stack:
					node = dep_stack.pop()
					if not isinstance(node, Package):
						continue
					if node.operation == "merge":
						unsatisfied_install = True
						break
					if node in traversed:
						continue
					traversed.add(node)
					dep_stack.extend(self.digraph.parent_nodes(node))

				if unsatisfied_install:
					unsatisfied_deps.append(dep)

			if masked_tasks or unsatisfied_deps:
				# This probably means that a required package
				# was dropped via --skipfirst. It makes the
				# resume list invalid, so convert it to a
				# UnsatisfiedResumeDep exception.
				raise self.UnsatisfiedResumeDep(self,
					masked_tasks + unsatisfied_deps)
			self._serialized_tasks_cache = None
			try:
				self.altlist()
			except self._unknown_internal_error:
				return False

		return True
13300 +
13301 + def _load_favorites(self, favorites):
13302 + """
13303 + Use a list of favorites to resume state from a
13304 + previous select_files() call. This creates similar
13305 + DependencyArg instances to those that would have
13306 + been created by the original select_files() call.
13307 + This allows Package instances to be matched with
13308 + DependencyArg instances during graph creation.
13309 + """
13310 + root_config = self.roots[self.target_root]
13311 + getSetAtoms = root_config.setconfig.getSetAtoms
13312 + sets = root_config.sets
13313 + args = []
13314 + for x in favorites:
13315 + if not isinstance(x, basestring):
13316 + continue
13317 + if x in ("system", "world"):
13318 + x = SETPREFIX + x
13319 + if x.startswith(SETPREFIX):
13320 + s = x[len(SETPREFIX):]
13321 + if s not in sets:
13322 + continue
13323 + if s in self._sets:
13324 + continue
13325 + # Recursively expand sets so that containment tests in
13326 + # self._get_parent_sets() properly match atoms in nested
13327 + # sets (like if world contains system).
13328 + expanded_set = InternalPackageSet(
13329 + initial_atoms=getSetAtoms(s))
13330 + self._sets[s] = expanded_set
13331 + args.append(SetArg(arg=x, set=expanded_set,
13332 + root_config=root_config))
13333 + else:
13334 + if not portage.isvalidatom(x):
13335 + continue
13336 + args.append(AtomArg(arg=x, atom=x,
13337 + root_config=root_config))
13338 +
13339 + self._set_args(args)
13340 + return args
13341 +
13342 + class UnsatisfiedResumeDep(portage.exception.PortageException):
13343 + """
13344 + A dependency of a resume list is not installed. This
13345 + can occur when a required package is dropped from the
13346 + merge list via --skipfirst.
13347 + """
13348 + def __init__(self, depgraph, value):
13349 + portage.exception.PortageException.__init__(self, value)
13350 + self.depgraph = depgraph
13351 +
	class _internal_exception(portage.exception.PortageException):
		"""Base class for exceptions used internally by the depgraph."""
		def __init__(self, value=""):
			portage.exception.PortageException.__init__(self, value)
13355 +
	class _unknown_internal_error(_internal_exception):
		"""
		Used by the depgraph internally to terminate graph creation.
		The specific reason for the failure should already have been
		dumped to stderr; unfortunately, the exact reason for the
		failure may not be known.
		"""
13363 +
	class _serialize_tasks_retry(_internal_exception):
		"""
		Raised by the _serialize_tasks() method when it needs to
		be called again for some reason. The only case that it's currently
		used for is when neglected dependencies need to be added to the
		graph in order to avoid making a potentially unsafe decision.
		"""
13371 +
	class _dep_check_composite_db(portage.dbapi):
		"""
		A dbapi-like interface that is optimized for use in dep_check() calls.
		This is built on top of the existing depgraph package selection logic.
		Some packages that have been added to the graph may be masked from this
		view in order to influence the atom preference selection that occurs
		via dep_check().
		"""
		def __init__(self, depgraph, root):
			portage.dbapi.__init__(self)
			# Parent depgraph whose selection logic backs this view.
			self._depgraph = depgraph
			self._root = root
			# atom -> list of matching cpvs (memoized match() results)
			self._match_cache = {}
			# cpv -> Package instance, used by aux_get()
			self._cpv_pkg_map = {}

		def _clear_cache(self):
			# Invalidate memoized results, e.g. after graph state changes.
			self._match_cache.clear()
			self._cpv_pkg_map.clear()

		def match(self, atom):
			"""Return the cpvs matching atom that this view exposes,
			sorted ascending; results are memoized per atom."""
			ret = self._match_cache.get(atom)
			if ret is not None:
				# Return a copy so callers can't mutate the cache.
				return ret[:]
			orig_atom = atom
			if "/" not in atom:
				atom = self._dep_expand(atom)
			pkg, existing = self._depgraph._select_package(self._root, atom)
			if not pkg:
				ret = []
			else:
				# Return the highest available from select_package() as well as
				# any matching slots in the graph db.
				slots = set()
				slots.add(pkg.metadata["SLOT"])
				atom_cp = portage.dep_getkey(atom)
				if pkg.cp.startswith("virtual/"):
					# For new-style virtual lookahead that occurs inside
					# dep_check(), examine all slots. This is needed
					# so that newer slots will not unnecessarily be pulled in
					# when a satisfying lower slot is already installed. For
					# example, if virtual/jdk-1.4 is satisfied via kaffe then
					# there's no need to pull in a newer slot to satisfy a
					# virtual/jdk dependency.
					for db, pkg_type, built, installed, db_keys in \
						self._depgraph._filtered_trees[self._root]["dbs"]:
						for cpv in db.match(atom):
							if portage.cpv_getkey(cpv) != pkg.cp:
								continue
							slots.add(db.aux_get(cpv, ["SLOT"])[0])
				ret = []
				if self._visible(pkg):
					self._cpv_pkg_map[pkg.cpv] = pkg
					ret.append(pkg.cpv)
				# The highest match's slot was handled above; now select
				# one package for each remaining slot.
				slots.remove(pkg.metadata["SLOT"])
				while slots:
					slot_atom = "%s:%s" % (atom_cp, slots.pop())
					pkg, existing = self._depgraph._select_package(
						self._root, slot_atom)
					if not pkg:
						continue
					if not self._visible(pkg):
						continue
					self._cpv_pkg_map[pkg.cpv] = pkg
					ret.append(pkg.cpv)
				if ret:
					self._cpv_sort_ascending(ret)
			self._match_cache[orig_atom] = ret
			return ret[:]

		def _visible(self, pkg):
			"""Return False for packages that should be masked from this
			view (e.g. likely slot-conflict candidates)."""
			if pkg.installed and "selective" not in self._depgraph.myparams:
				try:
					arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
				except (StopIteration, portage.exception.InvalidDependString):
					arg = None
				# Without --selective, an installed package matched by an
				# argument atom must not satisfy deps (it's being replaced).
				if arg:
					return False
			if pkg.installed:
				try:
					if not visible(
						self._depgraph.pkgsettings[pkg.root], pkg):
						return False
				except portage.exception.InvalidDependString:
					pass
			in_graph = self._depgraph._slot_pkg_map[
				self._root].get(pkg.slot_atom)
			if in_graph is None:
				# Mask choices for packages which are not the highest visible
				# version within their slot (since they usually trigger slot
				# conflicts).
				highest_visible, in_graph = self._depgraph._select_package(
					self._root, pkg.slot_atom)
				if pkg != highest_visible:
					return False
			elif in_graph != pkg:
				# Mask choices for packages that would trigger a slot
				# conflict with a previously selected package.
				return False
			return True

		def _dep_expand(self, atom):
			"""
			This is only needed for old installed packages that may
			contain atoms that are not fully qualified with a specific
			category. Emulate the cpv_expand() function that's used by
			dbapi.match() in cases like this. If there are multiple
			matches, it's often due to a new-style virtual that has
			been added, so try to filter those out to avoid raising
			a ValueError.
			"""
			root_config = self._depgraph.roots[self._root]
			# NOTE(review): orig_atom is assigned but never used below —
			# confirm whether it can be removed.
			orig_atom = atom
			expanded_atoms = self._depgraph._dep_expand(root_config, atom)
			if len(expanded_atoms) > 1:
				# Prefer the single non-virtual match when there is one.
				non_virtual_atoms = []
				for x in expanded_atoms:
					if not portage.dep_getkey(x).startswith("virtual/"):
						non_virtual_atoms.append(x)
				if len(non_virtual_atoms) == 1:
					expanded_atoms = non_virtual_atoms
			if len(expanded_atoms) > 1:
				# compatible with portage.cpv_expand()
				raise portage.exception.AmbiguousPackageName(
					[portage.dep_getkey(x) for x in expanded_atoms])
			if expanded_atoms:
				atom = expanded_atoms[0]
			else:
				null_atom = insert_category_into_atom(atom, "null")
				null_cp = portage.dep_getkey(null_atom)
				cat, atom_pn = portage.catsplit(null_cp)
				virts_p = root_config.settings.get_virts_p().get(atom_pn)
				if virts_p:
					# Allow the resolver to choose which virtual.
					atom = insert_category_into_atom(atom, "virtual")
				else:
					atom = insert_category_into_atom(atom, "null")
			return atom

		def aux_get(self, cpv, wants):
			# cpv must have been returned by a prior match() call so that
			# it is present in self._cpv_pkg_map; missing metadata keys
			# yield empty strings.
			metadata = self._cpv_pkg_map[cpv].metadata
			return [metadata.get(x, "") for x in wants]
13513 +
13514 +
13515 +def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
13516 +
13517 + if "--quiet" in myopts:
13518 + print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
13519 + print "!!! one of the following fully-qualified ebuild names instead:\n"
13520 + for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
13521 + print " " + colorize("INFORM", cp)
13522 + return
13523 +
13524 + s = search(root_config, spinner, "--searchdesc" in myopts,
13525 + "--quiet" not in myopts, "--usepkg" in myopts,
13526 + "--usepkgonly" in myopts)
13527 + null_cp = portage.dep_getkey(insert_category_into_atom(
13528 + arg, "null"))
13529 + cat, atom_pn = portage.catsplit(null_cp)
13530 + s.searchkey = atom_pn
13531 + for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
13532 + s.addCP(cp)
13533 + s.output()
13534 + print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
13535 + print "!!! one of the above fully-qualified ebuild names instead.\n"
13536 +
def insert_category_into_atom(atom, category):
	"""Return *atom* with *category* inserted before the package name,
	or None when *atom* contains no word character to anchor on."""
	match = re.search(r'\w', atom)
	if match is None:
		return None
	pos = match.start()
	return "%s%s/%s" % (atom[:pos], category, atom[pos:])
13545 +
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
	"""
	Construct a depgraph for the given resume list. This will raise
	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
	@rtype: tuple
	@returns: (success, depgraph, dropped_tasks)
	"""
	skip_masked = True
	skip_unsatisfied = True
	mergelist = mtimedb["resume"]["mergelist"]
	dropped_tasks = set()
	# Retry loop: each iteration either succeeds, or prunes packages
	# with unsatisfied deps from the mergelist and tries again.
	while True:
		mydepgraph = depgraph(settings, trees,
			myopts, myparams, spinner)
		try:
			success = mydepgraph.loadResumeCommand(mtimedb["resume"],
				skip_masked=skip_masked)
		except depgraph.UnsatisfiedResumeDep, e:
			if not skip_unsatisfied:
				raise

			graph = mydepgraph.digraph
			unsatisfied_parents = dict((dep.parent, dep.parent) \
				for dep in e.value)
			traversed_nodes = set()
			unsatisfied_stack = list(unsatisfied_parents)
			while unsatisfied_stack:
				pkg = unsatisfied_stack.pop()
				if pkg in traversed_nodes:
					continue
				traversed_nodes.add(pkg)

				# If this package was pulled in by a parent
				# package scheduled for merge, removing this
				# package may cause the parent package's
				# dependency to become unsatisfied.
				for parent_node in graph.parent_nodes(pkg):
					if not isinstance(parent_node, Package) \
						or parent_node.operation not in ("merge", "nomerge"):
						continue
					unsatisfied = \
						graph.child_nodes(parent_node,
						ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
					if pkg in unsatisfied:
						unsatisfied_parents[parent_node] = parent_node
						unsatisfied_stack.append(parent_node)

			# Drop all mergelist entries whose package ended up in
			# unsatisfied_parents.
			pruned_mergelist = []
			for x in mergelist:
				if isinstance(x, list) and \
					tuple(x) not in unsatisfied_parents:
					pruned_mergelist.append(x)

			# If the mergelist doesn't shrink then this loop is infinite.
			if len(pruned_mergelist) == len(mergelist):
				# This happens if a package can't be dropped because
				# it's already installed, but it has unsatisfied PDEPEND.
				raise
			mergelist[:] = pruned_mergelist

			# Exclude installed packages that have been removed from the graph due
			# to failure to build/install runtime dependencies after the dependent
			# package has already been installed.
			dropped_tasks.update(pkg for pkg in \
				unsatisfied_parents if pkg.operation != "nomerge")
			mydepgraph.break_refs(unsatisfied_parents)

			del e, graph, traversed_nodes, \
				unsatisfied_parents, unsatisfied_stack
			continue
		else:
			break
	return (success, mydepgraph, dropped_tasks)
13619 +
def get_mask_info(root_config, cpv, pkgsettings,
	db, pkg_type, built, installed, db_keys):
	"""Fetch metadata for *cpv* from *db* and compute its mask reasons.

	@rtype: tuple
	@returns: (metadata, mreasons) where metadata is None and mreasons
		is ["corruption"] when the package entry is missing or corrupt
	"""
	try:
		metadata = dict(izip(db_keys,
			db.aux_get(cpv, db_keys)))
	except KeyError:
		# The package entry is missing or corrupt.
		metadata = None

	if metadata and not built:
		# Recompute USE for ebuilds from the current configuration.
		pkgsettings.setcpv(cpv, mydb=metadata)
		metadata["USE"] = pkgsettings["PORTAGE_USE"]
		metadata['CHOST'] = pkgsettings.get('CHOST', '')

	if metadata is None:
		return None, ["corruption"]

	eapi = metadata['EAPI']
	if eapi[:1] == '-':
		eapi = eapi[1:]
	if not portage.eapi_is_supported(eapi):
		# An unsupported EAPI masks everything else.
		return metadata, ['EAPI %s' % eapi]

	pkg = Package(type_name=pkg_type, root_config=root_config,
		cpv=cpv, built=built, installed=installed, metadata=metadata)
	return metadata, get_masking_status(pkg, pkgsettings, root_config)
13645 +
def show_masked_packages(masked_packages):
	"""Print each masked package along with its mask reasons, any
	package.mask comment and the locations of missing licenses.

	@param masked_packages: list of (root_config, pkgsettings, cpv,
		metadata, mreasons) tuples
	@rtype: bool
	@returns: True if at least one package is masked due to an
		unsupported EAPI
	"""
	shown_licenses = set()
	shown_comments = set()
	# Maybe there is both an ebuild and a binary. Only
	# show one of them to avoid redundant appearance.
	shown_cpvs = set()
	have_eapi_mask = False
	for (root_config, pkgsettings, cpv,
		metadata, mreasons) in masked_packages:
		if cpv in shown_cpvs:
			continue
		shown_cpvs.add(cpv)
		comment, filename = None, None
		if "package.mask" in mreasons:
			comment, filename = \
				portage.getmaskingreason(
				cpv, metadata=metadata,
				settings=pkgsettings,
				portdb=root_config.trees["porttree"].dbapi,
				return_location=True)
		missing_licenses = []
		if metadata:
			if not portage.eapi_is_supported(metadata["EAPI"]):
				have_eapi_mask = True
			try:
				missing_licenses = \
					pkgsettings._getMissingLicenses(
						cpv, metadata)
			except portage.exception.InvalidDependString:
				# This will have already been reported
				# above via mreasons.
				pass

		print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
		# Show each package.mask comment at most once.
		if comment and comment not in shown_comments:
			print filename+":"
			print comment
			shown_comments.add(comment)
		portdb = root_config.trees["porttree"].dbapi
		for l in missing_licenses:
			l_path = portdb.findLicensePath(l)
			if l in shown_licenses:
				continue
			msg = ("A copy of the '%s' license" + \
				" is located at '%s'.") % (l, l_path)
			print msg
			print
			shown_licenses.add(l)
	return have_eapi_mask
13695 +
def show_mask_docs():
	"""Print pointers to the documentation about masked packages."""
	msg_lines = (
		"For more information, see the MASKED PACKAGES section in the emerge",
		"man page or refer to the Gentoo Handbook.")
	for msg_line in msg_lines:
		print(msg_line)
13699 +
def filter_iuse_defaults(iuse):
	"""Yield each IUSE flag with any leading '+' or '-' default
	marker removed."""
	for flag in iuse:
		if flag[:1] in ("+", "-"):
			yield flag[1:]
		else:
			yield flag
13706 +
def show_blocker_docs_link():
	"""Print a pointer to the handbook section on blocked packages."""
	print("")
	print("For more information about " + bad("Blocked Packages") + ", please refer to the following")
	print("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):")
	print("")
	print("http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked")
	print("")
13714 +
def get_masking_status(pkg, pkgsettings, root_config):
	"""Return the list of reasons why *pkg* is masked, including the
	prefix-specific CHOST/EPREFIX checks and an undefined-SLOT check."""
	portdb = root_config.trees["porttree"].dbapi
	reasons = portage.getmaskingstatus(
		pkg, settings=pkgsettings, portdb=portdb)

	if not pkg.installed and \
		not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
		reasons.append("CHOST: %s" % \
			pkg.metadata["CHOST"])

	if pkg.built and not pkg.installed:
		# Prefix: a binary package must record an EPREFIX that is at
		# least as long as the configured one.
		if "EPREFIX" not in pkg.metadata or not pkg.metadata["EPREFIX"]:
			reasons.append("missing EPREFIX")
		elif len(pkg.metadata["EPREFIX"].strip()) < len(pkgsettings["EPREFIX"]):
			reasons.append("EPREFIX: '%s' too small" % pkg.metadata["EPREFIX"])

	if not pkg.metadata["SLOT"]:
		reasons.append("invalid: SLOT is undefined")

	return reasons
13736
13737 Copied: main/branches/prefix/pym/_emerge/is_valid_package_atom.py (from rev 13672, main/trunk/pym/_emerge/is_valid_package_atom.py)
13738 ===================================================================
13739 --- main/branches/prefix/pym/_emerge/is_valid_package_atom.py (rev 0)
13740 +++ main/branches/prefix/pym/_emerge/is_valid_package_atom.py 2009-06-27 14:07:14 UTC (rev 13710)
13741 @@ -0,0 +1,17 @@
13742 +import re
13743 +
13744 +# for an explanation on this logic, see pym/_emerge/__init__.py
13745 +import os
13746 +import sys
13747 +if os.environ.__contains__("PORTAGE_PYTHONPATH"):
13748 + sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
13749 +else:
13750 + sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
13751 +import portage
13752 +
def is_valid_package_atom(x):
	"""Return True if *x* is a valid package atom; a bare name without a
	category is validated by temporarily prefixing a dummy 'cat/'."""
	if "/" not in x:
		match = re.search(r'\w', x)
		if match is not None:
			pos = match.start()
			x = "%scat/%s" % (x[:pos], x[pos:])
	return portage.isvalidatom(x)