Gentoo Archives: gentoo-commits

From: Magnus Granberg <zorry@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
Date: Thu, 29 Nov 2012 22:22:39
Message-Id: 1354227006.dbdc835ba201fb580985101955725e85f1b83586.zorry@gentoo
1 commit: dbdc835ba201fb580985101955725e85f1b83586
2 Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
3 AuthorDate: Thu Nov 29 22:10:06 2012 +0000
4 Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
5 CommitDate: Thu Nov 29 22:10:06 2012 +0000
6 URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=dbdc835b
7
8 Rework on the db and some code
9
10 ---
11 gobs/bin/gobs_host_jobs | 3 +-
12 gobs/pym/ConnectionManager.py | 3 +-
13 gobs/pym/buildquerydb.py | 2 +-
14 gobs/pym/categories.py | 30 -
15 gobs/pym/depgraph.py | 1637 +++++++++++++++++++++++------------------
16 gobs/pym/jobs.py | 136 ++--
17 gobs/pym/old_cpv.py | 14 +-
18 gobs/pym/package.py | 561 ++++++++-------
19 gobs/pym/pgsql.py | 8 +-
20 gobs/pym/pgsql_querys.py | 308 ++++++++
21 gobs/pym/repoman_gobs.py | 10 +-
22 gobs/pym/text.py | 4 +-
23 gobs/pym/updatedb.py | 129 ++--
24 13 files changed, 1673 insertions(+), 1172 deletions(-)
25
26 diff --git a/gobs/bin/gobs_host_jobs b/gobs/bin/gobs_host_jobs
27 index 5ece453..72683c9 100755
28 --- a/gobs/bin/gobs_host_jobs
29 +++ b/gobs/bin/gobs_host_jobs
30 @@ -1,5 +1,4 @@
31 #!/usr/bin/python
32 -# Copyright 2006-2011 Gentoo Foundation
33 # Distributed under the terms of the GNU General Public License v2
34
35 from __future__ import print_function
36 @@ -15,7 +14,7 @@ from gobs.ConnectionManager import connectionManager
37 CM=connectionManager(gobs_settings_dict)
38 #selectively import the pgsql/mysql querys
39 if CM.getName()=='pgsql':
40 - from gobs.pgsql import *
41 + from gobs.pgsql_querys import add_gobs_logs
42
43 import logging
44 import time
45
46 diff --git a/gobs/pym/ConnectionManager.py b/gobs/pym/ConnectionManager.py
47 index ff0e07f..5e8a763 100644
48 --- a/gobs/pym/ConnectionManager.py
49 +++ b/gobs/pym/ConnectionManager.py
50 @@ -11,6 +11,7 @@ class connectionManager(object):
51 if not cls._instance:
52 cls._instance = super(connectionManager, cls).__new__(cls, *args, **kwargs)
53 #read the sql user/host etc and store it in the local object
54 + cls._backend=settings_dict['sql_backend']
55 cls._host=settings_dict['sql_host']
56 cls._user=settings_dict['sql_user']
57 cls._password=settings_dict['sql_passwd']
58 @@ -21,7 +22,7 @@ class connectionManager(object):
59 cls._connectionNumber=numberOfconnections
60 #always create 1 connection
61 cls._pool=pool.ThreadedConnectionPool(1,cls._connectionNumber,host=cls._host,database=cls._database,user=cls._user,password=cls._password)
62 - cls._name='pgsql'
63 + cls._name=cls._backend
64 except ImportError:
65 print("Please install a recent version of dev-python/psycopg for Python")
66 sys.exit(1)
67
68 diff --git a/gobs/pym/buildquerydb.py b/gobs/pym/buildquerydb.py
69 index e0745e2..abd5ea9 100644
70 --- a/gobs/pym/buildquerydb.py
71 +++ b/gobs/pym/buildquerydb.py
72 @@ -68,7 +68,7 @@ def add_buildquery_main(config_id):
73 log_msg = "Check configs done"
74 add_gobs_logs(conn, log_msg, "info", config_profile)
75 # Get default config from the configs table and default_config=1
76 - default_config_root = "/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id + "/"
77 + default_config_root = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id + "/"
78 # Set config_root (PORTAGE_CONFIGROOT) to default_config_root
79 mysettings = portage.config(config_root = default_config_root)
80 myportdb = portage.portdbapi(mysettings=mysettings)
81
82 diff --git a/gobs/pym/categories.py b/gobs/pym/categories.py
83 deleted file mode 100644
84 index dae1207..0000000
85 --- a/gobs/pym/categories.py
86 +++ /dev/null
87 @@ -1,30 +0,0 @@
88 -#from gobs.text import gobs_text
89 -from gobs.text import get_file_text
90 -import portage
91 -from gobs.readconf import get_conf_settings
92 -reader=get_conf_settings()
93 -gobs_settings_dict=reader.read_gobs_settings_all()
94 -# make a CM
95 -from gobs.ConnectionManager import connectionManager
96 -CM=connectionManager(gobs_settings_dict)
97 -#selectively import the pgsql/mysql querys
98 -if CM.getName()=='pgsql':
99 - from gobs.pgsql import *
100 -
101 -class gobs_categories(object):
102 -
103 - def __init__(self, mysettings):
104 - self._mysettings = mysettings
105 -
106 - def update_categories_db(self, categories):
107 - conn=CM.getConnection()
108 - # Update categories_meta in the db
109 - categories_dir = self._mysettings['PORTDIR'] + "/" + categories + "/"
110 - categories_metadata_xml_checksum_tree = portage.checksum.sha256hash(categories_dir + "metadata.xml")[0]
111 - categories_metadata_xml_text_tree = get_file_text(categories_dir + "metadata.xml")
112 - categories_metadata_xml_checksum_db = get_categories_checksum_db(conn, categories)
113 - if categories_metadata_xml_checksum_db is None:
114 - add_new_categories_meta_sql(conn,categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree)
115 - elif categories_metadata_xml_checksum_db != categories_metadata_xml_checksum_tree:
116 - update_categories_meta_sql(conn,categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree)
117 - CM.putConnection(conn)
118
119 diff --git a/gobs/pym/depgraph.py b/gobs/pym/depgraph.py
120 index 7c46cde..0a6afc8 100644
121 --- a/gobs/pym/depgraph.py
122 +++ b/gobs/pym/depgraph.py
123 @@ -1,6 +1,5 @@
124 # Copyright 1999-2012 Gentoo Foundation
125 # Distributed under the terms of the GNU General Public License v2
126 -# Copy of ../pym/_emerge/depgraph.py from Portage
127
128 from __future__ import print_function
129
130 @@ -19,14 +18,19 @@ from portage import os, OrderedDict
131 from portage import _unicode_decode, _unicode_encode, _encodings
132 from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
133 from portage.dbapi import dbapi
134 +from portage.dbapi.dep_expand import dep_expand
135 from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
136 - check_required_use, human_readable_required_use, _repo_separator, \
137 - _RequiredUseBranch, _RequiredUseLeaf
138 -from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
139 -from portage.exception import InvalidAtom, InvalidDependString, PortageException
140 + check_required_use, human_readable_required_use, match_from_list, \
141 + _repo_separator
142 +from portage.dep._slot_operator import ignore_built_slot_operator_deps
143 +from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \
144 + _get_eapi_attrs
145 +from portage.exception import (InvalidAtom, InvalidData, InvalidDependString,
146 + PackageNotFound, PortageException)
147 from portage.output import colorize, create_color_func, \
148 darkgreen, green
149 bad = create_color_func("BAD")
150 +from portage.package.ebuild.config import _get_feature_flags
151 from portage.package.ebuild.getmaskingstatus import \
152 _getmaskingstatus, _MaskReason
153 from portage._sets import SETPREFIX
154 @@ -73,6 +77,9 @@ from _emerge.resolver.output import Display
155 if sys.hexversion >= 0x3000000:
156 basestring = str
157 long = int
158 + _unicode = str
159 +else:
160 + _unicode = unicode
161
162 class _scheduler_graph_config(object):
163 def __init__(self, trees, pkg_cache, graph, mergelist):
164 @@ -85,9 +92,9 @@ def _wildcard_set(atoms):
165 pkgs = InternalPackageSet(allow_wildcard=True)
166 for x in atoms:
167 try:
168 - x = Atom(x, allow_wildcard=True)
169 + x = Atom(x, allow_wildcard=True, allow_repo=False)
170 except portage.exception.InvalidAtom:
171 - x = Atom("*/" + x, allow_wildcard=True)
172 + x = Atom("*/" + x, allow_wildcard=True, allow_repo=False)
173 pkgs.add(x)
174 return pkgs
175
176 @@ -110,6 +117,8 @@ class _frozen_depgraph_config(object):
177 self._pkg_cache = {}
178 self._highest_license_masked = {}
179 dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
180 + ignore_built_slot_operator_deps = myopts.get(
181 + "--ignore-built-slot-operator-deps", "n") == "y"
182 for myroot in trees:
183 self.trees[myroot] = {}
184 # Create a RootConfig instance that references
185 @@ -124,7 +133,8 @@ class _frozen_depgraph_config(object):
186 FakeVartree(trees[myroot]["root_config"],
187 pkg_cache=self._pkg_cache,
188 pkg_root_config=self.roots[myroot],
189 - dynamic_deps=dynamic_deps)
190 + dynamic_deps=dynamic_deps,
191 + ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
192 self.pkgsettings[myroot] = portage.config(
193 clone=self.trees[myroot]["vartree"].settings)
194
195 @@ -366,12 +376,15 @@ class _dynamic_depgraph_config(object):
196 # This use used to check if we have accounted for blockers
197 # relevant to a package.
198 self._traversed_pkg_deps = set()
199 - self._slot_collision_info = {}
200 + # This should be ordered such that the backtracker will
201 + # attempt to solve conflicts which occurred earlier first,
202 + # since an earlier conflict can be the cause of a conflict
203 + # which occurs later.
204 + self._slot_collision_info = OrderedDict()
205 # Slot collision nodes are not allowed to block other packages since
206 # blocker validation is only able to account for one package per slot.
207 self._slot_collision_nodes = set()
208 self._parent_atoms = {}
209 - self._slot_conflict_parent_atoms = set()
210 self._slot_conflict_handler = None
211 self._circular_dependency_handler = None
212 self._serialized_tasks_cache = None
213 @@ -400,7 +413,9 @@ class _dynamic_depgraph_config(object):
214 self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
215 self._needed_license_changes = backtrack_parameters.needed_license_changes
216 self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
217 + self._needed_required_use_config_changes = backtrack_parameters.needed_required_use_config_changes
218 self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
219 + self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
220 self._need_restart = False
221 # For conditions that always require user intervention, such as
222 # unsatisfied REQUIRED_USE (currently has no autounmask support).
223 @@ -410,6 +425,8 @@ class _dynamic_depgraph_config(object):
224 self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
225 self._success_without_autounmask = False
226 self._traverse_ignored_deps = False
227 + self._complete_mode = False
228 + self._slot_operator_deps = {}
229
230 for myroot in depgraph._frozen_config.trees:
231 self.sets[myroot] = _depgraph_sets()
232 @@ -487,8 +504,6 @@ class _dynamic_depgraph_config(object):
233 class depgraph(object):
234
235 pkg_tree_map = RootConfig.pkg_tree_map
236 -
237 - _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
238
239 def __init__(self, settings, trees, myopts, myparams, spinner,
240 frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
241 @@ -521,10 +536,6 @@ class depgraph(object):
242 preload_installed_pkgs = \
243 "--nodeps" not in self._frozen_config.myopts
244
245 - if self._frozen_config.myopts.get("--root-deps") is not None and \
246 - myroot != self._frozen_config.target_root:
247 - continue
248 -
249 fake_vartree = self._frozen_config.trees[myroot]["vartree"]
250 if not fake_vartree.dbapi:
251 # This needs to be called for the first depgraph, but not for
252 @@ -599,11 +610,17 @@ class depgraph(object):
253 "due to non matching USE:\n\n", noiselevel=-1)
254
255 for pkg, flags in self._dynamic_config.ignored_binaries.items():
256 - writemsg(" =%s" % pkg.cpv, noiselevel=-1)
257 + flag_display = []
258 + for flag in sorted(flags):
259 + if flag not in pkg.use.enabled:
260 + flag = "-" + flag
261 + flag_display.append(flag)
262 + flag_display = " ".join(flag_display)
263 + # The user can paste this line into package.use
264 + writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
265 if pkg.root_config.settings["ROOT"] != "/":
266 - writemsg(" for %s" % (pkg.root,), noiselevel=-1)
267 - writemsg("\n use flag(s): %s\n" % ", ".join(sorted(flags)),
268 - noiselevel=-1)
269 + writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
270 + writemsg("\n", noiselevel=-1)
271
272 msg = [
273 "",
274 @@ -615,7 +632,7 @@ class depgraph(object):
275 for line in msg:
276 if line:
277 line = colorize("INFORM", line)
278 - writemsg_stdout(line + "\n", noiselevel=-1)
279 + writemsg(line + "\n", noiselevel=-1)
280
281 def _show_missed_update(self):
282
283 @@ -801,37 +818,400 @@ class depgraph(object):
284
285 def _process_slot_conflicts(self):
286 """
287 + If there are any slot conflicts and backtracking is enabled,
288 + _complete_graph should complete the graph before this method
289 + is called, so that all relevant reverse dependencies are
290 + available for use in backtracking decisions.
291 + """
292 + for (slot_atom, root), slot_nodes in \
293 + self._dynamic_config._slot_collision_info.items():
294 + self._process_slot_conflict(root, slot_atom, slot_nodes)
295 +
296 + def _process_slot_conflict(self, root, slot_atom, slot_nodes):
297 + """
298 Process slot conflict data to identify specific atoms which
299 lead to conflict. These atoms only match a subset of the
300 packages that have been pulled into a given slot.
301 """
302 - for (slot_atom, root), slot_nodes \
303 - in self._dynamic_config._slot_collision_info.items():
304
305 - all_parent_atoms = set()
306 - for pkg in slot_nodes:
307 - parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
308 - if not parent_atoms:
309 + debug = "--debug" in self._frozen_config.myopts
310 +
311 + slot_parent_atoms = set()
312 + for pkg in slot_nodes:
313 + parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
314 + if not parent_atoms:
315 + continue
316 + slot_parent_atoms.update(parent_atoms)
317 +
318 + conflict_pkgs = []
319 + conflict_atoms = {}
320 + for pkg in slot_nodes:
321 +
322 + if self._dynamic_config._allow_backtracking and \
323 + pkg in self._dynamic_config._runtime_pkg_mask:
324 + if debug:
325 + writemsg_level(
326 + "!!! backtracking loop detected: %s %s\n" % \
327 + (pkg,
328 + self._dynamic_config._runtime_pkg_mask[pkg]),
329 + level=logging.DEBUG, noiselevel=-1)
330 +
331 + parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
332 + if parent_atoms is None:
333 + parent_atoms = set()
334 + self._dynamic_config._parent_atoms[pkg] = parent_atoms
335 +
336 + all_match = True
337 + for parent_atom in slot_parent_atoms:
338 + if parent_atom in parent_atoms:
339 continue
340 - all_parent_atoms.update(parent_atoms)
341 + # Use package set for matching since it will match via
342 + # PROVIDE when necessary, while match_from_list does not.
343 + parent, atom = parent_atom
344 + atom_set = InternalPackageSet(
345 + initial_atoms=(atom,), allow_repo=True)
346 + if atom_set.findAtomForPackage(pkg,
347 + modified_use=self._pkg_use_enabled(pkg)):
348 + parent_atoms.add(parent_atom)
349 + else:
350 + all_match = False
351 + conflict_atoms.setdefault(parent_atom, set()).add(pkg)
352
353 - for pkg in slot_nodes:
354 - parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
355 - if parent_atoms is None:
356 - parent_atoms = set()
357 - self._dynamic_config._parent_atoms[pkg] = parent_atoms
358 - for parent_atom in all_parent_atoms:
359 - if parent_atom in parent_atoms:
360 + if not all_match:
361 + conflict_pkgs.append(pkg)
362 +
363 + if conflict_pkgs and \
364 + self._dynamic_config._allow_backtracking and \
365 + not self._accept_blocker_conflicts():
366 + remaining = []
367 + for pkg in conflict_pkgs:
368 + if self._slot_conflict_backtrack_abi(pkg,
369 + slot_nodes, conflict_atoms):
370 + backtrack_infos = self._dynamic_config._backtrack_infos
371 + config = backtrack_infos.setdefault("config", {})
372 + config.setdefault("slot_conflict_abi", set()).add(pkg)
373 + else:
374 + remaining.append(pkg)
375 + if remaining:
376 + self._slot_confict_backtrack(root, slot_atom,
377 + slot_parent_atoms, remaining)
378 +
379 + def _slot_confict_backtrack(self, root, slot_atom,
380 + all_parents, conflict_pkgs):
381 +
382 + debug = "--debug" in self._frozen_config.myopts
383 + existing_node = self._dynamic_config._slot_pkg_map[root][slot_atom]
384 + backtrack_data = []
385 + # The ordering of backtrack_data can make
386 + # a difference here, because both mask actions may lead
387 + # to valid, but different, solutions and the one with
388 + # 'existing_node' masked is usually the better one. Because
389 + # of that, we choose an order such that
390 + # the backtracker will first explore the choice with
391 + # existing_node masked. The backtracker reverses the
392 + # order, so the order it uses is the reverse of the
393 + # order shown here. See bug #339606.
394 + if existing_node in conflict_pkgs and \
395 + existing_node is not conflict_pkgs[-1]:
396 + conflict_pkgs.remove(existing_node)
397 + conflict_pkgs.append(existing_node)
398 + for to_be_masked in conflict_pkgs:
399 + # For missed update messages, find out which
400 + # atoms matched to_be_selected that did not
401 + # match to_be_masked.
402 + parent_atoms = \
403 + self._dynamic_config._parent_atoms.get(to_be_masked, set())
404 + conflict_atoms = set(parent_atom for parent_atom in all_parents \
405 + if parent_atom not in parent_atoms)
406 + backtrack_data.append((to_be_masked, conflict_atoms))
407 +
408 + if len(backtrack_data) > 1:
409 + # NOTE: Generally, we prefer to mask the higher
410 + # version since this solves common cases in which a
411 + # lower version is needed so that all dependencies
412 + # will be satisfied (bug #337178). However, if
413 + # existing_node happens to be installed then we
414 + # mask that since this is a common case that is
415 + # triggered when --update is not enabled.
416 + if existing_node.installed:
417 + pass
418 + elif any(pkg > existing_node for pkg in conflict_pkgs):
419 + backtrack_data.reverse()
420 +
421 + to_be_masked = backtrack_data[-1][0]
422 +
423 + self._dynamic_config._backtrack_infos.setdefault(
424 + "slot conflict", []).append(backtrack_data)
425 + self._dynamic_config._need_restart = True
426 + if debug:
427 + msg = []
428 + msg.append("")
429 + msg.append("")
430 + msg.append("backtracking due to slot conflict:")
431 + msg.append(" first package: %s" % existing_node)
432 + msg.append(" package to mask: %s" % to_be_masked)
433 + msg.append(" slot: %s" % slot_atom)
434 + msg.append(" parents: %s" % ", ".join( \
435 + "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
436 + msg.append("")
437 + writemsg_level("".join("%s\n" % l for l in msg),
438 + noiselevel=-1, level=logging.DEBUG)
439 +
440 + def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
441 + """
442 + If one or more conflict atoms have a slot/sub-slot dep that can be resolved
443 + by rebuilding the parent package, then schedule the rebuild via
444 + backtracking, and return True. Otherwise, return False.
445 + """
446 +
447 + found_update = False
448 + for parent_atom, conflict_pkgs in conflict_atoms.items():
449 + parent, atom = parent_atom
450 + if atom.slot_operator != "=" or not parent.built:
451 + continue
452 +
453 + if pkg not in conflict_pkgs:
454 + continue
455 +
456 + for other_pkg in slot_nodes:
457 + if other_pkg in conflict_pkgs:
458 + continue
459 +
460 + dep = Dependency(atom=atom, child=other_pkg,
461 + parent=parent, root=pkg.root)
462 +
463 + if self._slot_operator_update_probe(dep):
464 + self._slot_operator_update_backtrack(dep)
465 + found_update = True
466 +
467 + return found_update
468 +
469 + def _slot_operator_update_backtrack(self, dep, new_child_slot=None):
470 + if new_child_slot is None:
471 + child = dep.child
472 + else:
473 + child = new_child_slot
474 + if "--debug" in self._frozen_config.myopts:
475 + msg = []
476 + msg.append("")
477 + msg.append("")
478 + msg.append("backtracking due to missed slot abi update:")
479 + msg.append(" child package: %s" % child)
480 + if new_child_slot is not None:
481 + msg.append(" new child slot package: %s" % new_child_slot)
482 + msg.append(" parent package: %s" % dep.parent)
483 + msg.append(" atom: %s" % dep.atom)
484 + msg.append("")
485 + writemsg_level("\n".join(msg),
486 + noiselevel=-1, level=logging.DEBUG)
487 + backtrack_infos = self._dynamic_config._backtrack_infos
488 + config = backtrack_infos.setdefault("config", {})
489 +
490 + # mask unwanted binary packages if necessary
491 + abi_masks = {}
492 + if new_child_slot is None:
493 + if not child.installed:
494 + abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
495 + if not dep.parent.installed:
496 + abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
497 + if abi_masks:
498 + config.setdefault("slot_operator_mask_built", {}).update(abi_masks)
499 +
500 + # trigger replacement of installed packages if necessary
501 + abi_reinstalls = set()
502 + if dep.parent.installed:
503 + abi_reinstalls.add((dep.parent.root, dep.parent.slot_atom))
504 + if new_child_slot is None and child.installed:
505 + abi_reinstalls.add((child.root, child.slot_atom))
506 + if abi_reinstalls:
507 + config.setdefault("slot_operator_replace_installed",
508 + set()).update(abi_reinstalls)
509 +
510 + self._dynamic_config._need_restart = True
511 +
512 + def _slot_operator_update_probe(self, dep, new_child_slot=False):
513 + """
514 + slot/sub-slot := operators tend to prevent updates from getting pulled in,
515 + since installed packages pull in packages with the slot/sub-slot that they
516 + were built against. Detect this case so that we can schedule rebuilds
517 + and reinstalls when appropriate.
518 + NOTE: This function only searches for updates that involve upgrades
519 + to higher versions, since the logic required to detect when a
520 + downgrade would be desirable is not implemented.
521 + """
522 +
523 + if dep.child.installed and \
524 + self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child,
525 + modified_use=self._pkg_use_enabled(dep.child)):
526 + return None
527 +
528 + if dep.parent.installed and \
529 + self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
530 + modified_use=self._pkg_use_enabled(dep.parent)):
531 + return None
532 +
533 + debug = "--debug" in self._frozen_config.myopts
534 + want_downgrade = None
535 +
536 + for replacement_parent in self._iter_similar_available(dep.parent,
537 + dep.parent.slot_atom):
538 +
539 + for atom in replacement_parent.validated_atoms:
540 + if not atom.slot_operator == "=" or \
541 + atom.blocker or \
542 + atom.cp != dep.atom.cp:
543 + continue
544 +
545 + # Discard USE deps, we're only searching for an approximate
546 + # pattern, and dealing with USE states is too complex for
547 + # this purpose.
548 + atom = atom.without_use
549 +
550 + if replacement_parent.built and \
551 + portage.dep._match_slot(atom, dep.child):
552 + # Our selected replacement_parent appears to be built
553 + # for the existing child selection. So, discard this
554 + # parent and search for another.
555 + break
556 +
557 + for pkg in self._iter_similar_available(
558 + dep.child, atom):
559 + if pkg.slot == dep.child.slot and \
560 + pkg.sub_slot == dep.child.sub_slot:
561 + # If slot/sub-slot is identical, then there's
562 + # no point in updating.
563 continue
564 - # Use package set for matching since it will match via
565 - # PROVIDE when necessary, while match_from_list does not.
566 - parent, atom = parent_atom
567 - atom_set = InternalPackageSet(
568 - initial_atoms=(atom,), allow_repo=True)
569 - if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
570 - parent_atoms.add(parent_atom)
571 + if new_child_slot:
572 + if pkg.slot == dep.child.slot:
573 + continue
574 + if pkg < dep.child:
575 + # the new slot only matters if the
576 + # package version is higher
577 + continue
578 else:
579 - self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
580 + if pkg.slot != dep.child.slot:
581 + continue
582 + if pkg < dep.child:
583 + if want_downgrade is None:
584 + want_downgrade = self._downgrade_probe(dep.child)
585 + # be careful not to trigger a rebuild when
586 + # the only version available with a
587 + # different slot_operator is an older version
588 + if not want_downgrade:
589 + continue
590 +
591 + if debug:
592 + msg = []
593 + msg.append("")
594 + msg.append("")
595 + msg.append("slot_operator_update_probe:")
596 + msg.append(" existing child package: %s" % dep.child)
597 + msg.append(" existing parent package: %s" % dep.parent)
598 + msg.append(" new child package: %s" % pkg)
599 + msg.append(" new parent package: %s" % replacement_parent)
600 + msg.append("")
601 + writemsg_level("\n".join(msg),
602 + noiselevel=-1, level=logging.DEBUG)
603 +
604 + return pkg
605 +
606 + if debug:
607 + msg = []
608 + msg.append("")
609 + msg.append("")
610 + msg.append("slot_operator_update_probe:")
611 + msg.append(" existing child package: %s" % dep.child)
612 + msg.append(" existing parent package: %s" % dep.parent)
613 + msg.append(" new child package: %s" % None)
614 + msg.append(" new parent package: %s" % None)
615 + msg.append("")
616 + writemsg_level("\n".join(msg),
617 + noiselevel=-1, level=logging.DEBUG)
618 +
619 + return None
620 +
621 + def _downgrade_probe(self, pkg):
622 + """
623 + Detect cases where a downgrade of the given package is considered
624 + desirable due to the current version being masked or unavailable.
625 + """
626 + available_pkg = None
627 + for available_pkg in self._iter_similar_available(pkg,
628 + pkg.slot_atom):
629 + if available_pkg >= pkg:
630 + # There's an available package of the same or higher
631 + # version, so downgrade seems undesirable.
632 + return False
633 +
634 + return available_pkg is not None
635 +
636 + def _iter_similar_available(self, graph_pkg, atom):
637 + """
638 + Given a package that's in the graph, do a rough check to
639 + see if a similar package is available to install. The given
640 + graph_pkg itself may be yielded only if it's not installed.
641 + """
642 +
643 + usepkgonly = "--usepkgonly" in self._frozen_config.myopts
644 + useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
645 + use_ebuild_visibility = self._frozen_config.myopts.get(
646 + '--use-ebuild-visibility', 'n') != 'n'
647 +
648 + for pkg in self._iter_match_pkgs_any(
649 + graph_pkg.root_config, atom):
650 + if pkg.cp != graph_pkg.cp:
651 + # discard old-style virtual match
652 + continue
653 + if pkg.installed:
654 + continue
655 + if pkg in self._dynamic_config._runtime_pkg_mask:
656 + continue
657 + if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
658 + modified_use=self._pkg_use_enabled(pkg)):
659 + continue
660 + if not self._pkg_visibility_check(pkg):
661 + continue
662 + if pkg.built:
663 + if self._equiv_binary_installed(pkg):
664 + continue
665 + if not (not use_ebuild_visibility and
666 + (usepkgonly or useoldpkg_atoms.findAtomForPackage(
667 + pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
668 + not self._equiv_ebuild_visible(pkg):
669 + continue
670 + yield pkg
671 +
672 + def _slot_operator_trigger_reinstalls(self):
673 + """
674 + Search for packages with slot-operator deps on older slots, and schedule
675 + rebuilds if they can link to a newer slot that's in the graph.
676 + """
677 +
678 + rebuild_if_new_slot = self._dynamic_config.myparams.get(
679 + "rebuild_if_new_slot", "y") == "y"
680 +
681 + for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():
682 +
683 + for dep in slot_info:
684 + if not (dep.child.built and dep.parent and
685 + isinstance(dep.parent, Package) and dep.parent.built):
686 + continue
687 +
688 + # Check for slot update first, since we don't want to
689 + # trigger reinstall of the child package when a newer
690 + # slot will be used instead.
691 + if rebuild_if_new_slot:
692 + new_child = self._slot_operator_update_probe(dep,
693 + new_child_slot=True)
694 + if new_child:
695 + self._slot_operator_update_backtrack(dep,
696 + new_child_slot=new_child)
697 + break
698 +
699 + if dep.want_update:
700 + if self._slot_operator_update_probe(dep):
701 + self._slot_operator_update_backtrack(dep)
702 + break
703
704 def _reinstall_for_flags(self, pkg, forced_flags,
705 orig_use, orig_iuse, cur_use, cur_iuse):
706 @@ -845,18 +1225,22 @@ class depgraph(object):
707 in ("y", "auto"))
708 newuse = "--newuse" in self._frozen_config.myopts
709 changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
710 + feature_flags = _get_feature_flags(
711 + _get_eapi_attrs(pkg.metadata["EAPI"]))
712
713 if newuse or (binpkg_respect_use and not changed_use):
714 flags = set(orig_iuse.symmetric_difference(
715 cur_iuse).difference(forced_flags))
716 flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
717 cur_iuse.intersection(cur_use)))
718 + flags.difference_update(feature_flags)
719 if flags:
720 return flags
721
722 elif changed_use or binpkg_respect_use:
723 - flags = orig_iuse.intersection(orig_use).symmetric_difference(
724 - cur_iuse.intersection(cur_use))
725 + flags = set(orig_iuse.intersection(orig_use).symmetric_difference(
726 + cur_iuse.intersection(cur_use)))
727 + flags.difference_update(feature_flags)
728 if flags:
729 return flags
730 return None
731 @@ -1100,12 +1484,13 @@ class depgraph(object):
732 # package selection, since we want to prompt the user
733 # for USE adjustment rather than have REQUIRED_USE
734 # affect package selection and || dep choices.
735 - """if not pkg.built and pkg.metadata.get("REQUIRED_USE") and \
736 + if not pkg.built and pkg.metadata.get("REQUIRED_USE") and \
737 eapi_has_required_use(pkg.metadata["EAPI"]):
738 required_use_is_sat = check_required_use(
739 pkg.metadata["REQUIRED_USE"],
740 self._pkg_use_enabled(pkg),
741 - pkg.iuse.is_valid_flag)
742 + pkg.iuse.is_valid_flag,
743 + eapi=pkg.metadata["EAPI"])
744 if not required_use_is_sat:
745 if dep.atom is not None and dep.parent is not None:
746 self._add_parent_atom(pkg, (dep.parent, dep.atom))
747 @@ -1119,9 +1504,10 @@ class depgraph(object):
748 if atom is None:
749 atom = Atom("=" + pkg.cpv)
750 self._dynamic_config._unsatisfied_deps_for_display.append(
751 - ((pkg.root, atom), {"myparent":dep.parent}))
752 + ((pkg.root, atom),
753 + {"myparent" : dep.parent, "show_req_use" : pkg}))
754 self._dynamic_config._skip_restart = True
755 - return 0"""
756 + return 0
757
758 if not pkg.onlydeps:
759
760 @@ -1150,121 +1536,7 @@ class depgraph(object):
761 (dep.parent, dep.atom))
762 return 1
763 else:
764 - # A slot conflict has occurred.
765 - # The existing node should not already be in
766 - # runtime_pkg_mask, since that would trigger an
767 - # infinite backtracking loop.
768 - if self._dynamic_config._allow_backtracking and \
769 - existing_node in \
770 - self._dynamic_config._runtime_pkg_mask:
771 - if "--debug" in self._frozen_config.myopts:
772 - writemsg(
773 - "!!! backtracking loop detected: %s %s\n" % \
774 - (existing_node,
775 - self._dynamic_config._runtime_pkg_mask[
776 - existing_node]), noiselevel=-1)
777 - elif self._dynamic_config._allow_backtracking and \
778 - not self._accept_blocker_conflicts() and \
779 - not self.need_restart():
780 -
781 - self._add_slot_conflict(pkg)
782 - if dep.atom is not None and dep.parent is not None:
783 - self._add_parent_atom(pkg, (dep.parent, dep.atom))
784 -
785 - if arg_atoms:
786 - for parent_atom in arg_atoms:
787 - parent, atom = parent_atom
788 - self._add_parent_atom(pkg, parent_atom)
789 - self._process_slot_conflicts()
790 -
791 - backtrack_data = []
792 - fallback_data = []
793 - all_parents = set()
794 - # The ordering of backtrack_data can make
795 - # a difference here, because both mask actions may lead
796 - # to valid, but different, solutions and the one with
797 - # 'existing_node' masked is usually the better one. Because
798 - # of that, we choose an order such that
799 - # the backtracker will first explore the choice with
800 - # existing_node masked. The backtracker reverses the
801 - # order, so the order it uses is the reverse of the
802 - # order shown here. See bug #339606.
803 - for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
804 - # For missed update messages, find out which
805 - # atoms matched to_be_selected that did not
806 - # match to_be_masked.
807 - parent_atoms = \
808 - self._dynamic_config._parent_atoms.get(to_be_selected, set())
809 - if parent_atoms:
810 - conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
811 - if conflict_atoms:
812 - parent_atoms = conflict_atoms
813 -
814 - all_parents.update(parent_atoms)
815 -
816 - all_match = True
817 - for parent, atom in parent_atoms:
818 - i = InternalPackageSet(initial_atoms=(atom,),
819 - allow_repo=True)
820 - if not i.findAtomForPackage(to_be_masked):
821 - all_match = False
822 - break
823 -
824 - fallback_data.append((to_be_masked, parent_atoms))
825 -
826 - if all_match:
827 - # 'to_be_masked' does not violate any parent atom, which means
828 - # there is no point in masking it.
829 - pass
830 - else:
831 - backtrack_data.append((to_be_masked, parent_atoms))
832 -
833 - if not backtrack_data:
834 - # This shouldn't happen, but fall back to the old
835 - # behavior if this gets triggered somehow.
836 - backtrack_data = fallback_data
837 -
838 - if len(backtrack_data) > 1:
839 - # NOTE: Generally, we prefer to mask the higher
840 - # version since this solves common cases in which a
841 - # lower version is needed so that all dependencies
842 - # will be satisfied (bug #337178). However, if
843 - # existing_node happens to be installed then we
844 - # mask that since this is a common case that is
845 - # triggered when --update is not enabled.
846 - if existing_node.installed:
847 - pass
848 - elif pkg > existing_node:
849 - backtrack_data.reverse()
850 -
851 - to_be_masked = backtrack_data[-1][0]
852 -
853 - self._dynamic_config._backtrack_infos["slot conflict"] = backtrack_data
854 - self._dynamic_config._need_restart = True
855 - if "--debug" in self._frozen_config.myopts:
856 - msg = []
857 - msg.append("")
858 - msg.append("")
859 - msg.append("backtracking due to slot conflict:")
860 - if backtrack_data is fallback_data:
861 - msg.append("!!! backtrack_data fallback")
862 - msg.append(" first package: %s" % existing_node)
863 - msg.append(" second package: %s" % pkg)
864 - msg.append(" package to mask: %s" % to_be_masked)
865 - msg.append(" slot: %s" % pkg.slot_atom)
866 - msg.append(" parents: %s" % ", ".join( \
867 - "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
868 - msg.append("")
869 - writemsg_level("".join("%s\n" % l for l in msg),
870 - noiselevel=-1, level=logging.DEBUG)
871 - return 0
872 -
873 - # A slot collision has occurred. Sometimes this coincides
874 - # with unresolvable blockers, so the slot collision will be
875 - # shown later if there are no unresolvable blockers.
876 self._add_slot_conflict(pkg)
877 - slot_collision = True
878 -
879 if debug:
880 writemsg_level(
881 "%s%s %s\n" % ("Slot Conflict:".ljust(15),
882 @@ -1273,6 +1545,8 @@ class depgraph(object):
883 modified_use=self._pkg_use_enabled(existing_node))),
884 level=logging.DEBUG, noiselevel=-1)
885
886 + slot_collision = True
887 +
888 if slot_collision:
889 # Now add this node to the graph so that self.display()
890 # can show use flags and --tree portage.output. This node is
891 @@ -1329,10 +1603,27 @@ class depgraph(object):
892 # Installing package A, we need to make sure package A's deps are met.
893 # emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
894 # If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
895 - if arg_atoms:
896 - depth = 0
897 + if arg_atoms and depth > 0:
898 + for parent, atom in arg_atoms:
899 + if parent.reset_depth:
900 + depth = 0
901 + break
902 +
903 + if previously_added and pkg.depth is not None:
904 + depth = min(pkg.depth, depth)
905 pkg.depth = depth
906 deep = self._dynamic_config.myparams.get("deep", 0)
907 + update = "--update" in self._frozen_config.myopts
908 +
909 + dep.want_update = (not self._dynamic_config._complete_mode and
910 + (arg_atoms or update) and
911 + not (deep is not True and depth > deep))
912 +
913 + dep.child = pkg
914 + if (not pkg.onlydeps and pkg.built and
915 + dep.atom and dep.atom.slot_operator_built):
916 + self._add_slot_operator_dep(dep)
917 +
918 recurse = deep is True or depth + 1 <= deep
919 dep_stack = self._dynamic_config._dep_stack
920 if "recurse" not in self._dynamic_config.myparams:
921 @@ -1364,6 +1655,14 @@ class depgraph(object):
922 self._dynamic_config._parent_atoms[pkg] = parent_atoms
923 parent_atoms.add(parent_atom)
924
925 + def _add_slot_operator_dep(self, dep):
926 + slot_key = (dep.root, dep.child.slot_atom)
927 + slot_info = self._dynamic_config._slot_operator_deps.get(slot_key)
928 + if slot_info is None:
929 + slot_info = []
930 + self._dynamic_config._slot_operator_deps[slot_key] = slot_info
931 + slot_info.append(dep)
932 +
933 def _add_slot_conflict(self, pkg):
934 self._dynamic_config._slot_collision_nodes.add(pkg)
935 slot_key = (pkg.slot_atom, pkg.root)
936 @@ -1379,10 +1678,10 @@ class depgraph(object):
937 myroot = pkg.root
938 metadata = pkg.metadata
939 removal_action = "remove" in self._dynamic_config.myparams
940 + eapi_attrs = _get_eapi_attrs(pkg.metadata["EAPI"])
941
942 edepend={}
943 - depkeys = ["DEPEND","RDEPEND","PDEPEND"]
944 - for k in depkeys:
945 + for k in Package._dep_keys:
946 edepend[k] = metadata[k]
947
948 if not pkg.built and \
949 @@ -1409,31 +1708,44 @@ class depgraph(object):
950 # Removal actions never traverse ignored buildtime
951 # dependencies, so it's safe to discard them early.
952 edepend["DEPEND"] = ""
953 + edepend["HDEPEND"] = ""
954 ignore_build_time_deps = True
955
956 + ignore_depend_deps = ignore_build_time_deps
957 + ignore_hdepend_deps = ignore_build_time_deps
958 +
959 if removal_action:
960 depend_root = myroot
961 else:
962 - depend_root = self._frozen_config._running_root.root
963 - root_deps = self._frozen_config.myopts.get("--root-deps")
964 - if root_deps is not None:
965 - if root_deps is True:
966 - depend_root = myroot
967 - elif root_deps == "rdeps":
968 - ignore_build_time_deps = True
969 + if eapi_attrs.hdepend:
970 + depend_root = myroot
971 + else:
972 + depend_root = self._frozen_config._running_root.root
973 + root_deps = self._frozen_config.myopts.get("--root-deps")
974 + if root_deps is not None:
975 + if root_deps is True:
976 + depend_root = myroot
977 + elif root_deps == "rdeps":
978 + ignore_depend_deps = True
979
980 # If rebuild mode is not enabled, it's safe to discard ignored
981 # build-time dependencies. If you want these deps to be traversed
982 # in "complete" mode then you need to specify --with-bdeps=y.
983 - if ignore_build_time_deps and \
984 - not self._rebuild.rebuild:
985 - edepend["DEPEND"] = ""
986 + if not self._rebuild.rebuild:
987 + if ignore_depend_deps:
988 + edepend["DEPEND"] = ""
989 + if ignore_hdepend_deps:
990 + edepend["HDEPEND"] = ""
991
992 deps = (
993 (depend_root, edepend["DEPEND"],
994 self._priority(buildtime=True,
995 - optional=(pkg.built or ignore_build_time_deps),
996 - ignored=ignore_build_time_deps)),
997 + optional=(pkg.built or ignore_depend_deps),
998 + ignored=ignore_depend_deps)),
999 + (self._frozen_config._running_root.root, edepend["HDEPEND"],
1000 + self._priority(buildtime=True,
1001 + optional=(pkg.built or ignore_hdepend_deps),
1002 + ignored=ignore_hdepend_deps)),
1003 (myroot, edepend["RDEPEND"],
1004 self._priority(runtime=True)),
1005 (myroot, edepend["PDEPEND"],
1006 @@ -1455,7 +1767,10 @@ class depgraph(object):
1007
1008 try:
1009 dep_string = portage.dep.use_reduce(dep_string,
1010 - uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
1011 + uselist=self._pkg_use_enabled(pkg),
1012 + is_valid_flag=pkg.iuse.is_valid_flag,
1013 + opconvert=True, token_class=Atom,
1014 + eapi=pkg.metadata['EAPI'])
1015 except portage.exception.InvalidDependString as e:
1016 if not pkg.installed:
1017 # should have been masked before it was selected
1018 @@ -1467,7 +1782,9 @@ class depgraph(object):
1019 # practical to ignore this issue for installed packages.
1020 try:
1021 dep_string = portage.dep.use_reduce(dep_string,
1022 - uselist=self._pkg_use_enabled(pkg))
1023 + uselist=self._pkg_use_enabled(pkg),
1024 + opconvert=True, token_class=Atom,
1025 + eapi=pkg.metadata['EAPI'])
1026 except portage.exception.InvalidDependString as e:
1027 self._dynamic_config._masked_installed.add(pkg)
1028 del e
1029 @@ -1488,9 +1805,6 @@ class depgraph(object):
1030 if not dep_string:
1031 continue
1032
1033 - dep_string = portage.dep.paren_enclose(dep_string,
1034 - unevaluated_atom=True)
1035 -
1036 if not self._add_pkg_dep_string(
1037 pkg, dep_root, dep_priority, dep_string,
1038 allow_unsatisfied):
1039 @@ -1524,7 +1838,9 @@ class depgraph(object):
1040 if debug:
1041 writemsg_level("\nParent: %s\n" % (pkg,),
1042 noiselevel=-1, level=logging.DEBUG)
1043 - writemsg_level("Depstring: %s\n" % (dep_string,),
1044 + dep_repr = portage.dep.paren_enclose(dep_string,
1045 + unevaluated_atom=True, opconvert=True)
1046 + writemsg_level("Depstring: %s\n" % (dep_repr,),
1047 noiselevel=-1, level=logging.DEBUG)
1048 writemsg_level("Priority: %s\n" % (dep_priority,),
1049 noiselevel=-1, level=logging.DEBUG)
1050 @@ -1602,16 +1918,11 @@ class depgraph(object):
1051 self._dynamic_config._slot_pkg_map[dep.child.root].get(
1052 dep.child.slot_atom) is None:
1053 myarg = None
1054 - if dep.root == self._frozen_config.target_root:
1055 - try:
1056 - myarg = next(self._iter_atoms_for_pkg(dep.child))
1057 - except StopIteration:
1058 - pass
1059 - except InvalidDependString:
1060 - if not dep.child.installed:
1061 - # This shouldn't happen since the package
1062 - # should have been masked.
1063 - raise
1064 + try:
1065 + myarg = next(self._iter_atoms_for_pkg(dep.child), None)
1066 + except InvalidDependString:
1067 + if not dep.child.installed:
1068 + raise
1069
1070 if myarg is None:
1071 # Existing child selection may not be valid unless
1072 @@ -1717,14 +2028,11 @@ class depgraph(object):
1073 self._dynamic_config._slot_pkg_map[dep.child.root].get(
1074 dep.child.slot_atom) is None:
1075 myarg = None
1076 - if dep.root == self._frozen_config.target_root:
1077 - try:
1078 - myarg = next(self._iter_atoms_for_pkg(dep.child))
1079 - except StopIteration:
1080 - pass
1081 - except InvalidDependString:
1082 - if not dep.child.installed:
1083 - raise
1084 + try:
1085 + myarg = next(self._iter_atoms_for_pkg(dep.child), None)
1086 + except InvalidDependString:
1087 + if not dep.child.installed:
1088 + raise
1089
1090 if myarg is None:
1091 ignored = True
1092 @@ -1818,9 +2126,14 @@ class depgraph(object):
1093 # Yield ~, =*, < and <= atoms first, since those are more likely to
1094 # cause slot conflicts, and we want those atoms to be displayed
1095 # in the resulting slot conflict message (see bug #291142).
1096 + # Give similar treatment to slot/sub-slot atoms.
1097 conflict_atoms = []
1098 normal_atoms = []
1099 + abi_atoms = []
1100 for atom in cp_atoms:
1101 + if atom.slot_operator_built:
1102 + abi_atoms.append(atom)
1103 + continue
1104 conflict = False
1105 for child_pkg in atom_pkg_graph.child_nodes(atom):
1106 existing_node, matches = \
1107 @@ -1833,7 +2146,7 @@ class depgraph(object):
1108 else:
1109 normal_atoms.append(atom)
1110
1111 - for atom in chain(conflict_atoms, normal_atoms):
1112 + for atom in chain(abi_atoms, conflict_atoms, normal_atoms):
1113 child_pkgs = atom_pkg_graph.child_nodes(atom)
1114 # if more than one child, yield highest version
1115 if len(child_pkgs) > 1:
1116 @@ -1846,34 +2159,22 @@ class depgraph(object):
1117 Yields non-disjunctive deps. Raises InvalidDependString when
1118 necessary.
1119 """
1120 - i = 0
1121 - while i < len(dep_struct):
1122 - x = dep_struct[i]
1123 + for x in dep_struct:
1124 if isinstance(x, list):
1125 - for y in self._queue_disjunctive_deps(
1126 - pkg, dep_root, dep_priority, x):
1127 - yield y
1128 - elif x == "||":
1129 - self._queue_disjunction(pkg, dep_root, dep_priority,
1130 - [ x, dep_struct[ i + 1 ] ] )
1131 - i += 1
1132 + if x and x[0] == "||":
1133 + self._queue_disjunction(pkg, dep_root, dep_priority, [x])
1134 + else:
1135 + for y in self._queue_disjunctive_deps(
1136 + pkg, dep_root, dep_priority, x):
1137 + yield y
1138 else:
1139 - try:
1140 - x = portage.dep.Atom(x, eapi=pkg.metadata["EAPI"])
1141 - except portage.exception.InvalidAtom:
1142 - if not pkg.installed:
1143 - raise portage.exception.InvalidDependString(
1144 - "invalid atom: '%s'" % x)
1145 + # Note: Eventually this will check for PROPERTIES=virtual
1146 + # or whatever other metadata gets implemented for this
1147 + # purpose.
1148 + if x.cp.startswith('virtual/'):
1149 + self._queue_disjunction(pkg, dep_root, dep_priority, [x])
1150 else:
1151 - # Note: Eventually this will check for PROPERTIES=virtual
1152 - # or whatever other metadata gets implemented for this
1153 - # purpose.
1154 - if x.cp.startswith('virtual/'):
1155 - self._queue_disjunction( pkg, dep_root,
1156 - dep_priority, [ str(x) ] )
1157 - else:
1158 - yield str(x)
1159 - i += 1
1160 + yield x
1161
1162 def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
1163 self._dynamic_config._dep_disjunctive_stack.append(
1164 @@ -1886,10 +2187,8 @@ class depgraph(object):
1165 """
1166 pkg, dep_root, dep_priority, dep_struct = \
1167 self._dynamic_config._dep_disjunctive_stack.pop()
1168 - dep_string = portage.dep.paren_enclose(dep_struct,
1169 - unevaluated_atom=True)
1170 if not self._add_pkg_dep_string(
1171 - pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
1172 + pkg, dep_root, dep_priority, dep_struct, allow_unsatisfied):
1173 return 0
1174 return 1
1175
1176 @@ -1999,8 +2298,18 @@ class depgraph(object):
1177 writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
1178 return 0, myfavorites
1179 mytbz2=portage.xpak.tbz2(x)
1180 - mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
1181 - if os.path.realpath(x) != \
1182 + mykey = None
1183 + cat = mytbz2.getfile("CATEGORY")
1184 + if cat is not None:
1185 + cat = _unicode_decode(cat.strip(),
1186 + encoding=_encodings['repo.content'])
1187 + mykey = cat + "/" + os.path.basename(x)[:-5]
1188 +
1189 + if mykey is None:
1190 + writemsg(colorize("BAD", "\n*** Package is missing CATEGORY metadata: %s.\n\n" % x), noiselevel=-1)
1191 + self._dynamic_config._skip_restart = True
1192 + return 0, myfavorites
1193 + elif os.path.realpath(x) != \
1194 os.path.realpath(bindb.bintree.getname(mykey)):
1195 writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
1196 self._dynamic_config._skip_restart = True
1197 @@ -2193,13 +2502,8 @@ class depgraph(object):
1198 return 0, []
1199
1200 for cpv in owners:
1201 - slot = vardb.aux_get(cpv, ["SLOT"])[0]
1202 - if not slot:
1203 - # portage now masks packages with missing slot, but it's
1204 - # possible that one was installed by an older version
1205 - atom = Atom(portage.cpv_getkey(cpv))
1206 - else:
1207 - atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
1208 + pkg = vardb._pkg_str(cpv, None)
1209 + atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
1210 args.append(AtomArg(arg=atom, atom=atom,
1211 root_config=root_config))
1212
1213 @@ -2247,6 +2551,7 @@ class depgraph(object):
1214 args = revised_greedy_args
1215 del revised_greedy_args
1216
1217 + args.extend(self._gen_reinstall_sets())
1218 self._set_args(args)
1219
1220 myfavorites = set(myfavorites)
1221 @@ -2254,7 +2559,8 @@ class depgraph(object):
1222 if isinstance(arg, (AtomArg, PackageArg)):
1223 myfavorites.add(arg.atom)
1224 elif isinstance(arg, SetArg):
1225 - myfavorites.add(arg.arg)
1226 + if not arg.internal:
1227 + myfavorites.add(arg.arg)
1228 myfavorites = list(myfavorites)
1229
1230 if debug:
1231 @@ -2264,7 +2570,33 @@ class depgraph(object):
1232 self._dynamic_config._initial_arg_list = args[:]
1233
1234 return self._resolve(myfavorites)
1235 -
1236 +
1237 + def _gen_reinstall_sets(self):
1238 +
1239 + atom_list = []
1240 + for root, atom in self._rebuild.rebuild_list:
1241 + atom_list.append((root, '__auto_rebuild__', atom))
1242 + for root, atom in self._rebuild.reinstall_list:
1243 + atom_list.append((root, '__auto_reinstall__', atom))
1244 + for root, atom in self._dynamic_config._slot_operator_replace_installed:
1245 + atom_list.append((root, '__auto_slot_operator_replace_installed__', atom))
1246 +
1247 + set_dict = {}
1248 + for root, set_name, atom in atom_list:
1249 + set_dict.setdefault((root, set_name), []).append(atom)
1250 +
1251 + for (root, set_name), atoms in set_dict.items():
1252 + yield SetArg(arg=(SETPREFIX + set_name),
1253 + # Set reset_depth=False here, since we don't want these
1254 + # special sets to interact with depth calculations (see
1255 + # the emerge --deep=DEPTH option), though we want them
1256 + # to behave like normal arguments in most other respects.
1257 + pset=InternalPackageSet(initial_atoms=atoms),
1258 + force_reinstall=True,
1259 + internal=True,
1260 + reset_depth=False,
1261 + root_config=self._frozen_config.roots[root])
1262 +
1263 def _resolve(self, myfavorites):
1264 """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
1265 call self._creategraph to process their deps and return
1266 @@ -2276,10 +2608,7 @@ class depgraph(object):
1267 pprovideddict = pkgsettings.pprovideddict
1268 virtuals = pkgsettings.getvirtuals()
1269 args = self._dynamic_config._initial_arg_list[:]
1270 - for root, atom in chain(self._rebuild.rebuild_list,
1271 - self._rebuild.reinstall_list):
1272 - args.append(AtomArg(arg=atom, atom=atom,
1273 - root_config=self._frozen_config.roots[root]))
1274 +
1275 for arg in self._expand_set_args(args, add_to_digraph=True):
1276 for atom in arg.pset.getAtoms():
1277 self._spinner_update()
1278 @@ -2389,22 +2718,12 @@ class depgraph(object):
1279 except self._unknown_internal_error:
1280 return False, myfavorites
1281
1282 - digraph_set = frozenset(self._dynamic_config.digraph)
1283 -
1284 - if digraph_set.intersection(
1285 - self._dynamic_config._needed_unstable_keywords) or \
1286 - digraph_set.intersection(
1287 - self._dynamic_config._needed_p_mask_changes) or \
1288 - digraph_set.intersection(
1289 - self._dynamic_config._needed_use_config_changes) or \
1290 - digraph_set.intersection(
1291 - self._dynamic_config._needed_license_changes) :
1292 - #We failed if the user needs to change the configuration
1293 - self._dynamic_config._success_without_autounmask = True
1294 + if (self._dynamic_config._slot_collision_info and
1295 + not self._accept_blocker_conflicts()) or \
1296 + (self._dynamic_config._allow_backtracking and
1297 + "slot conflict" in self._dynamic_config._backtrack_infos):
1298 return False, myfavorites
1299
1300 - digraph_set = None
1301 -
1302 if self._rebuild.trigger_rebuilds():
1303 backtrack_infos = self._dynamic_config._backtrack_infos
1304 config = backtrack_infos.setdefault("config", {})
1305 @@ -2413,6 +2732,32 @@ class depgraph(object):
1306 self._dynamic_config._need_restart = True
1307 return False, myfavorites
1308
1309 + if "config" in self._dynamic_config._backtrack_infos and \
1310 + ("slot_operator_mask_built" in self._dynamic_config._backtrack_infos["config"] or
1311 + "slot_operator_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
1312 + self.need_restart():
1313 + return False, myfavorites
1314 +
1315 + # Any failures except those due to autounmask *alone* should return
1316 + # before this point, since the success_without_autounmask flag that's
1317 + # set below is reserved for cases where there are *zero* other
1318 + # problems. For reference, see backtrack_depgraph, where it skips the
1319 + # get_best_run() call when success_without_autounmask is True.
1320 +
1321 + digraph_nodes = self._dynamic_config.digraph.nodes
1322 +
1323 + if any(x in digraph_nodes for x in
1324 + self._dynamic_config._needed_unstable_keywords) or \
1325 + any(x in digraph_nodes for x in
1326 + self._dynamic_config._needed_p_mask_changes) or \
1327 + any(x in digraph_nodes for x in
1328 + self._dynamic_config._needed_use_config_changes) or \
1329 + any(x in digraph_nodes for x in
1330 + self._dynamic_config._needed_license_changes) :
1331 + #We failed if the user needs to change the configuration
1332 + self._dynamic_config._success_without_autounmask = True
1333 + return False, myfavorites
1334 +
1335 # We're true here unless we are missing binaries.
1336 return (True, myfavorites)
1337
1338 @@ -2485,14 +2830,15 @@ class depgraph(object):
1339 slots = set()
1340 for cpv in vardb.match(atom):
1341 # don't mix new virtuals with old virtuals
1342 - if portage.cpv_getkey(cpv) == highest_pkg.cp:
1343 - slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
1344 + pkg = vardb._pkg_str(cpv, None)
1345 + if pkg.cp == highest_pkg.cp:
1346 + slots.add(pkg.slot)
1347
1348 - slots.add(highest_pkg.metadata["SLOT"])
1349 + slots.add(highest_pkg.slot)
1350 if len(slots) == 1:
1351 return []
1352 greedy_pkgs = []
1353 - slots.remove(highest_pkg.metadata["SLOT"])
1354 + slots.remove(highest_pkg.slot)
1355 while slots:
1356 slot = slots.pop()
1357 slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
1358 @@ -2506,7 +2852,7 @@ class depgraph(object):
1359 return [pkg.slot_atom for pkg in greedy_pkgs]
1360
1361 blockers = {}
1362 - blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
1363 + blocker_dep_keys = Package._dep_keys
1364 for pkg in greedy_pkgs + [highest_pkg]:
1365 dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
1366 try:
1367 @@ -2567,6 +2913,22 @@ class depgraph(object):
1368 """This will raise InvalidDependString if necessary. If trees is
1369 None then self._dynamic_config._filtered_trees is used."""
1370
1371 + if not isinstance(depstring, list):
1372 + eapi = None
1373 + is_valid_flag = None
1374 + if parent is not None:
1375 + eapi = parent.metadata['EAPI']
1376 + if not parent.installed:
1377 + is_valid_flag = parent.iuse.is_valid_flag
1378 + depstring = portage.dep.use_reduce(depstring,
1379 + uselist=myuse, opconvert=True, token_class=Atom,
1380 + is_valid_flag=is_valid_flag, eapi=eapi)
1381 +
1382 + if (self._dynamic_config.myparams.get(
1383 + "ignore_built_slot_operator_deps", "n") == "y" and
1384 + parent and parent.built):
1385 + ignore_built_slot_operator_deps(depstring)
1386 +
1387 pkgsettings = self._frozen_config.pkgsettings[root]
1388 if trees is None:
1389 trees = self._dynamic_config._filtered_trees
1390 @@ -2751,7 +3113,7 @@ class depgraph(object):
1391
1392 if target_atom is not None and isinstance(node, Package):
1393 affecting_use = set()
1394 - for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
1395 + for dep_str in Package._dep_keys:
1396 try:
1397 affecting_use.update(extract_affecting_use(
1398 node.metadata[dep_str], target_atom,
1399 @@ -2832,13 +3194,13 @@ class depgraph(object):
1400 if priorities is None:
1401 # This edge comes from _parent_atoms and was not added to
1402 # the graph, and _parent_atoms does not contain priorities.
1403 - dep_strings.add(node.metadata["DEPEND"])
1404 - dep_strings.add(node.metadata["RDEPEND"])
1405 - dep_strings.add(node.metadata["PDEPEND"])
1406 + for k in Package._dep_keys:
1407 + dep_strings.add(node.metadata[k])
1408 else:
1409 for priority in priorities:
1410 if priority.buildtime:
1411 - dep_strings.add(node.metadata["DEPEND"])
1412 + for k in Package._buildtime_keys:
1413 + dep_strings.add(node.metadata[k])
1414 if priority.runtime:
1415 dep_strings.add(node.metadata["RDEPEND"])
1416 if priority.runtime_post:
1417 @@ -2930,7 +3292,7 @@ class depgraph(object):
1418
1419
1420 def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
1421 - check_backtrack=False, check_autounmask_breakage=False):
1422 + check_backtrack=False, check_autounmask_breakage=False, show_req_use=None):
1423 """
1424 When check_backtrack=True, no output is produced and
1425 the method either returns or raises _backtrack_mask if
1426 @@ -3037,7 +3399,8 @@ class depgraph(object):
1427 if not check_required_use(
1428 pkg.metadata["REQUIRED_USE"],
1429 self._pkg_use_enabled(pkg),
1430 - pkg.iuse.is_valid_flag):
1431 + pkg.iuse.is_valid_flag,
1432 + eapi=pkg.metadata["EAPI"]):
1433 required_use_unsatisfied.append(pkg)
1434 continue
1435 root_slot = (pkg.root, pkg.slot_atom)
1436 @@ -3082,7 +3445,7 @@ class depgraph(object):
1437
1438 untouchable_flags = \
1439 frozenset(chain(pkg.use.mask, pkg.use.force))
1440 - if untouchable_flags.intersection(
1441 + if any(x in untouchable_flags for x in
1442 chain(need_enable, need_disable)):
1443 continue
1444
1445 @@ -3096,8 +3459,10 @@ class depgraph(object):
1446 new_use.add(flag)
1447 for flag in need_disable:
1448 new_use.discard(flag)
1449 - if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
1450 - not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
1451 + if check_required_use(required_use, old_use,
1452 + pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]) \
1453 + and not check_required_use(required_use, new_use,
1454 + pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]):
1455 required_use_warning = ", this change violates use flag constraints " + \
1456 "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
1457
1458 @@ -3132,7 +3497,7 @@ class depgraph(object):
1459
1460 untouchable_flags = \
1461 frozenset(chain(myparent.use.mask, myparent.use.force))
1462 - if untouchable_flags.intersection(involved_flags):
1463 + if any(x in untouchable_flags for x in involved_flags):
1464 continue
1465
1466 required_use = myparent.metadata.get("REQUIRED_USE")
1467 @@ -3145,8 +3510,12 @@ class depgraph(object):
1468 new_use.discard(flag)
1469 else:
1470 new_use.add(flag)
1471 - if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
1472 - not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
1473 + if check_required_use(required_use, old_use,
1474 + myparent.iuse.is_valid_flag,
1475 + eapi=myparent.metadata["EAPI"]) and \
1476 + not check_required_use(required_use, new_use,
1477 + myparent.iuse.is_valid_flag,
1478 + eapi=myparent.metadata["EAPI"]):
1479 required_use_warning = ", this change violates use flag constraints " + \
1480 "defined by %s: '%s'" % (myparent.cpv, \
1481 human_readable_required_use(required_use))
1482 @@ -3211,62 +3580,72 @@ class depgraph(object):
1483
1484 mask_docs = False
1485
1486 - if required_use_unsatisfied:
1487 + if show_req_use is None and required_use_unsatisfied:
1488 # We have an unmasked package that only requires USE adjustment
1489 # in order to satisfy REQUIRED_USE, and nothing more. We assume
1490 # that the user wants the latest version, so only the first
1491 # instance is displayed.
1492 - pkg = required_use_unsatisfied[0]
1493 + show_req_use = required_use_unsatisfied[0]
1494 + self._dynamic_config._needed_required_use_config_changes[pkg] = (new_use, new_changes)
1495 + backtrack_infos = self._dynamic_config._backtrack_infos
1496 + backtrack_infos.setdefault("config", {})
1497 + backtrack_infos["config"].setdefault("needed_required_use_config_changes", [])
1498 + backtrack_infos["config"]["needed_required_use_config_changes"].append((pkg, (new_use, new_changes)))
1499 +
1500 + if show_req_use is not None:
1501 +
1502 + pkg = show_req_use
1503 output_cpv = pkg.cpv + _repo_separator + pkg.repo
1504 - writemsg_stdout("\n!!! " + \
1505 + writemsg("\n!!! " + \
1506 colorize("BAD", "The ebuild selected to satisfy ") + \
1507 colorize("INFORM", xinfo) + \
1508 colorize("BAD", " has unmet requirements.") + "\n",
1509 noiselevel=-1)
1510 use_display = pkg_use_display(pkg, self._frozen_config.myopts)
1511 - writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
1512 + writemsg("- %s %s\n" % (output_cpv, use_display),
1513 noiselevel=-1)
1514 - writemsg_stdout("\n The following REQUIRED_USE flag constraints " + \
1515 + writemsg("\n The following REQUIRED_USE flag constraints " + \
1516 "are unsatisfied:\n", noiselevel=-1)
1517 reduced_noise = check_required_use(
1518 pkg.metadata["REQUIRED_USE"],
1519 self._pkg_use_enabled(pkg),
1520 - pkg.iuse.is_valid_flag).tounicode()
1521 - writemsg_stdout(" %s\n" % \
1522 + pkg.iuse.is_valid_flag,
1523 + eapi=pkg.metadata["EAPI"]).tounicode()
1524 + writemsg(" %s\n" % \
1525 human_readable_required_use(reduced_noise),
1526 noiselevel=-1)
1527 normalized_required_use = \
1528 " ".join(pkg.metadata["REQUIRED_USE"].split())
1529 if reduced_noise != normalized_required_use:
1530 - writemsg_stdout("\n The above constraints " + \
1531 + writemsg("\n The above constraints " + \
1532 "are a subset of the following complete expression:\n",
1533 noiselevel=-1)
1534 - writemsg_stdout(" %s\n" % \
1535 + writemsg(" %s\n" % \
1536 human_readable_required_use(normalized_required_use),
1537 noiselevel=-1)
1538 - writemsg_stdout("\n", noiselevel=-1)
1539 + writemsg("\n", noiselevel=-1)
1540
1541 elif show_missing_use:
1542 - writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
1543 - writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
1544 + writemsg("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
1545 + writemsg("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
1546 for pkg, mreasons in show_missing_use:
1547 - writemsg_stdout("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
1548 + writemsg("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
1549
1550 elif masked_packages:
1551 - writemsg_stdout("\n!!! " + \
1552 + writemsg("\n!!! " + \
1553 colorize("BAD", "All ebuilds that could satisfy ") + \
1554 colorize("INFORM", xinfo) + \
1555 colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
1556 - writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
1557 + writemsg("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
1558 have_eapi_mask = show_masked_packages(masked_packages)
1559 if have_eapi_mask:
1560 - writemsg_stdout("\n", noiselevel=-1)
1561 + writemsg("\n", noiselevel=-1)
1562 msg = ("The current version of portage supports " + \
1563 "EAPI '%s'. You must upgrade to a newer version" + \
1564 " of portage before EAPI masked packages can" + \
1565 " be installed.") % portage.const.EAPI
1566 - writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
1567 - writemsg_stdout("\n", noiselevel=-1)
1568 + writemsg("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
1569 + writemsg("\n", noiselevel=-1)
1570 mask_docs = True
1571 else:
1572 cp_exists = False
1573 @@ -3276,7 +3655,7 @@ class depgraph(object):
1574 cp_exists = True
1575 break
1576
1577 - writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
1578 + writemsg("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
1579 if isinstance(myparent, AtomArg) and \
1580 not cp_exists and \
1581 self._frozen_config.myopts.get(
1582 @@ -3286,7 +3665,7 @@ class depgraph(object):
1583 if cat == "null":
1584 cat = None
1585
1586 - writemsg_stdout("\nemerge: searching for similar names..."
1587 + writemsg("\nemerge: searching for similar names..."
1588 , noiselevel=-1)
1589
1590 all_cp = set()
1591 @@ -3334,16 +3713,16 @@ class depgraph(object):
1592 matches = matches_orig_case
1593
1594 if len(matches) == 1:
1595 - writemsg_stdout("\nemerge: Maybe you meant " + matches[0] + "?\n"
1596 + writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
1597 , noiselevel=-1)
1598 elif len(matches) > 1:
1599 - writemsg_stdout(
1600 + writemsg(
1601 "\nemerge: Maybe you meant any of these: %s?\n" % \
1602 (", ".join(matches),), noiselevel=-1)
1603 else:
1604 # Generally, this would only happen if
1605 # all dbapis are empty.
1606 - writemsg_stdout(" nothing similar found.\n"
1607 + writemsg(" nothing similar found.\n"
1608 , noiselevel=-1)
1609 msg = []
1610 if not isinstance(myparent, AtomArg):
1611 @@ -3356,12 +3735,12 @@ class depgraph(object):
1612 (node)), node_type))
1613
1614 if msg:
1615 - writemsg_stdout("\n".join(msg), noiselevel=-1)
1616 - writemsg_stdout("\n", noiselevel=-1)
1617 + writemsg("\n".join(msg), noiselevel=-1)
1618 + writemsg("\n", noiselevel=-1)
1619
1620 if mask_docs:
1621 show_mask_docs()
1622 - writemsg_stdout("\n", noiselevel=-1)
1623 + writemsg("\n", noiselevel=-1)
1624
1625 def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
1626 for db, pkg_type, built, installed, db_keys in \
1627 @@ -3379,60 +3758,12 @@ class depgraph(object):
1628 """
1629
1630 db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
1631 -
1632 - if hasattr(db, "xmatch"):
1633 - # For portdbapi we match only against the cpv, in order
1634 - # to bypass unnecessary cache access for things like IUSE
1635 - # and SLOT. Later, we cache the metadata in a Package
1636 - # instance, and use that for further matching. This
1637 - # optimization is especially relevant since
1638 - # pordbapi.aux_get() does not cache calls that have
1639 - # myrepo or mytree arguments.
1640 - cpv_list = db.xmatch("match-all-cpv-only", atom)
1641 - else:
1642 - cpv_list = db.match(atom)
1643 -
1644 - # USE=multislot can make an installed package appear as if
1645 - # it doesn't satisfy a slot dependency. Rebuilding the ebuild
1646 - # won't do any good as long as USE=multislot is enabled since
1647 - # the newly built package still won't have the expected slot.
1648 - # Therefore, assume that such SLOT dependencies are already
1649 - # satisfied rather than forcing a rebuild.
1650 + atom_exp = dep_expand(atom, mydb=db, settings=root_config.settings)
1651 + cp_list = db.cp_list(atom_exp.cp)
1652 + matched_something = False
1653 installed = pkg_type == 'installed'
1654 - if installed and not cpv_list and atom.slot:
1655 -
1656 - if "remove" in self._dynamic_config.myparams:
1657 - # We need to search the portdbapi, which is not in our
1658 - # normal dbs list, in order to find the real SLOT.
1659 - portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
1660 - db_keys = list(portdb._aux_cache_keys)
1661 - dbs = [(portdb, "ebuild", False, False, db_keys)]
1662 - else:
1663 - dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
1664
1665 - for cpv in db.match(atom.cp):
1666 - slot_available = False
1667 - for other_db, other_type, other_built, \
1668 - other_installed, other_keys in dbs:
1669 - try:
1670 - if atom.slot == \
1671 - other_db.aux_get(cpv, ["SLOT"])[0]:
1672 - slot_available = True
1673 - break
1674 - except KeyError:
1675 - pass
1676 - if not slot_available:
1677 - continue
1678 - inst_pkg = self._pkg(cpv, "installed",
1679 - root_config, installed=installed, myrepo = atom.repo)
1680 - # Remove the slot from the atom and verify that
1681 - # the package matches the resulting atom.
1682 - if portage.match_from_list(
1683 - atom.without_slot, [inst_pkg]):
1684 - yield inst_pkg
1685 - return
1686 -
1687 - if cpv_list:
1688 + if cp_list:
1689 atom_set = InternalPackageSet(initial_atoms=(atom,),
1690 allow_repo=True)
1691 if atom.repo is None and hasattr(db, "getRepositories"):
1692 @@ -3441,8 +3772,13 @@ class depgraph(object):
1693 repo_list = [atom.repo]
1694
1695 # descending order
1696 - cpv_list.reverse()
1697 - for cpv in cpv_list:
1698 + cp_list.reverse()
1699 + for cpv in cp_list:
1700 + # Call match_from_list on one cpv at a time, in order
1701 + # to avoid unnecessary match_from_list comparisons on
1702 + # versions that are never yielded from this method.
1703 + if not match_from_list(atom_exp, [cpv]):
1704 + continue
1705 for repo in repo_list:
1706
1707 try:
1708 @@ -3459,26 +3795,65 @@ class depgraph(object):
1709 # Make sure that cpv from the current repo satisfies the atom.
1710 # This might not be the case if there are several repos with
1711 # the same cpv, but different metadata keys, like SLOT.
1712 - # Also, for portdbapi, parts of the match that require
1713 - # metadata access are deferred until we have cached the
1714 - # metadata in a Package instance.
1715 + # Also, parts of the match that require metadata access
1716 + # are deferred until we have cached the metadata in a
1717 + # Package instance.
1718 if not atom_set.findAtomForPackage(pkg,
1719 modified_use=self._pkg_use_enabled(pkg)):
1720 continue
1721 + matched_something = True
1722 yield pkg
1723
1724 + # USE=multislot can make an installed package appear as if
1725 + # it doesn't satisfy a slot dependency. Rebuilding the ebuild
1726 + # won't do any good as long as USE=multislot is enabled since
1727 + # the newly built package still won't have the expected slot.
1728 + # Therefore, assume that such SLOT dependencies are already
1729 + # satisfied rather than forcing a rebuild.
1730 + if not matched_something and installed and atom.slot is not None:
1731 +
1732 + if "remove" in self._dynamic_config.myparams:
1733 + # We need to search the portdbapi, which is not in our
1734 + # normal dbs list, in order to find the real SLOT.
1735 + portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
1736 + db_keys = list(portdb._aux_cache_keys)
1737 + dbs = [(portdb, "ebuild", False, False, db_keys)]
1738 + else:
1739 + dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
1740 +
1741 + cp_list = db.cp_list(atom_exp.cp)
1742 + if cp_list:
1743 + atom_set = InternalPackageSet(
1744 + initial_atoms=(atom.without_slot,), allow_repo=True)
1745 + atom_exp_without_slot = atom_exp.without_slot
1746 + cp_list.reverse()
1747 + for cpv in cp_list:
1748 + if not match_from_list(atom_exp_without_slot, [cpv]):
1749 + continue
1750 + slot_available = False
1751 + for other_db, other_type, other_built, \
1752 + other_installed, other_keys in dbs:
1753 + try:
1754 + if atom.slot == \
1755 + other_db._pkg_str(_unicode(cpv), None).slot:
1756 + slot_available = True
1757 + break
1758 + except (KeyError, InvalidData):
1759 + pass
1760 + if not slot_available:
1761 + continue
1762 + inst_pkg = self._pkg(cpv, "installed",
1763 + root_config, installed=installed, myrepo=atom.repo)
1764 + # Remove the slot from the atom and verify that
1765 + # the package matches the resulting atom.
1766 + if atom_set.findAtomForPackage(inst_pkg):
1767 + yield inst_pkg
1768 + return
1769 +
1770 def _select_pkg_highest_available(self, root, atom, onlydeps=False):
1771 - cache_key = (root, atom, atom.unevaluated_atom, onlydeps)
1772 + cache_key = (root, atom, atom.unevaluated_atom, onlydeps, self._dynamic_config._autounmask)
1773 ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
1774 if ret is not None:
1775 - pkg, existing = ret
1776 - if pkg and not existing:
1777 - existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
1778 - if existing and existing == pkg:
1779 - # Update the cache to reflect that the
1780 - # package has been added to the graph.
1781 - ret = pkg, pkg
1782 - self._dynamic_config._highest_pkg_cache[cache_key] = ret
1783 return ret
1784 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
1785 self._dynamic_config._highest_pkg_cache[cache_key] = ret
1786 @@ -3495,21 +3870,55 @@ class depgraph(object):
1787 True if the user has not explicitly requested for this package
1788 to be replaced (typically via an atom on the command line).
1789 """
1790 - if "selective" not in self._dynamic_config.myparams and \
1791 - pkg.root == self._frozen_config.target_root:
1792 - if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
1793 - modified_use=self._pkg_use_enabled(pkg)):
1794 - return True
1795 - try:
1796 - next(self._iter_atoms_for_pkg(pkg))
1797 - except StopIteration:
1798 - pass
1799 - except portage.exception.InvalidDependString:
1800 - pass
1801 - else:
1802 + if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
1803 + modified_use=self._pkg_use_enabled(pkg)):
1804 + return True
1805 +
1806 + arg = False
1807 + try:
1808 + for arg, atom in self._iter_atoms_for_pkg(pkg):
1809 + if arg.force_reinstall:
1810 + return False
1811 + except InvalidDependString:
1812 + pass
1813 +
1814 + if "selective" in self._dynamic_config.myparams:
1815 + return True
1816 +
1817 + return not arg
1818 +
1819 + def _equiv_ebuild_visible(self, pkg, autounmask_level=None):
1820 + try:
1821 + pkg_eb = self._pkg(
1822 + pkg.cpv, "ebuild", pkg.root_config, myrepo=pkg.repo)
1823 + except portage.exception.PackageNotFound:
1824 + pkg_eb_visible = False
1825 + for pkg_eb in self._iter_match_pkgs(pkg.root_config,
1826 + "ebuild", Atom("=%s" % (pkg.cpv,))):
1827 + if self._pkg_visibility_check(pkg_eb, autounmask_level):
1828 + pkg_eb_visible = True
1829 + break
1830 + if not pkg_eb_visible:
1831 return False
1832 + else:
1833 + if not self._pkg_visibility_check(pkg_eb, autounmask_level):
1834 + return False
1835 +
1836 return True
1837
1838 + def _equiv_binary_installed(self, pkg):
1839 + build_time = pkg.metadata.get('BUILD_TIME')
1840 + if not build_time:
1841 + return False
1842 +
1843 + try:
1844 + inst_pkg = self._pkg(pkg.cpv, "installed",
1845 + pkg.root_config, installed=True)
1846 + except PackageNotFound:
1847 + return False
1848 +
1849 + return build_time == inst_pkg.metadata.get('BUILD_TIME')
1850 +
1851 class _AutounmaskLevel(object):
1852 __slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
1853 "allow_missing_keywords", "allow_unmasks")
1854 @@ -3525,7 +3934,8 @@ class depgraph(object):
1855 """
1856 Iterate over the different allowed things to unmask.
1857
1858 - 1. USE
1859 + 0. USE
1860 + 1. USE + license
1861 2. USE + ~arch + license
1862 3. USE + ~arch + license + missing keywords
1863 4. USE + ~arch + license + masks
1864 @@ -3544,8 +3954,12 @@ class depgraph(object):
1865 autounmask_level = self._AutounmaskLevel()
1866
1867 autounmask_level.allow_use_changes = True
1868 + yield autounmask_level
1869
1870 - for only_use_changes in (True, False):
1871 + autounmask_level.allow_license_changes = True
1872 + yield autounmask_level
1873 +
1874 + for only_use_changes in (False,):
1875
1876 autounmask_level.allow_unstable_keywords = (not only_use_changes)
1877 autounmask_level.allow_license_changes = (not only_use_changes)
1878 @@ -3573,7 +3987,7 @@ class depgraph(object):
1879 pkg = None
1880
1881 if self._dynamic_config._autounmask is True:
1882 - pkg = None
1883 + reset_pkg(pkg)
1884
1885 for autounmask_level in self._autounmask_levels():
1886 if pkg is not None:
1887 @@ -3734,7 +4148,7 @@ class depgraph(object):
1888 if pkg not in self._dynamic_config.digraph.nodes:
1889 return False
1890
1891 - for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
1892 + for key in Package._dep_keys + ("LICENSE",):
1893 dep = pkg.metadata[key]
1894 old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
1895 new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
1896 @@ -3749,7 +4163,7 @@ class depgraph(object):
1897 new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
1898 for ppkg, atom in parent_atoms:
1899 if not atom.use or \
1900 - not atom.use.required.intersection(changes):
1901 + not any(x in atom.use.required for x in changes):
1902 continue
1903 else:
1904 return True
1905 @@ -3759,12 +4173,14 @@ class depgraph(object):
1906 if new_changes != old_changes:
1907 #Don't do the change if it violates REQUIRED_USE.
1908 required_use = pkg.metadata.get("REQUIRED_USE")
1909 - if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
1910 - not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
1911 + if required_use and check_required_use(required_use, old_use,
1912 + pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]) and \
1913 + not check_required_use(required_use, new_use,
1914 + pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]):
1915 return old_use
1916
1917 - if pkg.use.mask.intersection(new_changes) or \
1918 - pkg.use.force.intersection(new_changes):
1919 + if any(x in pkg.use.mask for x in new_changes) or \
1920 + any(x in pkg.use.force for x in new_changes):
1921 return old_use
1922
1923 self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
1924 @@ -3776,199 +4192,6 @@ class depgraph(object):
1925 self._dynamic_config._need_restart = True
1926 return new_use
1927
1928 - def change_required_use(self, pkg):
1929 - """
1930 - Checks if the use flags listed in 'use' satisfy all
1931 - constraints specified in 'constraints'.
1932 -
1933 - @param required_use: REQUIRED_USE string
1934 - @type required_use: String
1935 - @param use: Enabled use flags
1936 - @param use: List
1937 - @param iuse_match: Callable that takes a single flag argument and returns
1938 - True if the flag is matched, false otherwise,
1939 - @param iuse_match: Callable
1940 - @rtype: Bool
1941 - @return: Indicates if REQUIRED_USE constraints are satisfied
1942 - """
1943 -
1944 - required_use = pkg.metadata["REQUIRED_USE"]
1945 - use =self._pkg_use_enabled(pkg)
1946 - iuse_match = pkg.iuse.is_valid_flag
1947 -
1948 - def is_active(token):
1949 - if token.startswith("!"):
1950 - flag = token[1:]
1951 - is_negated = True
1952 - else:
1953 - flag = token
1954 - is_negated = False
1955 -
1956 - if not flag or not iuse_match(flag):
1957 - msg = _("USE flag '%s' is not in IUSE") \
1958 - % (flag,)
1959 - e = InvalidData(msg, category='IUSE.missing')
1960 - raise InvalidDependString(msg, errors=(e,))
1961 -
1962 - return (flag in use and not is_negated) or \
1963 - (flag not in use and is_negated)
1964 -
1965 - def is_satisfied(operator, argument):
1966 - if not argument:
1967 - #|| ( ) -> True
1968 - return True
1969 -
1970 - if operator == "||":
1971 - return (True in argument)
1972 - elif operator == "^^":
1973 - return (argument.count(True) == 1)
1974 - elif operator[-1] == "?":
1975 - return (False not in argument)
1976 -
1977 - mysplit = required_use.split()
1978 - level = 0
1979 - stack = [[]]
1980 - tree = portage.dep._RequiredUseBranch()
1981 - node = tree
1982 - need_bracket = False
1983 - target_use = {}
1984 -
1985 - for token in mysplit:
1986 - if token == "(":
1987 - if not need_bracket:
1988 - child = portage.dep._RequiredUseBranch(parent=node)
1989 - node._children.append(child)
1990 - node = child
1991 -
1992 - need_bracket = False
1993 - stack.append([])
1994 - level += 1
1995 - elif token == ")":
1996 - if need_bracket:
1997 - raise InvalidDependString(
1998 - _("malformed syntax: '%s'") % required_use)
1999 - if level > 0:
2000 - level -= 1
2001 - l = stack.pop()
2002 - op = None
2003 - if stack[level]:
2004 - if stack[level][-1] in ("||", "^^"):
2005 - op = stack[level].pop()
2006 - satisfied = is_satisfied(op, l)
2007 - stack[level].append(satisfied)
2008 - node._satisfied = satisfied
2009 -
2010 - elif not isinstance(stack[level][-1], bool) and \
2011 - stack[level][-1][-1] == "?":
2012 - op = stack[level].pop()
2013 - if is_active(op[:-1]):
2014 - satisfied = is_satisfied(op, l)
2015 - stack[level].append(satisfied)
2016 - node._satisfied = satisfied
2017 - else:
2018 - node._satisfied = True
2019 - last_node = node._parent._children.pop()
2020 - if last_node is not node:
2021 - raise AssertionError(
2022 - "node is not last child of parent")
2023 - node = node._parent
2024 - continue
2025 -
2026 - if op is None:
2027 - satisfied = False not in l
2028 - node._satisfied = satisfied
2029 - if l:
2030 - stack[level].append(satisfied)
2031 -
2032 - if len(node._children) <= 1 or \
2033 - node._parent._operator not in ("||", "^^"):
2034 - last_node = node._parent._children.pop()
2035 - if last_node is not node:
2036 - raise AssertionError(
2037 - "node is not last child of parent")
2038 - for child in node._children:
2039 - node._parent._children.append(child)
2040 - if isinstance(child, portage.dep._RequiredUseBranch):
2041 - child._parent = node._parent
2042 -
2043 - elif not node._children:
2044 - last_node = node._parent._children.pop()
2045 - if last_node is not node:
2046 - raise AssertionError(
2047 - "node is not last child of parent")
2048 -
2049 - elif len(node._children) == 1 and op in ("||", "^^"):
2050 - last_node = node._parent._children.pop()
2051 - if last_node is not node:
2052 - raise AssertionError(
2053 - "node is not last child of parent")
2054 - node._parent._children.append(node._children[0])
2055 - if isinstance(node._children[0], portage.dep._RequiredUseBranch):
2056 - node._children[0]._parent = node._parent
2057 - node = node._children[0]
2058 - if node._operator is None and \
2059 - node._parent._operator not in ("||", "^^"):
2060 - last_node = node._parent._children.pop()
2061 - if last_node is not node:
2062 - raise AssertionError(
2063 - "node is not last child of parent")
2064 - for child in node._children:
2065 - node._parent._children.append(child)
2066 - if isinstance(child, portage.dep._RequiredUseBranch):
2067 - child._parent = node._parent
2068 -
2069 - node = node._parent
2070 - else:
2071 - raise InvalidDependString(
2072 - _("malformed syntax: '%s'") % required_use)
2073 - elif token in ("||", "^^"):
2074 - if need_bracket:
2075 - raise InvalidDependString(
2076 - _("malformed syntax: '%s'") % required_use)
2077 - need_bracket = True
2078 - stack[level].append(token)
2079 - child = portage.dep._RequiredUseBranch(operator=token, parent=node)
2080 - node._children.append(child)
2081 - node = child
2082 - else:
2083 - if need_bracket or "(" in token or ")" in token or \
2084 - "|" in token or "^" in token:
2085 - raise InvalidDependString(
2086 - _("malformed syntax: '%s'") % required_use)
2087 -
2088 - if token[-1] == "?":
2089 - need_bracket = True
2090 - stack[level].append(token)
2091 - child = portage.dep._RequiredUseBranch(operator=token, parent=node)
2092 - node._children.append(child)
2093 - node = child
2094 - else:
2095 - satisfied = is_active(token)
2096 - stack[level].append(satisfied)
2097 - node._children.append(portage.dep._RequiredUseLeaf(token, satisfied))
2098 - print("satisfied:", satisfied)
2099 - if satisfied is False:
2100 - new_changes = {}
2101 - new_changes[token] = True
2102 - print("new_changes:", new_changes)
2103 - if pkg.use.mask.intersection(new_changes) or \
2104 - pkg.use.force.intersection(new_changes):
2105 - print("mask or force")
2106 - else:
2107 - print("new_changes2:", new_changes)
2108 - if token in pkg.use.enabled:
2109 - target_use[token] = False
2110 - elif not token in pkg.use.enabled:
2111 - target_use[token] = True
2112 - return target_use
2113 -
2114 - if level != 0 or need_bracket:
2115 - raise InvalidDependString(
2116 - _("malformed syntax: '%s'") % required_use)
2117 -
2118 - tree._satisfied = False not in stack[0]
2119 - return target_use
2120 -
2121 def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None):
2122 root_config = self._frozen_config.roots[root]
2123 pkgsettings = self._frozen_config.pkgsettings[root]
2124 @@ -4128,37 +4351,24 @@ class depgraph(object):
2125 if not use_ebuild_visibility and (usepkgonly or useoldpkg):
2126 if pkg.installed and pkg.masks:
2127 continue
2128 - else:
2129 - try:
2130 - pkg_eb = self._pkg(
2131 - pkg.cpv, "ebuild", root_config, myrepo=pkg.repo)
2132 - except portage.exception.PackageNotFound:
2133 - pkg_eb_visible = False
2134 - for pkg_eb in self._iter_match_pkgs(pkg.root_config,
2135 - "ebuild", Atom("=%s" % (pkg.cpv,))):
2136 - if self._pkg_visibility_check(pkg_eb, autounmask_level):
2137 - pkg_eb_visible = True
2138 - break
2139 - if not pkg_eb_visible:
2140 - continue
2141 - else:
2142 - if not self._pkg_visibility_check(pkg_eb, autounmask_level):
2143 - continue
2144 + elif not self._equiv_ebuild_visible(pkg,
2145 + autounmask_level=autounmask_level):
2146 + continue
2147
2148 # Calculation of USE for unbuilt ebuilds is relatively
2149 # expensive, so it is only performed lazily, after the
2150 # above visibility checks are complete.
2151
2152 myarg = None
2153 - if root == self._frozen_config.target_root:
2154 - try:
2155 - myarg = next(self._iter_atoms_for_pkg(pkg))
2156 - except StopIteration:
2157 - pass
2158 - except portage.exception.InvalidDependString:
2159 - if not installed:
2160 - # masked by corruption
2161 - continue
2162 + try:
2163 + for myarg, myarg_atom in self._iter_atoms_for_pkg(pkg):
2164 + if myarg.force_reinstall:
2165 + reinstall = True
2166 + break
2167 + except InvalidDependString:
2168 + if not installed:
2169 + # masked by corruption
2170 + continue
2171 if not installed and myarg:
2172 found_available_arg = True
2173
2174 @@ -4169,17 +4379,8 @@ class depgraph(object):
2175 # since IUSE cannot be adjusted by the user.
2176 continue
2177
2178 - if pkg.metadata.get("REQUIRED_USE") and eapi_has_required_use(pkg.metadata["EAPI"]):
2179 - required_use_is_sat = check_required_use(pkg.metadata["REQUIRED_USE"],
2180 - self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag)
2181 - if not required_use_is_sat:
2182 - if autounmask_level and autounmask_level.allow_use_changes \
2183 - and not pkg.built:
2184 - target_use = self.change_required_use(pkg)
2185 - if not target_use is None:
2186 - use = self._pkg_use_enabled(pkg, target_use)
2187 -
2188 if atom.use:
2189 +
2190 matched_pkgs_ignore_use.append(pkg)
2191 if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
2192 target_use = {}
2193 @@ -4197,7 +4398,7 @@ class depgraph(object):
2194 missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
2195
2196 if atom.use.enabled:
2197 - if atom.use.enabled.intersection(missing_disabled):
2198 + if any(x in atom.use.enabled for x in missing_disabled):
2199 use_match = False
2200 can_adjust_use = False
2201 need_enabled = atom.use.enabled.difference(use)
2202 @@ -4206,11 +4407,11 @@ class depgraph(object):
2203 if need_enabled:
2204 use_match = False
2205 if can_adjust_use:
2206 - if pkg.use.mask.intersection(need_enabled):
2207 + if any(x in pkg.use.mask for x in need_enabled):
2208 can_adjust_use = False
2209
2210 if atom.use.disabled:
2211 - if atom.use.disabled.intersection(missing_enabled):
2212 + if any(x in atom.use.disabled for x in missing_enabled):
2213 use_match = False
2214 can_adjust_use = False
2215 need_disabled = atom.use.disabled.intersection(use)
2216 @@ -4219,8 +4420,8 @@ class depgraph(object):
2217 if need_disabled:
2218 use_match = False
2219 if can_adjust_use:
2220 - if pkg.use.force.difference(
2221 - pkg.use.mask).intersection(need_disabled):
2222 + if any(x in pkg.use.force and x not in
2223 + pkg.use.mask for x in need_disabled):
2224 can_adjust_use = False
2225
2226 if not use_match:
2227 @@ -4486,9 +4687,19 @@ class depgraph(object):
2228 "recurse" not in self._dynamic_config.myparams:
2229 return 1
2230
2231 + complete_if_new_use = self._dynamic_config.myparams.get(
2232 + "complete_if_new_use", "y") == "y"
2233 + complete_if_new_ver = self._dynamic_config.myparams.get(
2234 + "complete_if_new_ver", "y") == "y"
2235 + rebuild_if_new_slot = self._dynamic_config.myparams.get(
2236 + "rebuild_if_new_slot", "y") == "y"
2237 + complete_if_new_slot = rebuild_if_new_slot
2238 +
2239 if "complete" not in self._dynamic_config.myparams and \
2240 - self._dynamic_config.myparams.get("complete_if_new_ver", "y") == "y":
2241 - # Enable complete mode if an installed package version will change.
2242 + (complete_if_new_use or
2243 + complete_if_new_ver or complete_if_new_slot):
2244 + # Enable complete mode if an installed package will change somehow.
2245 + use_change = False
2246 version_change = False
2247 for node in self._dynamic_config.digraph:
2248 if not isinstance(node, Package) or \
2249 @@ -4496,12 +4707,35 @@ class depgraph(object):
2250 continue
2251 vardb = self._frozen_config.roots[
2252 node.root].trees["vartree"].dbapi
2253 - inst_pkg = vardb.match_pkgs(node.slot_atom)
2254 - if inst_pkg and (inst_pkg[0] > node or inst_pkg[0] < node):
2255 - version_change = True
2256 - break
2257
2258 - if version_change:
2259 + if complete_if_new_use or complete_if_new_ver:
2260 + inst_pkg = vardb.match_pkgs(node.slot_atom)
2261 + if inst_pkg and inst_pkg[0].cp == node.cp:
2262 + inst_pkg = inst_pkg[0]
2263 + if complete_if_new_ver and \
2264 + (inst_pkg < node or node < inst_pkg):
2265 + version_change = True
2266 + break
2267 +
2268 + # Intersect enabled USE with IUSE, in order to
2269 + # ignore forced USE from implicit IUSE flags, since
2270 + # they're probably irrelevant and they are sensitive
2271 + # to use.mask/force changes in the profile.
2272 + if complete_if_new_use and \
2273 + (node.iuse.all != inst_pkg.iuse.all or
2274 + self._pkg_use_enabled(node).intersection(node.iuse.all) !=
2275 + self._pkg_use_enabled(inst_pkg).intersection(inst_pkg.iuse.all)):
2276 + use_change = True
2277 + break
2278 +
2279 + if complete_if_new_slot:
2280 + cp_list = vardb.match_pkgs(Atom(node.cp))
2281 + if (cp_list and cp_list[0].cp == node.cp and
2282 + not any(node.slot == pkg.slot for pkg in cp_list)):
2283 + version_change = True
2284 + break
2285 +
2286 + if use_change or version_change:
2287 self._dynamic_config.myparams["complete"] = True
2288
2289 if "complete" not in self._dynamic_config.myparams:
2290 @@ -4515,6 +4749,7 @@ class depgraph(object):
2291 # scheduled for replacement. Also, toggle the "deep"
2292 # parameter so that all dependencies are traversed and
2293 # accounted for.
2294 + self._dynamic_config._complete_mode = True
2295 self._select_atoms = self._select_atoms_from_graph
2296 if "remove" in self._dynamic_config.myparams:
2297 self._select_package = self._select_pkg_from_installed
2298 @@ -4673,7 +4908,7 @@ class depgraph(object):
2299 # For installed packages, always ignore blockers from DEPEND since
2300 # only runtime dependencies should be relevant for packages that
2301 # are already built.
2302 - dep_keys = ["RDEPEND", "PDEPEND"]
2303 + dep_keys = Package._runtime_keys
2304 for myroot in self._frozen_config.trees:
2305
2306 if self._frozen_config.myopts.get("--root-deps") is not None and \
2307 @@ -5130,16 +5365,24 @@ class depgraph(object):
2308 root_config.root]["root_config"] = root_config
2309
2310 def _resolve_conflicts(self):
2311 +
2312 + if "complete" not in self._dynamic_config.myparams and \
2313 + self._dynamic_config._allow_backtracking and \
2314 + self._dynamic_config._slot_collision_nodes and \
2315 + not self._accept_blocker_conflicts():
2316 + self._dynamic_config.myparams["complete"] = True
2317 +
2318 if not self._complete_graph():
2319 raise self._unknown_internal_error()
2320
2321 + self._process_slot_conflicts()
2322 +
2323 + self._slot_operator_trigger_reinstalls()
2324 +
2325 if not self._validate_blockers():
2326 self._dynamic_config._skip_restart = True
2327 raise self._unknown_internal_error()
2328
2329 - if self._dynamic_config._slot_collision_info:
2330 - self._process_slot_conflicts()
2331 -
2332 def _serialize_tasks(self):
2333
2334 debug = "--debug" in self._frozen_config.myopts
2335 @@ -5434,7 +5677,7 @@ class depgraph(object):
2336 for node in nodes:
2337 parents = mygraph.parent_nodes(node,
2338 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
2339 - if parents and set(parents).intersection(asap_nodes):
2340 + if any(x in asap_nodes for x in parents):
2341 selected_nodes = [node]
2342 break
2343 else:
2344 @@ -6119,7 +6362,7 @@ class depgraph(object):
2345 if is_latest:
2346 unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
2347 elif is_latest_in_slot:
2348 - unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
2349 + unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, keyword))
2350 else:
2351 unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
2352 else:
2353 @@ -6159,7 +6402,7 @@ class depgraph(object):
2354 if is_latest:
2355 p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
2356 elif is_latest_in_slot:
2357 - p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
2358 + p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.slot))
2359 else:
2360 p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
2361 else:
2362 @@ -6184,7 +6427,7 @@ class depgraph(object):
2363 if is_latest:
2364 use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
2365 elif is_latest_in_slot:
2366 - use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
2367 + use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(adjustments)))
2368 else:
2369 use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
2370
2371 @@ -6201,7 +6444,7 @@ class depgraph(object):
2372 if is_latest:
2373 license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
2374 elif is_latest_in_slot:
2375 - license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
2376 + license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(sorted(missing_licenses))))
2377 else:
2378 license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
2379
2380 @@ -6308,27 +6551,27 @@ class depgraph(object):
2381 settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
2382
2383 if len(roots) > 1:
2384 - writemsg_stdout("\nFor %s:\n" % abs_user_config, noiselevel=-1)
2385 + writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
2386
2387 if root in unstable_keyword_msg:
2388 - writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
2389 + writemsg("\nThe following " + colorize("BAD", "keyword changes") + \
2390 " are necessary to proceed:\n", noiselevel=-1)
2391 - writemsg_stdout(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
2392 + writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
2393
2394 if root in p_mask_change_msg:
2395 - writemsg_stdout("\nThe following " + colorize("BAD", "mask changes") + \
2396 + writemsg("\nThe following " + colorize("BAD", "mask changes") + \
2397 " are necessary to proceed:\n", noiselevel=-1)
2398 - writemsg_stdout(format_msg(p_mask_change_msg[root]), noiselevel=-1)
2399 + writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
2400
2401 if root in use_changes_msg:
2402 - writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
2403 + writemsg("\nThe following " + colorize("BAD", "USE changes") + \
2404 " are necessary to proceed:\n", noiselevel=-1)
2405 - writemsg_stdout(format_msg(use_changes_msg[root]), noiselevel=-1)
2406 + writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
2407
2408 if root in license_msg:
2409 - writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
2410 + writemsg("\nThe following " + colorize("BAD", "license changes") + \
2411 " are necessary to proceed:\n", noiselevel=-1)
2412 - writemsg_stdout(format_msg(license_msg[root]), noiselevel=-1)
2413 + writemsg(format_msg(license_msg[root]), noiselevel=-1)
2414
2415 protect_obj = {}
2416 if write_to_file:
2417 @@ -6375,7 +6618,7 @@ class depgraph(object):
2418 for line in msg:
2419 if line:
2420 line = colorize("INFORM", line)
2421 - writemsg_stdout(line + "\n", noiselevel=-1)
2422 + writemsg(line + "\n", noiselevel=-1)
2423
2424 if ask and write_to_file and file_to_write_to:
2425 prompt = "\nWould you like to add these " + \
2426 @@ -6407,14 +6650,14 @@ class depgraph(object):
2427 file_to_write_to.get((abs_user_config, "package.license")))
2428
2429 if problems:
2430 - writemsg_stdout("\nThe following problems occurred while writing autounmask changes:\n", \
2431 + writemsg("\nThe following problems occurred while writing autounmask changes:\n", \
2432 noiselevel=-1)
2433 - writemsg_stdout("".join(problems), noiselevel=-1)
2434 + writemsg("".join(problems), noiselevel=-1)
2435 elif write_to_file and roots:
2436 - writemsg_stdout("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
2437 + writemsg("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
2438 noiselevel=-1)
2439 elif not pretend and not autounmask_write and roots:
2440 - writemsg_stdout("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
2441 + writemsg("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
2442 noiselevel=-1)
2443
2444
2445 @@ -6425,35 +6668,8 @@ class depgraph(object):
2446 the merge list where it is most likely to be seen, but if display()
2447 is not going to be called then this method should be called explicitly
2448 to ensure that the user is notified of problems with the graph.
2449 -
2450 - All output goes to stderr, except for unsatisfied dependencies which
2451 - go to stdout for parsing by programs such as autounmask.
2452 """
2453
2454 - # Note that show_masked_packages() sends its output to
2455 - # stdout, and some programs such as autounmask parse the
2456 - # output in cases when emerge bails out. However, when
2457 - # show_masked_packages() is called for installed packages
2458 - # here, the message is a warning that is more appropriate
2459 - # to send to stderr, so temporarily redirect stdout to
2460 - # stderr. TODO: Fix output code so there's a cleaner way
2461 - # to redirect everything to stderr.
2462 - sys.stdout.flush()
2463 - sys.stderr.flush()
2464 - stdout = sys.stdout
2465 - try:
2466 - sys.stdout = sys.stderr
2467 - self._display_problems()
2468 - finally:
2469 - sys.stdout = stdout
2470 - sys.stdout.flush()
2471 - sys.stderr.flush()
2472 -
2473 - # This goes to stdout for parsing by programs like autounmask.
2474 - for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
2475 - self._show_unsatisfied_dep(*pargs, **kwargs)
2476 -
2477 - def _display_problems(self):
2478 if self._dynamic_config._circular_deps_for_display is not None:
2479 self._show_circular_deps(
2480 self._dynamic_config._circular_deps_for_display)
2481 @@ -6572,6 +6788,9 @@ class depgraph(object):
2482 show_mask_docs()
2483 writemsg("\n", noiselevel=-1)
2484
2485 + for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
2486 + self._show_unsatisfied_dep(*pargs, **kwargs)
2487 +
2488 def saveNomergeFavorites(self):
2489 """Find atoms in favorites that are not in the mergelist and add them
2490 to the world file if necessary."""
2491 @@ -6618,6 +6837,9 @@ class depgraph(object):
2492 continue
2493 if arg.root_config.root != root_config.root:
2494 continue
2495 + if arg.internal:
2496 + # __auto_* sets
2497 + continue
2498 k = arg.name
2499 if k in ("selected", "world") or \
2500 not root_config.sets[k].world_candidate:
2501 @@ -6629,9 +6851,13 @@ class depgraph(object):
2502 all_added.extend(added_favorites)
2503 all_added.sort()
2504 for a in all_added:
2505 + if a.startswith(SETPREFIX):
2506 + filename = "world_sets"
2507 + else:
2508 + filename = "world"
2509 writemsg_stdout(
2510 - ">>> Recording %s in \"world\" favorites file...\n" % \
2511 - colorize("INFORM", str(a)), noiselevel=-1)
2512 + ">>> Recording %s in \"%s\" favorites file...\n" %
2513 + (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
2514 if all_added:
2515 world_set.update(all_added)
2516
2517 @@ -7010,13 +7236,8 @@ class _dep_check_composite_db(dbapi):
2518 return ret[:]
2519
2520 def _visible(self, pkg):
2521 - if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
2522 - try:
2523 - arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
2524 - except (StopIteration, portage.exception.InvalidDependString):
2525 - arg = None
2526 - if arg:
2527 - return False
2528 + if pkg.installed and not self._depgraph._want_installed_pkg(pkg):
2529 + return False
2530 if pkg.installed and \
2531 (pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
2532 # Account for packages with masks (like KEYWORDS masks)
2533 @@ -7032,24 +7253,8 @@ class _dep_check_composite_db(dbapi):
2534 if not avoid_update:
2535 if not use_ebuild_visibility and usepkgonly:
2536 return False
2537 - else:
2538 - try:
2539 - pkg_eb = self._depgraph._pkg(
2540 - pkg.cpv, "ebuild", pkg.root_config,
2541 - myrepo=pkg.repo)
2542 - except portage.exception.PackageNotFound:
2543 - pkg_eb_visible = False
2544 - for pkg_eb in self._depgraph._iter_match_pkgs(
2545 - pkg.root_config, "ebuild",
2546 - Atom("=%s" % (pkg.cpv,))):
2547 - if self._depgraph._pkg_visibility_check(pkg_eb):
2548 - pkg_eb_visible = True
2549 - break
2550 - if not pkg_eb_visible:
2551 - return False
2552 - else:
2553 - if not self._depgraph._pkg_visibility_check(pkg_eb):
2554 - return False
2555 + elif not self._depgraph._equiv_ebuild_visible(pkg):
2556 + return False
2557
2558 in_graph = self._depgraph._dynamic_config._slot_pkg_map[
2559 self._root].get(pkg.slot_atom)
2560 @@ -7329,8 +7534,6 @@ def get_mask_info(root_config, cpv, pkgsettings,
2561 mreasons = ["corruption"]
2562 else:
2563 eapi = metadata['EAPI']
2564 - if eapi[:1] == '-':
2565 - eapi = eapi[1:]
2566 if not portage.eapi_is_supported(eapi):
2567 mreasons = ['EAPI %s' % eapi]
2568 else:
2569 @@ -7387,10 +7590,11 @@ def show_masked_packages(masked_packages):
2570 # above via mreasons.
2571 pass
2572
2573 - writemsg_stdout("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)
2574 + writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",
2575 + noiselevel=-1)
2576
2577 if comment and comment not in shown_comments:
2578 - writemsg_stdout(filename + ":\n" + comment + "\n",
2579 + writemsg(filename + ":\n" + comment + "\n",
2580 noiselevel=-1)
2581 shown_comments.add(comment)
2582 portdb = root_config.trees["porttree"].dbapi
2583 @@ -7400,13 +7604,14 @@ def show_masked_packages(masked_packages):
2584 continue
2585 msg = ("A copy of the '%s' license" + \
2586 " is located at '%s'.\n\n") % (l, l_path)
2587 - writemsg_stdout(msg, noiselevel=-1)
2588 + writemsg(msg, noiselevel=-1)
2589 shown_licenses.add(l)
2590 return have_eapi_mask
2591
2592 def show_mask_docs():
2593 - writemsg_stdout("For more information, see the MASKED PACKAGES section in the emerge\n", noiselevel=-1)
2594 - writemsg_stdout("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
2595 + writemsg("For more information, see the MASKED PACKAGES "
2596 + "section in the emerge\n", noiselevel=-1)
2597 + writemsg("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
2598
2599 def show_blocker_docs_link():
2600 writemsg("\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n", noiselevel=-1)
2601
2602 diff --git a/gobs/pym/jobs.py b/gobs/pym/jobs.py
2603 index 292d57d..6b5340b 100644
2604 --- a/gobs/pym/jobs.py
2605 +++ b/gobs/pym/jobs.py
2606 @@ -9,7 +9,7 @@ from gobs.ConnectionManager import connectionManager
2607 CM=connectionManager(gobs_settings_dict)
2608 #selectively import the pgsql/mysql querys
2609 if CM.getName()=='pgsql':
2610 - from gobs.pgsql import *
2611 + from gobs.pgsql_querys import *
2612
2613 from gobs.sync import git_pull, sync_tree
2614 from gobs.buildquerydb import add_buildquery_main, del_buildquery_main
2615 @@ -17,75 +17,77 @@ from gobs.updatedb import update_db_main
2616
2617 def jobs_main(config_profile):
2618 conn = CM.getConnection()
2619 - job = check_job_list(conn, config_profile)
2620 - if job is None:
2621 + jobs_id = get_jobs_id(conn, config_profile)
2622 + if jobs_id is None:
2623 CM.putConnection(conn)
2624 - return
2625 - log_msg = "Job: %s Type: %s" % (job[1], job[0],)
2626 - add_gobs_logs(conn, log_msg, "info", config_profile)
2627 - if job[0] == "addbuildquery":
2628 - update_job_list(conn, "Runing", job[1])
2629 - log_msg = "Job %s is runing." % (job[1],)
2630 - add_gobs_logs(conn, log_msg, "info", config_profile)
2631 - result = add_buildquery_main(config_profile)
2632 - if result is True:
2633 - update_job_list(conn, "Done", job[1])
2634 - log_msg = "Job %s is done.." % (job[1],)
2635 + return
2636 + for job_id in jobs_id:
2637 + job = get_job(conn, job_id)
2638 + log_msg = "Job: %s Type: %s" % (job_id, job,)
2639 + add_gobs_logs(conn, log_msg, "info", config_profile)
2640 + if job == "addbuildquery":
2641 + update_job_list(conn, "Runing", job_id)
2642 + log_msg = "Job %s is runing." % (job_id,)
2643 add_gobs_logs(conn, log_msg, "info", config_profile)
2644 - else:
2645 - update_job_list(conn, "Fail", job[1])
2646 - log_msg = "Job %s did fail." % (job[1],)
2647 + result = add_buildquery_main(config_profile)
2648 + if result is True:
2649 + update_job_list(conn, "Done", job_id)
2650 + log_msg = "Job %s is done.." % (job_id,)
2651 + add_gobs_logs(conn, log_msg, "info", config_profile)
2652 + else:
2653 + update_job_list(conn, "Fail", job_id)
2654 + log_msg = "Job %s did fail." % (job_id,)
2655 + add_gobs_logs(conn, log_msg, "info", config_profile)
2656 + elif job == "delbuildquery":
2657 + update_job_list(conn, "Runing", job_id)
2658 + log_msg = "Job %s is runing." % (job_id,)
2659 add_gobs_logs(conn, log_msg, "info", config_profile)
2660 - elif job[0] == "delbuildquery":
2661 - update_job_list(conn, "Runing", job[1])
2662 - log_msg = "Job %s is runing." % (job[1],)
2663 - add_gobs_logs(conn, log_msg, "info", config_profile)
2664 - result = del_buildquery_main(config_profile)
2665 - if result is True:
2666 - update_job_list(conn, "Done", job[1])
2667 - log_msg = "Job %s is done.." % (job[1],)
2668 + result = del_buildquery_main(config_profile)
2669 + if result is True:
2670 + update_job_list(conn, "Done", job_id)
2671 + log_msg = "Job %s is done.." % (job_id,)
2672 + add_gobs_logs(conn, log_msg, "info", config_profile)
2673 + else:
2674 + update_job_list(conn, "Fail", job_id)
2675 + log_msg = "Job %s did fail." % (job_id,)
2676 + add_gobs_logs(conn, log_msg, "info", config_profile)
2677 + elif job == "gitsync":
2678 + update_job_list(conn, "Runing", job_id)
2679 + log_msg = "Job %s is runing." % (job_id,)
2680 add_gobs_logs(conn, log_msg, "info", config_profile)
2681 - else:
2682 - update_job_list(conn, "Fail", job[1])
2683 - log_msg = "Job %s did fail." % (job[1],)
2684 + result = git_pull()
2685 + if result is True:
2686 + update_job_list(conn, "Done", job_id)
2687 + log_msg = "Job %s is done.." % (job_id,)
2688 + add_gobs_logs(conn, log_msg, "info", config_profile)
2689 + else:
2690 + update_job_list(conn, "Fail", job_id)
2691 + log_msg = "Job %s did fail." % (job_id,)
2692 + add_gobs_logs(conn, log_msg, "info", config_profile)
2693 + elif job == "emergesync":
2694 + update_job_list(conn, "Runing", job_id)
2695 + log_msg = "Job %s is runing." % (job_id,)
2696 add_gobs_logs(conn, log_msg, "info", config_profile)
2697 - elif job[0] == "gitsync":
2698 - update_job_list(conn, "Runing", job[1])
2699 - log_msg = "Job %s is runing." % (job[1],)
2700 - add_gobs_logs(conn, log_msg, "info", config_profile)
2701 - result = git_pull()
2702 - if result is True:
2703 - update_job_list(conn, "Done", job[1])
2704 - log_msg = "Job %s is done.." % (job[1],)
2705 + result = sync_tree()
2706 + if result is True:
2707 + update_job_list(conn, "Done", job_id)
2708 + log_msg = "Job %s is done.." % (job_id,)
2709 + add_gobs_logs(conn, log_msg, "info", config_profile)
2710 + else:
2711 + update_job_list(conn, "Fail", job_id)
2712 + log_msg = "Job %s did fail." % (job_id,)
2713 + add_gobs_logs(conn, log_msg, "info", config_profile)
2714 + elif job == "updatedb":
2715 + update_job_list(conn, "Runing", job_id)
2716 + log_msg = "Job %s is runing." % (job_id,)
2717 add_gobs_logs(conn, log_msg, "info", config_profile)
2718 - else:
2719 - update_job_list(conn, "Fail", job[1])
2720 - log_msg = "Job %s did fail." % (job[1],)
2721 - add_gobs_logs(conn, log_msg, "info", config_profile)
2722 - elif job[0] == "emergesync":
2723 - update_job_list(conn, "Runing", job[1])
2724 - log_msg = "Job %s is runing." % (job[1],)
2725 - add_gobs_logs(conn, log_msg, "info", config_profile)
2726 - result = sync_tree()
2727 - if result is True:
2728 - update_job_list(conn, "Done", job[1])
2729 - log_msg = "Job %s is done.." % (job[1],)
2730 - add_gobs_logs(conn, log_msg, "info", config_profile)
2731 - else:
2732 - update_job_list(conn, "Fail", job[1])
2733 - log_msg = "Job %s did fail." % (job[1],)
2734 - add_gobs_logs(conn, log_msg, "info", config_profile)
2735 - elif job[0] == "updatedb":
2736 - update_job_list(conn, "Runing", job[1])
2737 - log_msg = "Job %s is runing." % (job[1],)
2738 - add_gobs_logs(conn, log_msg, "info", config_profile)
2739 - result = update_db_main()
2740 - if result is True:
2741 - update_job_list(conn, "Done", job[1])
2742 - log_msg = "Job %s is done.." % (job[1],)
2743 - add_gobs_logs(conn, log_msg, "info", config_profile)
2744 - else:
2745 - update_job_list(conn, "Fail", job[1])
2746 - log_msg = "Job %s did fail." % (job[1],)
2747 - add_gobs_logs(conn, log_msg, "info", config_profile)
2748 - return
2749 \ No newline at end of file
2750 + result = update_db_main()
2751 + if result is True:
2752 + update_job_list(conn, "Done", job_id)
2753 + log_msg = "Job %s is done.." % (job_id,)
2754 + add_gobs_logs(conn, log_msg, "info", config_profile)
2755 + else:
2756 + update_job_list(conn, "Fail", job_id)
2757 + log_msg = "Job %s did fail." % (job_id,)
2758 + add_gobs_logs(conn, log_msg, "info", config_profile)
2759 + return
2760 \ No newline at end of file
2761
2762 diff --git a/gobs/pym/old_cpv.py b/gobs/pym/old_cpv.py
2763 index 93af29f..3d47c50 100644
2764 --- a/gobs/pym/old_cpv.py
2765 +++ b/gobs/pym/old_cpv.py
2766 @@ -16,17 +16,21 @@ class gobs_old_cpv(object):
2767 self._mysettings = mysettings
2768 self._myportdb = myportdb
2769
2770 - def mark_old_ebuild_db(self, categories, package, package_id):
2771 + def mark_old_ebuild_db(self, package_id):
2772 conn=CM.getConnection()
2773 - ebuild_list_tree = sorted(self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None))
2774 + # Get the ebuild list for cp
2775 + cp, repo = get_cp_repo_from_package_id(conn, package_id)
2776 + mytree = []
2777 + mytree.append(self._myportdb.getRepositoryPath(repo))
2778 + ebuild_list_tree = self._myportdb.cp_list(cp, use_cache=1, mytree=mytree)
2779 # Get ebuild list on categories, package in the db
2780 - ebuild_list_db = cp_list_db(conn,package_id)
2781 + ebuild_list_db = cp_list_db(conn, package_id)
2782 # Check if don't have the ebuild in the tree
2783 # Add it to the no active list
2784 old_ebuild_list = []
2785 for ebuild_line in ebuild_list_db:
2786 - ebuild_line_db = categories + "/" + package + "-" + ebuild_line[0]
2787 - if not ebuild_line_db in ebuild_list_tree:
2788 + cpv_db = cp + "-" + ebuild_line[0]
2789 + if not cpv_db in ebuild_list_tree:
2790 old_ebuild_list.append(ebuild_line)
2791 # Set no active on ebuilds in the db that no longer in tree
2792 if old_ebuild_list != []:
2793
2794 diff --git a/gobs/pym/package.py b/gobs/pym/package.py
2795 index 771572f..cb6a1f1 100644
2796 --- a/gobs/pym/package.py
2797 +++ b/gobs/pym/package.py
2798 @@ -14,8 +14,10 @@ config_profile = gobs_settings_dict['gobs_config']
2799 from gobs.ConnectionManager import connectionManager
2800 CM=connectionManager(gobs_settings_dict)
2801 #selectively import the pgsql/mysql querys
2802 -if CM.getName()=='pgsql':
2803 - from gobs.pgsql import *
2804 +if CM.getName()=='pgsql':
2805 + from gobs.pgsql_querys import *
2806 +if CM.getName()=='mysql':
2807 + from gobs.mysql_querys import *
2808
2809 class gobs_package(object):
2810
2811 @@ -23,113 +25,123 @@ class gobs_package(object):
2812 self._mysettings = mysettings
2813 self._myportdb = myportdb
2814
2815 - def change_config(self, config_id):
2816 - # Change config_root config_id = table configs.id
2817 - my_new_setup = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id + "/"
2818 - mysettings_setup = portage.config(config_root = my_new_setup)
2819 - return mysettings_setup
2820 + def change_config(self, config_setup):
2821 + # Change config_root config_setup = table config
2822 + my_new_setup = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_setup + "/"
2823 + mysettings_setup = portage.config(config_root = my_new_setup)
2824 + return mysettings_setup
2825
2826 - def config_match_ebuild(self, categories, package, config_list):
2827 + def config_match_ebuild(self, cp, config_id_list):
2828 config_cpv_listDict ={}
2829 - if config_list == []:
2830 - return config_cpv_listDict
2831 - for config_id in config_list:
2832 + if config_id_list == []:
2833 + return config_cpv_listDict
2834 + conn=CM.getConnection()
2835 + for config_id in config_id_list:
2836 # Change config/setup
2837 - mysettings_setup = self.change_config(config_id)
2838 - myportdb_setup = portage.portdbapi(mysettings=mysettings_setup)
2839 - # Get latest cpv from portage with the config
2840 - latest_ebuild = myportdb_setup.xmatch('bestmatch-visible', categories + "/" + package)
2841 - latest_ebuild_version = unicode("")
2842 - # Check if could get cpv from portage
2843 - if latest_ebuild != "":
2844 - # Get the version of cpv
2845 - latest_ebuild_version = portage.versions.cpv_getversion(latest_ebuild)
2846 - # Get the iuse and use flags for that config/setup
2847 - init_useflags = gobs_use_flags(mysettings_setup, myportdb_setup, latest_ebuild)
2848 - iuse_flags_list, final_use_list = init_useflags.get_flags()
2849 - iuse_flags_list2 = []
2850 - for iuse_line in iuse_flags_list:
2851 - iuse_flags_list2.append( init_useflags.reduce_flag(iuse_line))
2852 - # Dic the needed info
2853 - attDict = {}
2854 - attDict['ebuild_version'] = latest_ebuild_version
2855 - attDict['useflags'] = final_use_list
2856 - attDict['iuse'] = iuse_flags_list2
2857 - attDict['package'] = package
2858 - attDict['categories'] = categories
2859 - config_cpv_listDict[config_id] = attDict
2860 - # Clean some cache
2861 - myportdb_setup.close_caches()
2862 - portage.portdbapi.portdbapi_instances.remove(myportdb_setup)
2863 - return config_cpv_listDict
2864 -
2865 - def get_ebuild_metadata(self, ebuild_line):
2866 - # Get the auxdbkeys infos for the ebuild
2867 - try:
2868 - ebuild_auxdb_list = self._myportdb.aux_get(ebuild_line, portage.auxdbkeys)
2869 - except:
2870 - ebuild_auxdb_list = []
2871 - else:
2872 - for i in range(len(ebuild_auxdb_list)):
2873 - if ebuild_auxdb_list[i] == '':
2874 - ebuild_auxdb_list[i] = ''
2875 - return ebuild_auxdb_list
2876 -
2877 - def get_packageDict(self, pkgdir, ebuild_line, categories, package, config_id):
2878 - attDict = {}
2879 - ebuild_version_tree = portage.versions.cpv_getversion(ebuild_line)
2880 - ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")[0]
2881 - ebuild_version_text = get_ebuild_text(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")
2882 - init_repoman = gobs_repoman(self._mysettings, self._myportdb)
2883 - repoman_error = init_repoman.check_repoman(categories, package, ebuild_version_tree, config_id)
2884 - ebuild_version_metadata_tree = self.get_ebuild_metadata(ebuild_line)
2885 - # if there some error to get the metadata we add rubish to the
2886 - # ebuild_version_metadata_tree and set ebuild_version_checksum_tree to 0
2887 - # so it can be updated next time we update the db
2888 - if ebuild_version_metadata_tree == []:
2889 - log_msg = " QA: %s Have broken metadata" % (ebuild_line,)
2890 - add_gobs_logs(conn, log_msg, "info", config_profile)
2891 - ebuild_version_metadata_tree = ['','','','','','','','','','','','','','','','','','','','','','','','','']
2892 - ebuild_version_checksum_tree = ['0']
2893 - # add the ebuild to the dict packages
2894 - attDict['categories'] = categories
2895 - attDict['package'] = package
2896 - attDict['ebuild_version_tree'] = ebuild_version_tree
2897 - attDict['ebuild_version_checksum_tree']= ebuild_version_checksum_tree
2898 - attDict['ebuild_version_metadata_tree'] = ebuild_version_metadata_tree
2899 - attDict['ebuild_version_text'] = ebuild_version_text[0]
2900 - attDict['ebuild_version_revision'] = ebuild_version_text[1]
2901 - attDict['ebuild_error'] = repoman_error
2902 - return attDict
2903 -
2904 - def get_metadataDict(self, packageDict, ebuild_id_list):
2905 - # Make the metadataDict from packageDict
2906 - ebuild_i = 0
2907 - metadataDict ={}
2908 - for k, v in packageDict.iteritems():
2909 - attDict = {}
2910 - metadata_restrictions = []
2911 - for i in v['ebuild_version_metadata_tree'][4].split():
2912 - metadata_restrictions.append(i)
2913 - metadata_keyword = []
2914 - for i in v['ebuild_version_metadata_tree'][8].split():
2915 - metadata_keyword.append(i)
2916 - metadata_iuse = []
2917 - for i in v['ebuild_version_metadata_tree'][10].split():
2918 - metadata_iuse.append(i)
2919 - attDict['restrictions'] = metadata_restrictions
2920 - attDict['keyword'] = metadata_keyword
2921 - attDict['iuse'] = metadata_iuse
2922 - metadataDict[ebuild_id_list[ebuild_i]] = attDict
2923 - ebuild_i = ebuild_i +1
2924 - return metadataDict
2925 + for config_id in config_id_list:
2926 +
2927 + # Change config/setup
2928 + config_setup = get_config_db(conn, config_id)
2929 + mysettings_setup = self.change_config(config_setup)
2930 + myportdb_setup = portage.portdbapi(mysettings=mysettings_setup)
2931 +
2932 + # Get the latest cpv from portage with the config that we can build
2933 + build_cpv = myportdb_setup.xmatch('bestmatch-visible', cp)
2934 +
2935 + # Check if could get cpv from portage and add it to the config_cpv_listDict.
2936 + if build_cpv != "":
2937 +
2938 + # Get the iuse and use flags for that config/setup and cpv
2939 + init_useflags = gobs_use_flags(mysettings_setup, myportdb_setup, build_cpv)
2940 + iuse_flags_list, final_use_list = init_useflags.get_flags()
2941 + iuse_flags_list2 = []
2942 + for iuse_line in iuse_flags_list:
2943 + iuse_flags_list2.append( init_useflags.reduce_flag(iuse_line))
2944 +
2945 + # Dict the needed info
2946 + attDict = {}
2947 + attDict['cpv'] = build_cpv
2948 + attDict['useflags'] = final_use_list
2949 + attDict['iuse'] = iuse_flags_list2
2950 + config_cpv_listDict[config_id] = attDict
2951 +
2952 + # Clean some cache
2953 + myportdb_setup.close_caches()
2954 + portage.portdbapi.portdbapi_instances.remove(myportdb_setup)
2955 + CM.putConnection(conn)
2956 + return config_cpv_listDict
2957 +
2958 + def get_ebuild_metadata(self, cpv, repo):
2959 + # Get the auxdbkeys infos for the ebuild
2960 + try:
2961 + ebuild_auxdb_list = self._myportdb.aux_get(cpv, portage.auxdbkeys, myrepo=repo)
2962 + except:
2963 + ebuild_auxdb_list = []
2964 + else:
2965 + for i in range(len(ebuild_auxdb_list)):
2966 + if ebuild_auxdb_list[i] == '':
2967 + ebuild_auxdb_list[i] = ''
2968 + return ebuild_auxdb_list
2969 +
2970 + def get_packageDict(self, pkgdir, cpv, repo, config_id):
2971 + attDict = {}
2972 + conn=CM.getConnection()
2973 +
2974 + #Get categories, package and version from cpv
2975 + ebuild_version_tree = portage.versions.cpv_getversion(cpv)
2976 + element = portage.versions.cpv_getkey(cpv).split('/')
2977 + categories = element[0]
2978 + package = element[1]
2979 +
2980 + # Make a checksum of the ebuild
2981 + try:
2982 + ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")[0]
2983 + except:
2984 + ebuild_version_checksum_tree = "0"
2985 + log_msg = "QA: Can't checksum the ebuild file. %s on repo %s" % (cpv, repo,)
2986 + add_gobs_logs(conn, log_msg, "info", config_profile)
2987 + log_msg = "C %s:%s ... Fail." % (cpv, repo)
2988 + add_gobs_logs(conn, log_msg, "info", config_profile)
2989 + ebuild_version_text_tree = '0'
2990 + else:
2991 + ebuild_version_text_tree = get_ebuild_text(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")
2992 +
2993 + # run repoman on the ebuild
2994 + #init_repoman = gobs_repoman(self._mysettings, self._myportdb)
2995 + #repoman_error = init_repoman.check_repoman(pkgdir, cpv, config_id)
2996 + #if repoman_error != []:
2997 + # log_msg = "Repoman: %s have errors on repo %s" % (cpv, repo,)
2998 + # add_gobs_logs(conn, log_msg, "info", config_profile)
2999 + repoman_error = []
3000 +
3001 + # Get the ebuild metadata
3002 + ebuild_version_metadata_tree = self.get_ebuild_metadata(cpv, repo)
3003 + # if there some error to get the metadata we add rubish to the
3004 + # ebuild_version_metadata_tree and set ebuild_version_checksum_tree to 0
3005 + # so it can be updated next time we update the db
3006 + if ebuild_version_metadata_tree == []:
3007 + log_msg = " QA: %s have broken metadata on repo %s" % (cpv, repo)
3008 + add_gobs_logs(conn, log_msg, "info", config_profile)
3009 + ebuild_version_metadata_tree = ['','','','','','','','','','','','','','','','','','','','','','','','','']
3010 + ebuild_version_checksum_tree = '0'
3011 +
3012 + # add the ebuild info to the dict packages
3013 + attDict['repo'] = repo
3014 + attDict['ebuild_version_tree'] = ebuild_version_tree
3015 + attDict['ebuild_version_checksum_tree']= ebuild_version_checksum_tree
3016 + attDict['ebuild_version_metadata_tree'] = ebuild_version_metadata_tree
3017 + #attDict['ebuild_version_text_tree'] = ebuild_version_text_tree[0]
3018 + attDict['ebuild_version_revision_tree'] = ebuild_version_text_tree[1]
3019 + attDict['ebuild_error'] = repoman_error
3020 + CM.putConnection(conn)
3021 + return attDict
3022
3023 def add_new_ebuild_buildquery_db(self, ebuild_id_list, packageDict, config_cpv_listDict):
3024 conn=CM.getConnection()
3025 # Get the needed info from packageDict and config_cpv_listDict and put that in buildqueue
3026 # Only add it if ebuild_version in packageDict and config_cpv_listDict match
3027 if config_cpv_listDict is not None:
3028 - message = None
3029 + message = []
3030 # Unpack config_cpv_listDict
3031 for k, v in config_cpv_listDict.iteritems():
3032 config_id = k
3033 @@ -147,183 +159,198 @@ class gobs_package(object):
3034 i = 0
3035 for k, v in packageDict.iteritems():
3036 ebuild_id = ebuild_id_list[i]
3037 - use_flags_list = []
3038 - use_enable_list = []
3039 - for u, s in use_flagsDict.iteritems():
3040 - use_flags_list.append(u)
3041 - use_enable_list.append(s)
3042 - # Comper ebuild_version and add the ebuild_version to buildqueue
3043 - if portage.vercmp(v['ebuild_version_tree'], latest_ebuild_version) == 0:
3044 - add_new_package_buildqueue(conn,ebuild_id, config_id, use_flags_list, use_enable_list, message)
3045 - # B = Build cpv use-flags config
3046 - log_msg = "B %s/%s-%s USE: %s %s" % \
3047 - (v['categories'], v['package'], latest_ebuild_version, use_enable, config_id,)
3048 - add_gobs_logs(conn, log_msg, "info", config_profile)
3049 +
3050 + # Compare and add the cpv to buildqueue
3051 + if build_cpv == k:
3052 + add_new_package_buildqueue(conn, ebuild_id, config_id, use_flagsDict, message)
3053 +
3054 + # B = Build cpv use-flags config
3055 + config_setup = get_config_db(conn, config_id)
3056 +
3057 + # FIXME: log_msg needs a fix to log the use flags correctly.
3058 + log_msg = "B %s:%s USE: %s %s" % \
3059 + (k, v['repo'], use_enable, config_setup,)
3060 + add_gobs_logs(conn, log_msg, "info", config_profile)
3061 i = i +1
3062 CM.putConnection(conn)
3063
3064 def get_package_metadataDict(self, pkgdir, package):
3065 - # Make package_metadataDict
3066 - attDict = {}
3067 - package_metadataDict = {}
3068 - changelog_checksum_tree = portage.checksum.sha256hash(pkgdir + "/ChangeLog")
3069 - changelog_text_tree = get_file_text(pkgdir + "/ChangeLog")
3070 - metadata_xml_checksum_tree = portage.checksum.sha256hash(pkgdir + "/metadata.xml")
3071 - metadata_xml_text_tree = get_file_text(pkgdir + "/metadata.xml")
3072 - attDict['changelog_checksum'] = changelog_checksum_tree[0]
3073 - attDict['changelog_text'] = changelog_text_tree
3074 - attDict['metadata_xml_checksum'] = metadata_xml_checksum_tree[0]
3075 - attDict[' metadata_xml_text'] = metadata_xml_text_tree
3076 - package_metadataDict[package] = attDict
3077 - return package_metadataDict
3078 -
3079 - def add_new_package_db(self, categories, package):
3080 + # Make package_metadataDict
3081 + attDict = {}
3082 + package_metadataDict = {}
3083 + changelog_checksum_tree = portage.checksum.sha256hash(pkgdir + "/ChangeLog")
3084 + changelog_text_tree = get_file_text(pkgdir + "/ChangeLog")
3085 + metadata_xml_checksum_tree = portage.checksum.sha256hash(pkgdir + "/metadata.xml")
3086 + metadata_xml_text_tree = get_file_text(pkgdir + "/metadata.xml")
3087 + attDict['changelog_checksum'] = changelog_checksum_tree[0]
3088 + attDict['changelog_text'] = changelog_text_tree
3089 + attDict['metadata_xml_checksum'] = metadata_xml_checksum_tree[0]
3090 + attDict['metadata_xml_text'] = metadata_xml_text_tree
3091 + package_metadataDict[package] = attDict
3092 + return package_metadataDict
3093 +
3094 + def add_new_package_db(self, categories, package, repo):
3095 conn=CM.getConnection()
3096 - # add new categories package ebuild to tables package and ebuilds
3097 - # C = Checking
3098 - # N = New Package
3099 - log_msg = "C %s/%s" % (categories, package,)
3100 - add_gobs_logs(conn, log_msg, "info", config_profile)
3101 - log_msg = "N %s/%s" % (categories, package,)
3102 - add_gobs_logs(conn, log_msg, "info", config_profile)
3103 - pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR + cp
3104 - categories_dir = self._mysettings['PORTDIR'] + "/" + categories + "/"
3105 - # Get the ebuild list for cp
3106 - ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
3107 - if ebuild_list_tree == []:
3108 - CM.putConnection(conn)
3109 - return
3110 - config_list = get_config_list(conn)
3111 - config_cpv_listDict = self.config_match_ebuild(categories, package, config_list)
3112 - config_id = get_default_config(conn)
3113 - packageDict ={}
3114 - for ebuild_line in sorted(ebuild_list_tree):
3115 - # Make the needed packageDict
3116 - packageDict[ebuild_line] = self.get_packageDict(pkgdir, ebuild_line, categories, package, config_id)
3117 - # Add the ebuild to db
3118 - return_id = add_new_package_sql(conn,packageDict)
3119 - ebuild_id_list = return_id[0]
3120 - package_id_list = return_id[1]
3121 - package_id = package_id_list[0]
3122 - # Add metadataDict to db
3123 - metadataDict = self.get_metadataDict(packageDict, ebuild_id_list)
3124 - add_new_metadata(conn,metadataDict)
3125 - # Add any qa and repoman erro for the ebuild to buildlog
3126 - qa_error = []
3127 - init_manifest = gobs_manifest(self._mysettings, pkgdir)
3128 - manifest_error = init_manifest.digestcheck()
3129 - if manifest_error is not None:
3130 - qa_error.append(manifest_error)
3131 - log_msg = "QA: %s/%s %s" % (categories, package, qa_error,)
3132 - add_gobs_logs(conn, log_msg, "info", config_profile)
3133 - add_qa_repoman(conn,ebuild_id_list, qa_error, packageDict, config_id)
3134 - # Add the ebuild to the buildqueru table if needed
3135 - self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
3136 - # Add some checksum on some files
3137 - package_metadataDict = self.get_package_metadataDict(pkgdir, package)
3138 - add_new_package_metadata(conn,package_id, package_metadataDict)
3139 - # Add the manifest file to db
3140 - try:
3141 - manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
3142 - except:
3143 - manifest_checksum_tree = "0"
3144 - get_manifest_text = "0"
3145 - log_msg = "QA: Can't checksum the Manifest file. %c/%s" % (categories, package,)
3146 - add_gobs_logs(conn, log_msg, "info", config_profile)
3147 - else:
3148 - get_manifest_text = get_file_text(pkgdir + "/Manifest")
3149 - add_new_manifest_sql(conn,package_id, get_manifest_text, manifest_checksum_tree)
3150 - log_msg = "C %s/%s ... Done." % (categories, package)
3151 - add_gobs_logs(conn, log_msg, "info", config_profile)
3152 - CM.putConnection(conn)
3153 + # Add new categories package ebuild to tables package and ebuilds
3154 + # C = Checking
3155 + # N = New Package
3156 + log_msg = "C %s/%s:%s" % (categories, package, repo)
3157 + add_gobs_logs(conn, log_msg, "info", config_profile)
3158 + log_msg = "N %s/%s:%s" % (categories, package, repo)
3159 + add_gobs_logs(conn, log_msg, "info", config_profile)
3160 + pkgdir = self._myportdb.getRepositoryPath(repo) + "/" + categories + "/" + package # Get RepoDIR + cp
3161 +
3162 + # Get the cp manifest file checksum.
3163 + try:
3164 + manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
3165 + except:
3166 + manifest_checksum_tree = "0"
3167 + log_msg = "QA: Can't checksum the Manifest file. %s/%s:%s" % (categories, package, repo,)
3168 + add_gobs_logs(conn, log_msg, "info", config_profile)
3169 + log_msg = "C %s/%s:%s ... Fail." % (categories, package, repo)
3170 + add_gobs_logs(conn, log_msg, "info", config_profile)
3171 + CM.putConnection(conn)
3172 + return
3173 + package_id = add_new_manifest_sql(conn, categories, package, repo, manifest_checksum_tree)
3174 +
3175 + # Get the ebuild list for cp
3176 + mytree = []
3177 + mytree.append(self._myportdb.getRepositoryPath(repo))
3178 + ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=mytree)
3179 + if ebuild_list_tree == []:
3180 + log_msg = "QA: Can't get the ebuilds list. %s/%s:%s" % (categories, package, repo,)
3181 + add_gobs_logs(conn, log_msg, "info", config_profile)
3182 + log_msg = "C %s/%s:%s ... Fail." % (categories, package, repo)
3183 + add_gobs_logs(conn, log_msg, "info", config_profile)
3184 + CM.putConnection(conn)
3185 + return
3186 +
3187 + # set config to default config
3188 + default_config = get_default_config(conn)
3189
3190 - def update_package_db(self, categories, package, package_id):
3191 + # Make the needed packageDict with ebuild infos so we can add it later to the db.
3192 + packageDict ={}
3193 + ebuild_id_list = []
3194 + for cpv in sorted(ebuild_list_tree):
3195 + packageDict[cpv] = self.get_packageDict(pkgdir, cpv, repo, default_config)
3196 +
3197 + # Add new ebuilds to the db
3198 + ebuild_id_list = add_new_ebuild_sql(conn, package_id, packageDict)
3199 +
3200 + # Get the best cpv for the configs and add it to config_cpv_listDict
3201 + configs_id_list = get_config_id_list(conn)
3202 + config_cpv_listDict = self.config_match_ebuild(categories + "/" + package, configs_id_list)
3203 +
3204 + # Add the ebuild to the buildquery table if needed
3205 + self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
3206 +
3207 + log_msg = "C %s/%s:%s ... Done." % (categories, package, repo)
3208 + add_gobs_logs(conn, log_msg, "info", config_profile)
3209 + print(categories, package, repo)
3210 + CM.putConnection(conn)
3211 +
3212 + def update_package_db(self, package_id):
3213 conn=CM.getConnection()
3214 # Update the categories and package with new info
3215 - # C = Checking
3216 - log_msg = "C %s/%s" % (categories, package,)
3217 - add_gobs_logs(conn, log_msg, "info", config_profile)
3218 - pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR with cp
3219 - # Get the checksum from the Manifest file.
3220 - try:
3221 - manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
3222 - except:
3223 - # We did't fine any Manifest file
3224 - manifest_checksum_tree = '0'
3225 - ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
3226 - if ebuild_list_tree == []:
3227 - CM.putConnection(conn)
3228 - log_msg = "QA: No Manifest file or ebuilds in %s/%s." % (categories, package,)
3229 - add_gobs_logs(conn, log_msg, "info", config_profile)
3230 - log_msg = "C %s/%s ... Done." % (categories, package,)
3231 - add_gobs_logs(conn, log_msg, "info", config_profile)
3232 - return
3233 + # C = Checking
3234 + cp, repo = get_cp_repo_from_package_id(conn, package_id)
3235 + element = cp.split('/')
3236 + package = element[1]
3237 + log_msg = "C %s:%s" % (cp, repo)
3238 + add_gobs_logs(conn, log_msg, "info", config_profile)
3239 + pkgdir = self._myportdb.getRepositoryPath(repo) + "/" + cp # Get RepoDIR + cp
3240 +
3241 + # Get the cp mainfest file checksum
3242 + try:
3243 + manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
3244 + except:
3245 + manifest_checksum_tree = "0"
3246 + log_msg = "QA: Can't checksum the Manifest file. %s:%s" % (cp, repo,)
3247 + add_gobs_logs(conn, log_msg, "info", config_profile)
3248 + log_msg = "C %s:%s ... Fail." % (cp, repo)
3249 + add_gobs_logs(conn, log_msg, "info", config_profile)
3250 + CM.putConnection(conn)
3251 + return
3252 +
3253 # Get the checksum from the db in package table
3254 - manifest_checksum_db = get_manifest_db(conn,package_id)
3255 - # if we have the same checksum return else update the package
3256 - ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
3257 - if manifest_checksum_tree != manifest_checksum_db:
3258 + manifest_checksum_db = get_manifest_db(conn, package_id)
3259 +
3260 + # if we have the same checksum return else update the package
3261 + if manifest_checksum_tree != manifest_checksum_db:
3262 +
3263 # U = Update
3264 - log_msg = "U %s/%s" % (categories, package)
3265 - add_gobs_logs(conn, log_msg, "info", config_profile)
3266 - # Get package_metadataDict and update the db with it
3267 - package_metadataDict = self.get_package_metadataDict(pkgdir, package)
3268 - update_new_package_metadata(conn,package_id, package_metadataDict)
3269 - # Get config_cpv_listDict
3270 - config_list = get_config_list(conn)
3271 - config_cpv_listDict = self.config_match_ebuild(categories, package, config_list)
3272 - config_id = get_default_config(conn)
3273 - packageDict ={}
3274 - for ebuild_line in sorted(ebuild_list_tree):
3275 - old_ebuild_list = []
3276 - # split out ebuild version
3277 - ebuild_version_tree = portage.versions.cpv_getversion(ebuild_line)
3278 - # Get the checksum of the ebuild in tree and db
3279 - ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")[0]
3280 - ebuild_version_manifest_checksum_db = get_ebuild_checksum(conn,package_id, ebuild_version_tree)
3281 - # Check if the checksum have change
3282 - if ebuild_version_manifest_checksum_db is None or ebuild_version_checksum_tree != ebuild_version_manifest_checksum_db:
3283 - # Get packageDict for ebuild
3284 - packageDict[ebuild_line] = self.get_packageDict(pkgdir, ebuild_line, categories, package, config_id)
3285 - if ebuild_version_manifest_checksum_db is None:
3286 - # N = New ebuild
3287 - log_msg = "N %s/%s-%s" % (categories, package, ebuild_version_tree,)
3288 - add_gobs_logs(conn, log_msg, "info", config_profile)
3289 - else:
3290 - # U = Updated ebuild
3291 - log_msg = "U %s/%s-%s" % (categories, package, ebuild_version_tree,)
3292 - add_gobs_logs(conn, log_msg, "info", config_profile)
3293 - # Fix so we can use add_new_package_sql(packageDict) to update the ebuilds
3294 - old_ebuild_list.append(ebuild_version_tree)
3295 - add_old_ebuild(conn,package_id, old_ebuild_list)
3296 - update_active_ebuild(conn,package_id, ebuild_version_tree)
3297 - # Use packageDictand and metadataDict to update the db
3298 - return_id = add_new_package_sql(conn,packageDict)
3299 - ebuild_id_list = return_id[0]
3300 - metadataDict = self.get_metadataDict(packageDict, ebuild_id_list)
3301 - add_new_metadata(conn,metadataDict)
3302 - # Get the text in Manifest and update it
3303 - try:
3304 - get_manifest_text = get_file_text(pkgdir + "/Manifest")
3305 - except:
3306 - get_manifest_text = "0"
3307 - update_manifest_sql(conn,package_id, get_manifest_text, manifest_checksum_tree)
3308 - # Add any qa and repoman erros to buildlog
3309 - qa_error = []
3310 - init_manifest = gobs_manifest(self._mysettings, pkgdir)
3311 - manifest_error = init_manifest.digestcheck()
3312 - if manifest_error is not None:
3313 - qa_error.append(manifest_error)
3314 - log_msg = "QA: %s/%s %s" % (categories, package, qa_error,)
3315 - add_gobs_logs(conn, log_msg, "info", config_profile)
3316 - add_qa_repoman(conn,ebuild_id_list, qa_error, packageDict, config_id)
3317 - # Add the ebuild to the buildqueru table if needed
3318 - self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
3319 - # Mark or remove any old ebuilds
3320 - init_old_cpv = gobs_old_cpv(self._myportdb, self._mysettings)
3321 - init_old_cpv.mark_old_ebuild_db(categories, package, package_id)
3322 - log_msg = "C %s/%s ... Done." % (categories, package)
3323 - add_gobs_logs(conn, log_msg, "info", config_profile)
3324 + log_msg = "U %s:%s" % (cp, repo)
3325 + add_gobs_logs(conn, log_msg, "info", config_profile)
3326 +
3327 + # Get the ebuild list for cp
3328 + mytree = []
3329 + mytree.append(self._myportdb.getRepositoryPath(repo))
3330 + ebuild_list_tree = self._myportdb.cp_list(cp, use_cache=1, mytree=mytree)
3331 + if ebuild_list_tree == []:
3332 + log_msg = "QA: Can't get the ebuilds list. %s:%s" % (cp, repo,)
3333 + add_gobs_logs(conn, log_msg, "info", config_profile)
3334 + log_msg = "C %s:%s ... Fail." % (cp, repo)
3335 + add_gobs_logs(conn, log_msg, "info", config_profile)
3336 + CM.putConnection(conn)
3337 + return
3338 + packageDict ={}
3339 + for cpv in sorted(ebuild_list_tree):
3340 + old_ebuild_list = []
3341 +
3342 + # split out ebuild version
3343 + ebuild_version_tree = portage.versions.cpv_getversion(cpv)
3344 +
3345 + # Get the checksum of the ebuild in tree and db
3346 + # Make a checksum of the ebuild
3347 + try:
3348 + ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")[0]
3349 + except:
3350 + ebuild_version_checksum_tree = '0'
3351 + manifest_checksum_tree = '0'
3352 + log_msg = "QA: Can't checksum the ebuild file. %s on repo %s" % (cpv, repo,)
3353 + add_gobs_logs(conn, log_msg, "info", config_profile)
3354 + log_msg = "C %s:%s ... Fail." % (cpv, repo)
3355 + add_gobs_logs(conn, log_msg, "info", config_profile)
3356 + ebuild_version_manifest_checksum_db = get_ebuild_checksum(conn, package_id, ebuild_version_tree)
3357 +
3358 +
3359 + # Check if the checksum have change
3360 + if ebuild_version_manifest_checksum_db is None or ebuild_version_checksum_tree != ebuild_version_manifest_checksum_db:
3361 +
3362 + # set config to default config
3363 + default_config = get_default_config(conn)
3364 +
3365 + # Get packageDict for ebuild
3366 + packageDict[cpv] = self.get_packageDict(pkgdir, cpv, repo, default_config)
3367 + if ebuild_version_manifest_checksum_db is None:
3368 + # N = New ebuild
3369 + log_msg = "N %s:%s" % (cpv, repo,)
3370 + add_gobs_logs(conn, log_msg, "info", config_profile)
3371 + else:
3372 + # U = Updated ebuild
3373 + log_msg = "U %s:%s" % (cpv, repo,)
3374 + add_gobs_logs(conn, log_msg, "info", config_profile)
3375 +
3376 + # Fix so we can use add_new_ebuild_sql() to update the ebuilds
3377 + old_ebuild_list.append(ebuild_version_tree)
3378 + add_old_ebuild(conn, package_id, old_ebuild_list)
3379 + update_active_ebuild_to_fales(conn, package_id, ebuild_version_tree)
3380 + # Use packageDictand to update the db
3381 + # Add new ebuilds to the db
3382 + ebuild_id_list = add_new_ebuild_sql(conn, package_id, packageDict)
3383 +
3384 + # update the cp manifest checksum
3385 + update_manifest_sql(conn, package_id, manifest_checksum_tree)
3386 +
3387 + # Get the best cpv for the configs and add it to config_cpv_listDict
3388 + configs_id_list = get_config_id_list(conn)
3389 + config_cpv_listDict = self.config_match_ebuild(cp, configs_id_list)
3390 +
3391 + # Add the ebuild to the buildqueru table if needed
3392 + self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
3393 +
3394 + log_msg = "C %s:%s ... Done." % (cp, repo)
3395 + add_gobs_logs(conn, log_msg, "info", config_profile)
3396 CM.putConnection(conn)
3397
3398 def update_ebuild_db(self, build_dict):
3399
3400 diff --git a/gobs/pym/pgsql.py b/gobs/pym/pgsql.py
3401 index be557d2..f8de5ff 100644
3402 --- a/gobs/pym/pgsql.py
3403 +++ b/gobs/pym/pgsql.py
3404 @@ -225,7 +225,7 @@ def add_new_metadata(connection, metadataDict):
3405 def add_new_package_sql(connection, packageDict):
3406 #lets have a new cursor for each metod as per best practice
3407 cursor = connection.cursor()
3408 - sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
3409 + sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
3410 ebuild_id_list = []
3411 package_id_list = []
3412 for k, v in packageDict.iteritems():
3413 @@ -235,7 +235,7 @@ def add_new_package_sql(connection, packageDict):
3414 v['ebuild_version_metadata_tree'][5],v['ebuild_version_metadata_tree'][6], v['ebuild_version_metadata_tree'][7],
3415 v['ebuild_version_metadata_tree'][9], v['ebuild_version_metadata_tree'][11],
3416 v['ebuild_version_metadata_tree'][13],v['ebuild_version_metadata_tree'][14], v['ebuild_version_metadata_tree'][15],
3417 - v['ebuild_version_metadata_tree'][16]]
3418 + v['ebuild_version_metadata_tree'][16], v['ebuild_version_metadata_tree'][4]]
3419 cursor.execute(sqlQ, params)
3420 mid = cursor.fetchone()
3421 mid=mid[0]
3422 @@ -248,7 +248,7 @@ def add_new_package_sql(connection, packageDict):
3423 def add_new_ebuild_sql(connection, packageDict, new_ebuild_list):
3424 #lets have a new cursor for each metod as per best practice
3425 cursor = connection.cursor()
3426 - sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
3427 + sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
3428 ebuild_id_list = []
3429 package_id_list = []
3430 for k, v in packageDict.iteritems():
3431 @@ -260,7 +260,7 @@ def add_new_ebuild_sql(connection, packageDict, new_ebuild_list):
3432 v['ebuild_version_metadata_tree'][5],v['ebuild_version_metadata_tree'][6], v['ebuild_version_metadata_tree'][7],
3433 v['ebuild_version_metadata_tree'][9], v['ebuild_version_metadata_tree'][11],
3434 v['ebuild_version_metadata_tree'][13],v['ebuild_version_metadata_tree'][14], v['ebuild_version_metadata_tree'][15],
3435 - v['ebuild_version_metadata_tree'][16]]
3436 + v['ebuild_version_metadata_tree'][16], v['ebuild_version_metadata_tree'][4]]
3437 cursor.execute(sqlQ, params)
3438 mid = cursor.fetchone()
3439 mid=mid[0]
3440
3441 diff --git a/gobs/pym/pgsql_querys.py b/gobs/pym/pgsql_querys.py
3442 new file mode 100644
3443 index 0000000..9184a20
3444 --- /dev/null
3445 +++ b/gobs/pym/pgsql_querys.py
3446 @@ -0,0 +1,308 @@
3447 +#every function takes a connection as a parameter that is provided by the CM
3448 +from __future__ import print_function
3449 +
3450 +# Queryes to add the logs
def add_gobs_logs(connection, log_msg, log_type, config):
	"""Insert one row into logs, resolving the config name to its config_id."""
	sql = 'INSERT INTO logs (config_id, type, msg) VALUES ( (SELECT config_id FROM configs WHERE config = %s), %s, %s )'
	cur = connection.cursor()
	cur.execute(sql, (config, log_type, log_msg))
	connection.commit()
3456 +
# Queries to handle the jobs table.
# BUGFIX: these seven functions were indented one level too far, which made
# them local definitions inside add_gobs_logs() and unreachable at module
# scope; they are dedented to top level here.
def get_jobs_id(connection, config_profile):
	"""Return the sorted list of waiting job ids for a config, or None when there are none."""
	cursor = connection.cursor()
	sqlQ = "SELECT job_id FROM jobs WHERE status = 'Waiting' AND config_id = (SELECT config_id FROM configs WHERE config = %s)"
	cursor.execute(sqlQ, (config_profile,))
	entries = cursor.fetchall()
	# fetchall() yields an empty list (never None) when nothing matches.
	if not entries:
		return None
	jobs_id = []
	for job_id in entries:
		jobs_id.append(job_id[0])
	return sorted(jobs_id)

def get_job(connection, job_id):
	"""Return the job name/command stored for job_id."""
	cursor = connection.cursor()
	sqlQ = 'SELECT job FROM jobs WHERE job_id = %s'
	cursor.execute(sqlQ, (job_id,))
	job = cursor.fetchone()
	return job[0]

def update_job_list(connection, status, job_id):
	"""Set the status column of one jobs row."""
	cursor = connection.cursor()
	sqlQ = 'UPDATE jobs SET status = %s WHERE job_id = %s'
	cursor.execute(sqlQ, (status, job_id,))
	connection.commit()

# Queries to handle the configs* tables.
def get_config_list_all(connection):
	"""Return every config name as a list of 1-tuples (raw fetchall rows)."""
	cursor = connection.cursor()
	sqlQ = 'SELECT config FROM configs'
	cursor.execute(sqlQ)
	return cursor.fetchall()

def update_make_conf(connection, configsDict):
	"""Refresh configs_metadata from a {config_name: info_dict} mapping."""
	cursor = connection.cursor()
	sqlQ1 = 'UPDATE configs_metadata SET checksum = %s, make_conf_text = %s, active = %s, config_error = %s WHERE config_id = (SELECT config_id FROM configs WHERE config = %s)'
	# .items() instead of the Python2-only .iteritems().
	for k, v in configsDict.items():
		params = [v['make_conf_checksum_tree'], v['make_conf_text'], v['active'], v['config_error'], k]
		cursor.execute(sqlQ1, params)
	connection.commit()

def get_default_config(connection):
	"""Return the default config row (1-tuple); callers index [0] for the name."""
	cursor = connection.cursor()
	sqlQ = "SELECT config FROM configs WHERE default_config = 'True'"
	cursor.execute(sqlQ)
	return cursor.fetchone()

def update_repo_db(connection, repo_list):
	"""Insert any repo names from repo_list that are not yet in repos."""
	cursor = connection.cursor()
	sqlQ1 = 'SELECT repo_id FROM repos WHERE repo = %s'
	sqlQ2 = 'INSERT INTO repos (repo) VALUES ( %s )'
	for repo in repo_list:
		cursor.execute(sqlQ1, (repo,))
		if cursor.fetchone() is None:
			cursor.execute(sqlQ2, (repo,))
	connection.commit()
	return
def get_package_id(connection, categories, package, repo):
	"""Return packages.package_id for (category, package, repo), or None."""
	sql = 'SELECT package_id FROM packages WHERE category = %s AND package = %s AND repo_id = (SELECT repo_id FROM repos WHERE repo = %s)'
	cur = connection.cursor()
	cur.execute(sql, (categories, package, repo))
	row = cur.fetchone()
	return None if row is None else row[0]
3525 +
3526 +# Add new info to the packages table
3527 +
def get_repo_id(connection, repo):
	"""Return repos.repo_id for a repo name, or None when unknown."""
	sql = 'SELECT repo_id FROM repos WHERE repo = %s'
	cur = connection.cursor()
	cur.execute(sql, (repo,))
	row = cur.fetchone()
	return None if row is None else row[0]
3536 +
def add_new_manifest_sql(connection, categories, package, repo, manifest_checksum_tree):
	"""Insert a new active packages row and return its package_id."""
	sql = "INSERT INTO packages (category, package, repo_id, checksum, active) VALUES (%s, %s, %s, %s, 'True') RETURNING package_id"
	cur = connection.cursor()
	rid = get_repo_id(connection, repo)
	cur.execute(sql, (categories, package, rid, manifest_checksum_tree,))
	pid = cur.fetchone()[0]
	connection.commit()
	return pid
3545 +
def get_restriction_id(connection, restriction):
	"""Return restrictions.restriction_id for a restriction string, or None."""
	sql = 'SELECT restriction_id FROM restrictions WHERE restriction = %s'
	cur = connection.cursor()
	cur.execute(sql, (restriction,))
	row = cur.fetchone()
	return None if row is None else row[0]
3554 +
def get_use_id(connection, use_flag):
	"""Return uses.use_id for a USE flag, or None when the flag is unknown."""
	sql = 'SELECT use_id FROM uses WHERE flag = %s'
	cur = connection.cursor()
	cur.execute(sql, (use_flag,))
	row = cur.fetchone()
	return None if row is None else row[0]
3563 +
def get_keyword_id(connection, keyword):
	"""Return keywords.keyword_id for a keyword, or None when unknown."""
	sql = 'SELECT keyword_id FROM keywords WHERE keyword = %s'
	cur = connection.cursor()
	cur.execute(sql, (keyword,))
	row = cur.fetchone()
	return None if row is None else row[0]
3572 +
def add_new_ebuild_metadata_sql(connection, ebuild_id, keywords, restrictions, iuse_list):
	"""Store keyword, restriction and IUSE metadata rows for one ebuild.

	Unknown keywords/restrictions/flags are inserted into their lookup
	tables on demand.  IUSE prefixes: '+' -> enable, '-' -> disable
	(prefix stripped); no prefix -> disable.  Keyword prefixes:
	'~' -> unstable, '-' -> testing; no prefix -> stable.
	"""
	cursor = connection.cursor()
	sqlQ1 = 'INSERT INTO keywords (keyword) VALUES ( %s ) RETURNING keyword_id'
	sqlQ3 = 'INSERT INTO restrictions (restriction) VALUES ( %s ) RETURNING restriction_id'
	sqlQ4 = 'INSERT INTO ebuilds_restrictions (ebuild_id, restriction_id) VALUES ( %s, %s )'
	sqlQ5 = 'INSERT INTO uses (flag) VALUES ( %s ) RETURNING use_id'
	sqlQ6 = 'INSERT INTO ebuilds_iuse (ebuild_id, use_id, status) VALUES ( %s, %s, %s)'
	sqlQ7 = 'INSERT INTO ebuilds_keywords (ebuild_id, keyword_id, status) VALUES ( %s, %s, %s)'
	# FIXME restriction need some filter as iuse and keyword have.
	for restriction in restrictions:
		restriction_id = get_restriction_id(connection, restriction)
		if restriction_id is None:
			cursor.execute(sqlQ3, (restriction,))
			restriction_id = cursor.fetchone()[0]
		cursor.execute(sqlQ4, (ebuild_id, restriction_id,))
	for iuse in iuse_list:
		set_iuse = 'disable'
		if iuse[0] in ["+"]:
			iuse = iuse[1:]
			set_iuse = 'enable'
		elif iuse[0] in ["-"]:
			iuse = iuse[1:]
		use_id = get_use_id(connection, iuse)
		if use_id is None:
			cursor.execute(sqlQ5, (iuse,))
			use_id = cursor.fetchone()[0]
		# BUGFIX: the ebuilds_iuse insert must run once per flag inside this
		# loop; it was fused onto the trailing connection.commit() line
		# ("connection.commit()cursor.execute(sqlQ6, ...)"), a syntax error.
		cursor.execute(sqlQ6, (ebuild_id, use_id, set_iuse,))
	for keyword in keywords:
		set_keyword = 'stable'
		if keyword[0] in ["~"]:
			keyword = keyword[1:]
			set_keyword = 'unstable'
		elif keyword[0] in ["-"]:
			keyword = keyword[1:]
			set_keyword = 'testing'
		keyword_id = get_keyword_id(connection, keyword)
		if keyword_id is None:
			cursor.execute(sqlQ1, (keyword,))
			keyword_id = cursor.fetchone()[0]
		cursor.execute(sqlQ7, (ebuild_id, keyword_id, set_keyword,))
	connection.commit()
3613 +
def add_new_ebuild_sql(connection, package_id, ebuildDict):
	"""Insert new active ebuilds rows (+ metadata) and return their ebuild_ids.

	ebuildDict maps cpv -> dict with keys 'ebuild_version_tree',
	'ebuild_version_checksum_tree', 'ebuild_version_revision_tree' and
	'ebuild_version_metadata_tree' (indexed metadata list: [4] RESTRICT,
	[8] KEYWORDS, [10] IUSE — inferred from the splits below; confirm
	against get_packageDict).
	"""
	cursor = connection.cursor()
	sqlQ1 = 'SELECT repo_id FROM packages WHERE package_id = %s'
	sqlQ2 = "INSERT INTO ebuilds (package_id, version, checksum, active) VALUES (%s, %s, %s, 'True') RETURNING ebuild_id"
	sqlQ4 = "INSERT INTO ebuilds_metadata (ebuild_id, revision) VALUES (%s, %s)"
	ebuild_id_list = []
	cursor.execute(sqlQ1, (package_id,))
	# NOTE(review): repo_id is fetched but never used; kept for parity — confirm before removing.
	repo_id = cursor.fetchone()[0]
	# Compat fix: .items() instead of the Python2-only .iteritems().
	for k, v in ebuildDict.items():
		cursor.execute(sqlQ2, (package_id, v['ebuild_version_tree'], v['ebuild_version_checksum_tree'],))
		ebuild_id = cursor.fetchone()[0]
		cursor.execute(sqlQ4, (ebuild_id, v['ebuild_version_revision_tree'],))
		ebuild_id_list.append(ebuild_id)
		restrictions = []
		keywords = []
		iuse = []
		for i in v['ebuild_version_metadata_tree'][4].split():
			restrictions.append(i)
		for i in v['ebuild_version_metadata_tree'][8].split():
			keywords.append(i)
		for i in v['ebuild_version_metadata_tree'][10].split():
			iuse.append(i)
		add_new_ebuild_metadata_sql(connection, ebuild_id, keywords, restrictions, iuse)
	connection.commit()
	return ebuild_id_list
3639 +
def get_config_id_list(connection):
	"""Return config_ids of active non-default configs, or None when none exist."""
	cursor = connection.cursor()
	sqlQ = "SELECT configs.config_id FROM configs, configs_metadata WHERE configs.default_config = 'False' AND configs_metadata.active = 'True' AND configs.config_id = configs_metadata.config_id"
	cursor.execute(sqlQ)
	entries = cursor.fetchall()
	# BUGFIX: DB-API fetchall() returns an empty *list*, so the old
	# `entries == ()` test never fired and the empty case fell through.
	if not entries:
		return None
	return [config_id[0] for config_id in entries]
3652 +
def get_config_db(connection, config_id):
	"""Return the config name for config_id, or None when missing."""
	sql = 'SELECT config FROM configs WHERE config_id = %s'
	cur = connection.cursor()
	cur.execute(sql, (config_id,))
	row = cur.fetchone()
	return None if row is None else row[0]
3661 +
def add_new_package_buildqueue(connection, ebuild_id, config_id, use_flagsDict, messages):
	"""Queue a build job for (ebuild_id, config_id) with its USE flag states.

	use_flagsDict maps flag name -> status; messages is currently unused
	(kept for interface compatibility).
	"""
	cursor = connection.cursor()
	sqlQ1 = 'INSERT INTO build_jobs (ebuild_id, config_id) VALUES (%s, %s) RETURNING build_job_id'
	sqlQ3 = 'INSERT INTO build_jobs_use (build_job_id, use_id, status) VALUES (%s, (SELECT use_id FROM uses WHERE flag = %s), %s)'
	cursor.execute(sqlQ1, (ebuild_id, config_id,))
	build_job_id = cursor.fetchone()[0]
	# Compat fix: .items() instead of the Python2-only .iteritems().
	for flag, status in use_flagsDict.items():
		cursor.execute(sqlQ3, (build_job_id, flag, status,))
	connection.commit()
3671 +
def get_manifest_db(connection, package_id):
	"""Return the stored Manifest checksum for package_id, or None."""
	sql = 'SELECT checksum FROM packages WHERE package_id = %s'
	cur = connection.cursor()
	cur.execute(sql, (package_id,))
	row = cur.fetchone()
	return None if row is None else row[0]
3681 +
def get_cp_from_package_id(connection, package_id):
	"""Return the "category/package" string for package_id as a 1-tuple row.

	Note: this returns the raw fetchone() row; callers index [0].
	"""
	sql = "SELECT ARRAY_TO_STRING(ARRAY[category, package] , '/') AS cp FROM packages WHERE package_id = %s"
	cur = connection.cursor()
	cur.execute(sql, (package_id,))
	return cur.fetchone()
3687 +
def get_cp_repo_from_package_id(connection, package_id):
	"""Return the (cp, repo_name) pair for a package_id."""
	sql = 'SELECT repos.repo FROM repos, packages WHERE repos.repo_id = packages.repo_id AND packages.package_id = %s'
	cp_row = get_cp_from_package_id(connection, package_id)
	cur = connection.cursor()
	cur.execute(sql, (package_id,))
	repo_row = cur.fetchone()
	return cp_row[0], repo_row[0]
3695 +
def get_ebuild_checksum(connection, package_id, ebuild_version_tree):
	"""Return the checksum of the active ebuild row for (package_id, version), or None."""
	sql = "SELECT checksum FROM ebuilds WHERE package_id = %s AND version = %s AND active = 'True'"
	cur = connection.cursor()
	cur.execute(sql, (package_id, ebuild_version_tree))
	row = cur.fetchone()
	return None if row is None else row[0]
3705 +
def add_old_ebuild(connection, package_id, old_ebuild_list):
	"""Deactivate old ebuild versions and drop their queued build jobs.

	old_ebuild_list holds version strings (see the caller, which appends
	ebuild_version_tree values).
	"""
	cursor = connection.cursor()
	sqlQ1 = "UPDATE ebuilds SET active = 'False' WHERE package_id = %s AND version = %s"
	sqlQ2 = "SELECT ebuild_id FROM ebuilds WHERE package_id = %s AND version = %s AND active = 'True'"
	sqlQ3 = "SELECT build_job_id FROM build_jobs WHERE ebuild_id = %s"
	sqlQ4 = 'DELETE FROM build_jobs_use WHERE build_job_id = %s'
	sqlQ5 = 'DELETE FROM build_jobs WHERE build_job_id = %s'
	for old_ebuild in old_ebuild_list:
		# BUGFIX: the old code passed old_ebuild[0] — the first *character*
		# of the version string — to every query; use the full version.
		# (fetchall() returns a list, never None, so iterate it directly.)
		cursor.execute(sqlQ2, (package_id, old_ebuild,))
		for ebuild_id in cursor.fetchall():
			cursor.execute(sqlQ3, (ebuild_id[0],))
			for build_job_id in cursor.fetchall():
				cursor.execute(sqlQ4, (build_job_id[0],))
				cursor.execute(sqlQ5, (build_job_id[0],))
		cursor.execute(sqlQ1, (package_id, old_ebuild,))
	connection.commit()
3726 +
def update_active_ebuild_to_fales(connection, package_id, ebuild_version_tree):
	"""Deactivate the active ebuilds row for (package_id, version).

	NOTE: the misspelled name ("fales" for "false") is kept deliberately —
	callers reference this exact name.
	"""
	sql = "UPDATE ebuilds SET active = 'False' WHERE package_id = %s AND version = %s AND active = 'True'"
	cur = connection.cursor()
	cur.execute(sql, (package_id, ebuild_version_tree))
	connection.commit()
3732 +
def update_manifest_sql(connection, package_id, manifest_checksum_tree):
	"""Store a new Manifest checksum on the packages row."""
	sql = 'UPDATE packages SET checksum = %s WHERE package_id = %s'
	cur = connection.cursor()
	cur.execute(sql, (manifest_checksum_tree, package_id,))
	connection.commit()
3738 +
def get_build_jobs_id_list_config(connection, config_id):
	"""Return all build_job_id rows for a config (raw fetchall list of 1-tuples)."""
	sql = 'SELECT build_job_id FROM build_jobs WHERE config_id = %s'
	cur = connection.cursor()
	cur.execute(sql, (config_id,))
	return cur.fetchall()
3745 +
def del_old_build_jobs(connection, queue_id):
	"""Delete one build job and its child rows (use flags, retest entries).

	queue_id -- the build_jobs.build_job_id to remove.
	"""
	cursor = connection.cursor()
	sqlQ1 = 'DELETE FROM build_jobs_use WHERE build_job_id = %s'
	sqlQ2 = 'DELETE FROM build_jobs_retest WHERE build_job_id = %s'
	sqlQ3 = 'DELETE FROM build_jobs WHERE build_job_id = %s'
	# BUGFIX: the body referenced an undefined name `build_job_id`
	# (NameError on every call); the id to delete is the queue_id parameter.
	cursor.execute(sqlQ1, (queue_id,))
	cursor.execute(sqlQ2, (queue_id,))
	cursor.execute(sqlQ3, (queue_id,))
	connection.commit()
3755
3756 diff --git a/gobs/pym/repoman_gobs.py b/gobs/pym/repoman_gobs.py
3757 index ef10f9c..adb4466 100644
3758 --- a/gobs/pym/repoman_gobs.py
3759 +++ b/gobs/pym/repoman_gobs.py
3760 @@ -15,11 +15,13 @@ class gobs_repoman(object):
3761 self._mysettings = mysettings
3762 self._myportdb = myportdb
3763
3764 - def check_repoman(self, categories, package, ebuild_version_tree, config_id):
3765 + def check_repoman(self, pkgdir, cpv, repo, config_id):
3766 # We run repoman run_checks on the ebuild
3767 - pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package
3768 + ebuild_version_tree = portage.versions.cpv_getversion(cpv)
3769 + element = portage.versions.cpv_getkey(cpv).split('/')
3770 + categories = element[0]
3771 + package = element[1]
3772 full_path = pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild"
3773 - cpv = categories + "/" + package + "-" + ebuild_version_tree
3774 root = '/'
3775 trees = {
3776 root : {'porttree' : portage.portagetree(root, settings=self._mysettings)}
3777 @@ -28,7 +30,7 @@ class gobs_repoman(object):
3778 allvars = set(x for x in portage.auxdbkeys if not x.startswith("UNUSED_"))
3779 allvars.update(Package.metadata_keys)
3780 allvars = sorted(allvars)
3781 - myaux = dict(zip(allvars, self._myportdb.aux_get(cpv, allvars)))
3782 + myaux = dict(zip(allvars, self._myportdb.aux_get(cpv, allvars, myrepo=repo)))
3783 pkg = Package(cpv=cpv, metadata=myaux, root_config=root_config, type_name='ebuild')
3784 fails = []
3785 try:
3786
3787 diff --git a/gobs/pym/text.py b/gobs/pym/text.py
3788 index 2f1f689..3f7b040 100644
3789 --- a/gobs/pym/text.py
3790 +++ b/gobs/pym/text.py
3791 @@ -16,7 +16,7 @@ def get_file_text(filename):
3792 textfile.close()
3793 return text
3794
3795 -def get_ebuild_text(filename):
3796 +def get_ebuild_cvs_revision(filename):
3797 """Return the ebuild contents"""
3798 try:
3799 ebuildfile = open(filename)
3800 @@ -33,7 +33,7 @@ def get_ebuild_text(filename):
3801 cvs_revision = field[3]
3802 except:
3803 cvs_revision = ''
3804 - return text, cvs_revision
3805 + return cvs_revision
3806
3807 def get_log_text_list(filename):
3808 """Return the log contents as a list"""
3809
3810 diff --git a/gobs/pym/updatedb.py b/gobs/pym/updatedb.py
3811 index e643fc8..fb185d2 100755
3812 --- a/gobs/pym/updatedb.py
3813 +++ b/gobs/pym/updatedb.py
3814 @@ -18,35 +18,25 @@ from gobs.ConnectionManager import connectionManager
3815 CM=connectionManager(gobs_settings_dict)
3816 #selectively import the pgsql/mysql querys
3817 if CM.getName()=='pgsql':
3818 - from gobs.pgsql import *
3819 + from gobs.pgsql_querys import *
3820 +if CM.getName()=='mysql':
3821 + from gobs.mysql_querys import *
3822
3823 from gobs.check_setup import check_make_conf
3824 -from gobs.arch import gobs_arch
3825 from gobs.package import gobs_package
3826 -from gobs.categories import gobs_categories
3827 -from gobs.old_cpv import gobs_old_cpv
3828 -from gobs.categories import gobs_categories
3829 -from gobs.sync import git_pull, sync_tree
3830 import portage
3831
3832 def init_portage_settings():
3833 -
3834 - """ Get the BASE Setup/Config for portage.settings
3835 - @type: module
3836 - @module: The SQL Backend
3837 - @type: dict
3838 - @parms: config options from the config file (host_setup_root)
3839 - @rtype: settings
3840 - @returns new settings
3841 - """
3842 # check config setup
3843 conn=CM.getConnection()
3844 check_make_conf()
3845 log_msg = "Check configs done"
3846 add_gobs_logs(conn, log_msg, "info", config_profile)
3847 +
3848 # Get default config from the configs table and default_config=1
3849 config_id = get_default_config(conn) # HostConfigDir = table configs id
3850 default_config_root = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id[0] + "/"
3851 +
3852 # Set config_root (PORTAGE_CONFIGROOT) to default_config_root
3853 mysettings = portage.config(config_root = default_config_root)
3854 log_msg = "Setting default config to: %s" % (config_id[0],)
3855 @@ -54,59 +44,67 @@ def init_portage_settings():
3856 CM.putConnection(conn)
3857 return mysettings
3858
3859 -def update_cpv_db_pool(mysettings, myportdb, init_package, package_line):
3860 +def update_cpv_db_pool(mysettings, myportdb, init_package, package_line, repo):
3861 conn=CM.getConnection()
3862 # split the cp to categories and package
3863 element = package_line.split('/')
3864 categories = element[0]
3865 package = element[1]
3866 +
3867 # Check if we don't have the cp in the package table
3868 - package_id = have_package_db(conn,categories, package)
3869 + package_id = get_package_id(conn, categories, package, repo)
3870 CM.putConnection(conn)
3871 if package_id is None:
3872 +
3873 # Add new package with ebuilds
3874 init_package.add_new_package_db(categories, package)
3875 +
3876 # Check if we have the cp in the package table
3877 elif package_id is not None:
3878 +
3879 # Update the packages with ebuilds
3880 - init_package.update_package_db(categories, package, package_id)
3881 - # Update the metadata for categories
3882 - init_categories = gobs_categories(mysettings)
3883 - init_categories.update_categories_db(categories)
3884 + init_package.update_package_db(package_id)
3885 + return
3886
3887 def update_cpv_db():
3888 - """Code to update the cpv in the database.
3889 - @type:settings
3890 - @parms: portage.settings
3891 - @type: module
3892 - @module: The SQL Backend
3893 - @type: dict
3894 - @parms: config options from the config file
3895 - """
3896 conn=CM.getConnection()
3897 mysettings = init_portage_settings()
3898 log_msg = "Checking categories, package, ebuilds"
3899 add_gobs_logs(conn, log_msg, "info", config_profile)
3900 - CM.putConnection(conn)
3901 +
3902 # Setup portdb, package
3903 myportdb = portage.portdbapi(mysettings=mysettings)
3904 init_package = gobs_package(mysettings, myportdb)
3905 - package_id_list_tree = []
3906 - # Will run some update checks and update package if needed
3907 - # Get categories/package list from portage
3908 - package_list_tree = myportdb.cp_all()
3909 - # Use all exept 2 cores when multiprocessing
3910 - pool_cores= multiprocessing.cpu_count()
3911 - if pool_cores >= 3:
3912 - use_pool_cores = pool_cores - 2
3913 - else:
3914 - use_pool_cores = 1
3915 - pool = multiprocessing.Pool(processes=use_pool_cores)
3916 - # Run the update package for all package in the list in
3917 - # a multiprocessing pool
3918 - for package_line in sorted(package_list_tree):
3919 - #update_cpv_db_pool(mysettings, package_line)
3920 - pool.apply_async(update_cpv_db_pool, (mysettings, myportdb, init_package, package_line,))
3921 + repo_list = ()
3922 + repos_trees_list = []
3923 +
3924 + # Use all cores when multiprocessing
3925 + pool_cores= multiprocessing.cpu_count()
3926 + pool = multiprocessing.Pool(processes=pool_cores)
3927 +
3928 + # Will run some update checks and update package if needed
3929 + # Get categories/package list from portage and repos
3930 +
3931 + # Get the repos and update the repos db
3932 + repo_list = myportdb.getRepositories()
3933 + update_repo_db(conn, repo_list)
3934 + CM.putConnection(conn)
3935 +
3936 + # Get the rootdirs for the repos
3937 + repo_trees_list = myportdb.porttrees
3938 + for repo_dir in repo_trees_list:
3939 + repo = myportdb.getRepositoryName(repo_dir)
3940 + repo_dir_list = []
3941 + repo_dir_list.append(repo_dir)
3942 +
3943 + # Get the package list from the repo
3944 + package_id_list_tree = []
3945 + package_list_tree = myportdb.cp_all(trees=repo_dir_list)
3946 +
3947 + # Run the update package for all package in the list and in a multiprocessing pool
3948 + for package_line in sorted(package_list_tree):
3949 + pool.apply_async(update_cpv_db_pool, (mysettings, myportdb, init_package, package_line, repo,))
3950 + # update_cpv_db_pool(mysettings, myportdb, init_package, package_line, repo)
3951 pool.close()
3952 pool.join()
3953 conn=CM.getConnection()
3954 @@ -116,30 +114,15 @@ def update_cpv_db():
3955
3956 def update_db_main():
3957 # Main
3958 - conn=CM.getConnection()
3959 - # Logging
3960 - log_msg = "Update db started."
3961 - add_gobs_logs(conn, log_msg, "info", config_profile)
3962 - # Sync portage and profile/settings
3963 - resutalt = git_pull()
3964 - if resutalt is False:
3965 - log_msg = "Update db ... Fail."
3966 - add_gobs_logs(conn, log_msg, "info", config_profile)
3967 - CM.putConnection(conn)
3968 - return False
3969 - resutalt = sync_tree()
3970 - if resutalt is False:
3971 - log_msg = "Update db ... Fail."
3972 - add_gobs_logs(conn, log_msg, "info", config_profile)
3973 - CM.putConnection(conn)
3974 - return False
3975 - # Init settings for the default config
3976 - mysettings = init_portage_settings()
3977 - init_arch = gobs_arch()
3978 - init_arch.update_arch_db()
3979 - # Update the cpv db
3980 - update_cpv_db()
3981 - log_msg = "Update db ... Done."
3982 - add_gobs_logs(conn, log_msg, "info", config_profile)
3983 - CM.putConnection(conn)
3984 - return True
3985 \ No newline at end of file
3986 + conn=CM.getConnection()
3987 +
3988 + # Logging
3989 + log_msg = "Update db started."
3990 + add_gobs_logs(conn, log_msg, "info", config_profile)
3991 +
3992 + # Update the cpv db
3993 + update_cpv_db()
3994 + log_msg = "Update db ... Done."
3995 + add_gobs_logs(conn, log_msg, "info", config_profile)
3996 + CM.putConnection(conn)
3997 + return True