1 |
Author: grobian |
2 |
Date: 2008-05-08 19:18:20 +0000 (Thu, 08 May 2008) |
3 |
New Revision: 10244 |
4 |
|
5 |
Modified: |
6 |
main/branches/prefix/bin/dodoc |
7 |
main/branches/prefix/bin/misc-functions.sh |
8 |
main/branches/prefix/doc/dependency_resolution/task_scheduling.docbook |
9 |
main/branches/prefix/man/color.map.5 |
10 |
main/branches/prefix/pym/_emerge/__init__.py |
11 |
main/branches/prefix/pym/portage/__init__.py |
12 |
main/branches/prefix/pym/portage/dbapi/vartree.py |
13 |
main/branches/prefix/pym/portage/output.py |
14 |
Log: |
15 |
Merged from trunk 10208:10241 |
16 |
|
17 |
| 10210 | Cache results for Task.__hash__() calls. | |
18 |
| zmedico | | |
19 |
|
20 |
| 10212 | Use find -path -or -name to match basenames of files in | |
21 |
| zmedico | INSTALL_MASK as suggested by solar in bug #219286, comment | |
22 |
| | #8. | |
23 |
|
24 |
| 10214 | Display satisfied blockers in green and show a small "b" | |
25 |
| zmedico | instead of a big "B" (similar to "f" for satisfied fetch | |
26 |
| | restrictions). | |
27 |
|
28 |
| 10216 | For consistency with the merge list display, show "block" | |
29 |
| zmedico | instead of "blocker" in the summary. | |
30 |
|
31 |
| 10218 | Make satisfied blockers "darkblue" by default. | |
32 |
| zmedico | | |
33 |
|
34 |
| 10220 | In install_mask(), discard stderr messages from the 'find' | |
35 |
| zmedico | command since some tokens from INSTALL_MASK can trigger lots | |
36 |
| | of warnings and errors that are irrelevant for our purposes. | |
37 |
|
38 |
| 10222 | Fix spelling of "SATISFIED". Thanks to Arfrever. | |
39 |
| zmedico | | |
40 |
|
41 |
| 10224 | Tolerate InvalidDependString exceptions when checking | |
42 |
| zmedico | visibility of installed packages. | |
43 |
|
44 |
| 10225 | Instead of doing automatic uninstalls in advance, install | |
45 |
| zmedico | conflicting packages first and then do the uninstall | |
46 |
| | afterwards. This requires special handling for file | |
47 |
| | collisions, but it's preferred because it ensures that | |
48 |
| | package files remain installed in a usable state whenever | |
49 |
| | possible. When file collisions occur between conflicting | |
50 |
| | packages, the contents entries for those files are removed | |
51 |
| | from the packages that are scheduled for uninstallation. | |
52 |
| | This prevents uninstallation operations from removing | |
53 |
| | overlapping files that have been claimed by conflicting | |
54 |
| | packages. | |
55 |
|
56 |
| 10229 | Fix findInstalledBlockers() to check for blockers in both | |
57 |
| zmedico | directions. | |
58 |
|
59 |
| 10231 | Don't use try/finally to close atomic_ofstream since we | |
60 |
| zmedico | don't want to call close() on this stream if an error | |
61 |
| | occurs. | |
62 |
|
63 |
| 10233 | Bug #220689 - Fix package selection logic so that it doesn't | |
64 |
| zmedico | trigger the code path from bug 219369 in some unwanted | |
65 |
| | cases. | |
66 |
|
67 |
| 10235 | Bug #220775 - Source isolated-functions.sh before trying to | |
68 |
| zmedico | call vecho. | |
69 |
|
70 |
| 10237 | Bug #220341 - USE=multislot can make an installed package | |
71 |
| zmedico | appear as if it doesn't satisfy a slot dependency. | |
72 |
| | Rebuilding the ebuild won't do any good as long as | |
73 |
| | USE=multislot is enabled since the newly built package still | |
74 |
| | won't have the expected slot. Therefore, assume that such | |
75 |
| | SLOT dependencies are already satisfied rather than forcing | |
76 |
| | a rebuild. | |
77 |
|
78 |
| 10239 | Don't save "uninstall" tasks in the resume list since | |
79 |
| zmedico | they'll be regenerated by dependency calculations upon | |
80 |
| | resume. | |
81 |
|
82 |
| 10241 | Remove unnecessary BlockerDB and BlockerCache | |
83 |
| zmedico | _installed_pkgs attributes. | |
84 |
|
85 |
|
86 |
Modified: main/branches/prefix/bin/dodoc |
87 |
=================================================================== |
88 |
--- main/branches/prefix/bin/dodoc 2008-05-08 19:14:22 UTC (rev 10243) |
89 |
+++ main/branches/prefix/bin/dodoc 2008-05-08 19:18:20 UTC (rev 10244) |
90 |
@@ -4,6 +4,7 @@ |
91 |
# $Id$ |
92 |
|
93 |
if [ $# -lt 1 ] ; then |
94 |
+ source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh |
95 |
vecho "${0##*/}: at least one argument needed" 1>&2 |
96 |
exit 1 |
97 |
fi |
98 |
|
99 |
Modified: main/branches/prefix/bin/misc-functions.sh |
100 |
=================================================================== |
101 |
--- main/branches/prefix/bin/misc-functions.sh 2008-05-08 19:14:22 UTC (rev 10243) |
102 |
+++ main/branches/prefix/bin/misc-functions.sh 2008-05-08 19:18:20 UTC (rev 10244) |
103 |
@@ -518,7 +518,8 @@ |
104 |
rm -Rf "${root}"/${no_inst} >&/dev/null |
105 |
|
106 |
# we also need to handle globs (*.a, *.h, etc) |
107 |
- find "${root}" -path "${no_inst}" -exec rm -fR {} \; >/dev/null |
108 |
+ find "${root}" \( -path "${no_inst}" -or -name "${no_inst}" \) \ |
109 |
+ -exec rm -fR {} \; >/dev/null 2>&1 |
110 |
done |
111 |
# set everything back the way we found it |
112 |
set +o noglob |
113 |
|
114 |
Modified: main/branches/prefix/doc/dependency_resolution/task_scheduling.docbook |
115 |
=================================================================== |
116 |
--- main/branches/prefix/doc/dependency_resolution/task_scheduling.docbook 2008-05-08 19:14:22 UTC (rev 10243) |
117 |
+++ main/branches/prefix/doc/dependency_resolution/task_scheduling.docbook 2008-05-08 19:18:20 UTC (rev 10244) |
118 |
@@ -21,7 +21,7 @@ |
119 |
</para> |
120 |
<para> |
121 |
In order to avoid a conflict, a package may need to be uninstalled |
122 |
- in advance, rather than through replacement. The following constraints |
123 |
+ rather than replaced. The following constraints |
124 |
protect inappropriate packages from being chosen for automatic |
125 |
uninstallation: |
126 |
<itemizedlist> |
127 |
@@ -46,6 +46,16 @@ |
128 |
</listitem> |
129 |
</itemizedlist> |
130 |
</para> |
131 |
+ <para> |
132 |
+ In order to ensure that package files remain installed in a usable state |
133 |
+ whenever possible, uninstallation operations are not executed |
134 |
+ until after all associated conflicting packages have been installed. |
135 |
+ When file collisions occur between conflicting packages, the contents |
136 |
+ entries for those files are removed from the packages |
137 |
+ that are scheduled for uninstallation. This prevents |
138 |
+ uninstallation operations from removing overlapping files that |
139 |
+ have been claimed by conflicting packages. |
140 |
+ </para> |
141 |
</sect1> |
142 |
<sect1 id='dependency-resolution-task-scheduling-circular-dependencies'> |
143 |
<title>Circular Dependencies</title> |
144 |
|
145 |
Modified: main/branches/prefix/man/color.map.5 |
146 |
=================================================================== |
147 |
--- main/branches/prefix/man/color.map.5 2008-05-08 19:14:22 UTC (rev 10243) |
148 |
+++ main/branches/prefix/man/color.map.5 2008-05-08 19:18:20 UTC (rev 10244) |
149 |
@@ -30,6 +30,12 @@ |
150 |
\fBMERGE_LIST_PROGRESS\fR = \fI"yellow"\fR |
151 |
Defines color used for numbers indicating merge progress. |
152 |
.TP |
153 |
+\fBPKG_BLOCKER\fR = \fI"red"\fR |
154 |
+Defines color used for unsatisfied blockers. |
155 |
+.TP |
156 |
+\fBPKG_BLOCKER_SATISFIED\fR = \fI"darkblue"\fR |
157 |
+Defines color used for satisfied blockers. |
158 |
+.TP |
159 |
\fBPKG_MERGE\fR = \fI"darkgreen"\fR |
160 |
Defines color used for packages planned to be merged. |
161 |
.TP |
162 |
|
163 |
Modified: main/branches/prefix/pym/_emerge/__init__.py |
164 |
=================================================================== |
165 |
--- main/branches/prefix/pym/_emerge/__init__.py 2008-05-08 19:14:22 UTC (rev 10243) |
166 |
+++ main/branches/prefix/pym/_emerge/__init__.py 2008-05-08 19:18:20 UTC (rev 10244) |
167 |
@@ -738,7 +738,6 @@ |
168 |
result = "" |
169 |
return result |
170 |
|
171 |
- |
172 |
class RootConfig(object): |
173 |
"""This is used internally by depgraph to track information about a |
174 |
particular $ROOT.""" |
175 |
@@ -1241,7 +1240,7 @@ |
176 |
return have_eapi_mask |
177 |
|
178 |
class Task(SlotObject): |
179 |
- __slots__ = ("_hash_key",) |
180 |
+ __slots__ = ("_hash_key", "_hash_value") |
181 |
|
182 |
def _get_hash_key(self): |
183 |
hash_key = getattr(self, "_hash_key", None) |
184 |
@@ -1256,7 +1255,10 @@ |
185 |
return self._get_hash_key() != other |
186 |
|
187 |
def __hash__(self): |
188 |
- return hash(self._get_hash_key()) |
189 |
+ hash_value = getattr(self, "_hash_value", None) |
190 |
+ if hash_value is None: |
191 |
+ self._hash_value = hash(self._get_hash_key()) |
192 |
+ return self._hash_value |
193 |
|
194 |
def __len__(self): |
195 |
return len(self._get_hash_key()) |
196 |
@@ -1291,13 +1293,22 @@ |
197 |
__slots__ = ("built", "cpv", "depth", |
198 |
"installed", "metadata", "onlydeps", "operation", |
199 |
"root", "type_name", |
200 |
- "cp", "cpv_slot", "pv_split", "slot_atom") |
201 |
+ "category", "cp", "cpv_slot", "pf", "pv_split", "slot_atom") |
202 |
+ |
203 |
+ metadata_keys = [ |
204 |
+ "CHOST", "COUNTER", "DEPEND", "EAPI", "IUSE", "KEYWORDS", |
205 |
+ "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND", |
206 |
+ "repository", "RESTRICT", "SLOT", "USE"] |
207 |
+ |
208 |
def __init__(self, **kwargs): |
209 |
Task.__init__(self, **kwargs) |
210 |
self.cp = portage.cpv_getkey(self.cpv) |
211 |
self.slot_atom = "%s:%s" % (self.cp, self.metadata["SLOT"]) |
212 |
self.cpv_slot = "%s:%s" % (self.cpv, self.metadata["SLOT"]) |
213 |
- self.pv_split = portage.catpkgsplit(self.cpv)[1:] |
214 |
+ cpv_parts = portage.catpkgsplit(self.cpv) |
215 |
+ self.category = cpv_parts[0] |
216 |
+ self.pv_split = cpv_parts[1:] |
217 |
+ self.pf = self.cpv.replace(self.category + "/", "", 1) |
218 |
|
219 |
def _get_hash_key(self): |
220 |
hash_key = getattr(self, "_hash_key", None) |
221 |
@@ -1384,13 +1395,15 @@ |
222 |
2) the old-style virtuals have changed |
223 |
""" |
224 |
class BlockerData(object): |
225 |
+ |
226 |
+ __slots__ = ("__weakref__", "atoms", "counter") |
227 |
+ |
228 |
def __init__(self, counter, atoms): |
229 |
self.counter = counter |
230 |
self.atoms = atoms |
231 |
|
232 |
def __init__(self, myroot, vardb): |
233 |
self._vardb = vardb |
234 |
- self._installed_pkgs = set(vardb.cpv_all()) |
235 |
self._virtuals = vardb.settings.getvirtuals() |
236 |
self._cache_filename = os.path.join(myroot, |
237 |
portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle") |
238 |
@@ -1525,6 +1538,107 @@ |
239 |
an AttributeError.""" |
240 |
return list(self) |
241 |
|
242 |
+class BlockerDB(object): |
243 |
+ |
244 |
+ def __init__(self, vartree, portdb): |
245 |
+ self._vartree = vartree |
246 |
+ self._portdb = portdb |
247 |
+ self._blocker_cache = \ |
248 |
+ BlockerCache(self._vartree.root, vartree.dbapi) |
249 |
+ self._dep_check_trees = { self._vartree.root : { |
250 |
+ "porttree" : self._vartree, |
251 |
+ "vartree" : self._vartree, |
252 |
+ }} |
253 |
+ |
254 |
+ def findInstalledBlockers(self, new_pkg): |
255 |
+ blocker_cache = self._blocker_cache |
256 |
+ dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"] |
257 |
+ dep_check_trees = self._dep_check_trees |
258 |
+ settings = self._vartree.settings |
259 |
+ stale_cache = set(blocker_cache) |
260 |
+ fake_vartree = \ |
261 |
+ FakeVartree(self._vartree, |
262 |
+ self._portdb, Package.metadata_keys, {}) |
263 |
+ vardb = fake_vartree.dbapi |
264 |
+ installed_pkgs = list(vardb) |
265 |
+ |
266 |
+ for inst_pkg in installed_pkgs: |
267 |
+ stale_cache.discard(inst_pkg.cpv) |
268 |
+ cached_blockers = blocker_cache.get(inst_pkg.cpv) |
269 |
+ if cached_blockers is not None and \ |
270 |
+ cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]): |
271 |
+ cached_blockers = None |
272 |
+ if cached_blockers is not None: |
273 |
+ blocker_atoms = cached_blockers.atoms |
274 |
+ else: |
275 |
+ myuse = inst_pkg.metadata["USE"].split() |
276 |
+ # Use aux_get() to trigger FakeVartree global |
277 |
+ # updates on *DEPEND when appropriate. |
278 |
+ depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys)) |
279 |
+ try: |
280 |
+ portage.dep._dep_check_strict = False |
281 |
+ success, atoms = portage.dep_check(depstr, |
282 |
+ vardb, settings, myuse=myuse, |
283 |
+ trees=dep_check_trees, myroot=inst_pkg.root) |
284 |
+ finally: |
285 |
+ portage.dep._dep_check_strict = True |
286 |
+ if not success: |
287 |
+ pkg_location = os.path.join(inst_pkg.root, |
288 |
+ portage.VDB_PATH, inst_pkg.category, inst_pkg.pf) |
289 |
+ portage.writemsg("!!! %s/*DEPEND: %s\n" % \ |
290 |
+ (pkg_location, atoms), noiselevel=-1) |
291 |
+ continue |
292 |
+ |
293 |
+ blocker_atoms = [atom for atom in atoms \ |
294 |
+ if atom.startswith("!")] |
295 |
+ blocker_atoms.sort() |
296 |
+ counter = long(inst_pkg.metadata["COUNTER"]) |
297 |
+ blocker_cache[inst_pkg.cpv] = \ |
298 |
+ blocker_cache.BlockerData(counter, blocker_atoms) |
299 |
+ for cpv in stale_cache: |
300 |
+ del blocker_cache[cpv] |
301 |
+ blocker_cache.flush() |
302 |
+ |
303 |
+ blocker_parents = digraph() |
304 |
+ blocker_atoms = [] |
305 |
+ for pkg in installed_pkgs: |
306 |
+ for blocker_atom in self._blocker_cache[pkg.cpv].atoms: |
307 |
+ blocker_atom = blocker_atom[1:] |
308 |
+ blocker_atoms.append(blocker_atom) |
309 |
+ blocker_parents.add(blocker_atom, pkg) |
310 |
+ |
311 |
+ blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms) |
312 |
+ blocking_pkgs = set() |
313 |
+ for atom in blocker_atoms.iterAtomsForPackage(new_pkg): |
314 |
+ blocking_pkgs.update(blocker_parents.parent_nodes(atom)) |
315 |
+ |
316 |
+ # Check for blockers in the other direction. |
317 |
+ myuse = new_pkg.metadata["USE"].split() |
318 |
+ depstr = " ".join(new_pkg.metadata[k] for k in dep_keys) |
319 |
+ try: |
320 |
+ portage.dep._dep_check_strict = False |
321 |
+ success, atoms = portage.dep_check(depstr, |
322 |
+ vardb, settings, myuse=myuse, |
323 |
+ trees=dep_check_trees, myroot=new_pkg.root) |
324 |
+ finally: |
325 |
+ portage.dep._dep_check_strict = True |
326 |
+ if not success: |
327 |
+ # We should never get this far with invalid deps. |
328 |
+ show_invalid_depstring_notice(new_pkg, depstr, atoms) |
329 |
+ assert False |
330 |
+ |
331 |
+ blocker_atoms = [atom[1:] for atom in atoms \ |
332 |
+ if atom.startswith("!")] |
333 |
+ blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms) |
334 |
+ for inst_pkg in installed_pkgs: |
335 |
+ try: |
336 |
+ blocker_atoms.iterAtomsForPackage(inst_pkg).next() |
337 |
+ except (portage.exception.InvalidDependString, StopIteration): |
338 |
+ continue |
339 |
+ blocking_pkgs.add(inst_pkg) |
340 |
+ |
341 |
+ return blocking_pkgs |
342 |
+ |
343 |
def show_invalid_depstring_notice(parent_node, depstring, error_msg): |
344 |
|
345 |
from formatter import AbstractFormatter, DumbWriter |
346 |
@@ -1678,10 +1792,7 @@ |
347 |
"binary":"bintree", |
348 |
"installed":"vartree"} |
349 |
|
350 |
- _mydbapi_keys = [ |
351 |
- "CHOST", "COUNTER", "DEPEND", "EAPI", "IUSE", "KEYWORDS", |
352 |
- "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND", |
353 |
- "repository", "RESTRICT", "SLOT", "USE"] |
354 |
+ _mydbapi_keys = Package.metadata_keys |
355 |
|
356 |
_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"] |
357 |
|
358 |
@@ -2845,6 +2956,18 @@ |
359 |
cpv_list = db.xmatch("match-all", atom) |
360 |
else: |
361 |
cpv_list = db.match(atom) |
362 |
+ |
363 |
+ # USE=multislot can make an installed package appear as if |
364 |
+ # it doesn't satisfy a slot dependency. Rebuilding the ebuild |
365 |
+ # won't do any good as long as USE=multislot is enabled since |
366 |
+ # the newly built package still won't have the expected slot. |
367 |
+ # Therefore, assume that such SLOT dependencies are already |
368 |
+ # satisfied rather than forcing a rebuild. |
369 |
+ if installed and not cpv_list and matched_packages \ |
370 |
+ and vardb.cpv_exists(matched_packages[-1].cpv) and \ |
371 |
+ portage.dep.dep_getslot(atom): |
372 |
+ cpv_list = [matched_packages[-1].cpv] |
373 |
+ |
374 |
if not cpv_list: |
375 |
continue |
376 |
pkg_status = "merge" |
377 |
@@ -2906,7 +3029,8 @@ |
378 |
if not visible(pkgsettings, pkg): |
379 |
continue |
380 |
except portage.exception.InvalidDependString: |
381 |
- continue |
382 |
+ if not installed: |
383 |
+ continue |
384 |
|
385 |
# Enable upgrade or downgrade to a version |
386 |
# with visible KEYWORDS when the installed |
387 |
@@ -3011,16 +3135,6 @@ |
388 |
cur_use, cur_iuse) |
389 |
if reinstall_for_flags: |
390 |
reinstall = True |
391 |
- if not installed: |
392 |
- must_reinstall = empty or \ |
393 |
- (myarg and not selective) |
394 |
- if not reinstall_for_flags and \ |
395 |
- not must_reinstall and \ |
396 |
- cpv in vardb.match(atom): |
397 |
- # If the installed version is masked, it may |
398 |
- # be necessary to look at lower versions, |
399 |
- # in case there is a visible downgrade. |
400 |
- continue |
401 |
if not built: |
402 |
myeb = cpv |
403 |
matched_packages.append(pkg) |
404 |
@@ -3505,6 +3619,9 @@ |
405 |
return -1 |
406 |
myblocker_uninstalls = self._blocker_uninstalls.copy() |
407 |
retlist=[] |
408 |
+ # Contains uninstall tasks that have been scheduled to |
409 |
+ # occur after overlapping blockers have been installed. |
410 |
+ scheduled_uninstalls = set() |
411 |
# Contains any Uninstall tasks that have been ignored |
412 |
# in order to avoid the circular deps code path. These |
413 |
# correspond to blocker conflicts that could not be |
414 |
@@ -3719,10 +3836,16 @@ |
415 |
selected_nodes = list(selected_nodes) |
416 |
selected_nodes.sort(cmp_circular_bias) |
417 |
|
418 |
+ if not selected_nodes and scheduled_uninstalls: |
419 |
+ selected_nodes = set() |
420 |
+ for node in scheduled_uninstalls: |
421 |
+ if not mygraph.child_nodes(node): |
422 |
+ selected_nodes.add(node) |
423 |
+ scheduled_uninstalls.difference_update(selected_nodes) |
424 |
+ |
425 |
if not selected_nodes and not myblocker_uninstalls.is_empty(): |
426 |
# An Uninstall task needs to be executed in order to |
427 |
# avoid conflict if possible. |
428 |
- |
429 |
min_parent_deps = None |
430 |
uninst_task = None |
431 |
for task in myblocker_uninstalls.leaf_nodes(): |
432 |
@@ -3840,7 +3963,20 @@ |
433 |
uninst_task = task |
434 |
|
435 |
if uninst_task is not None: |
436 |
- selected_nodes = [uninst_task] |
437 |
+ # The uninstall is performed only after blocking |
438 |
+ # packages have been merged on top of it. File |
439 |
+ # collisions between blocking packages are detected |
440 |
+ # and removed from the list of files to be uninstalled. |
441 |
+ scheduled_uninstalls.add(uninst_task) |
442 |
+ parent_nodes = mygraph.parent_nodes(uninst_task) |
443 |
+ |
444 |
+ # Reverse the parent -> uninstall edges since we want |
445 |
+ # to do the uninstall after blocking packages have |
446 |
+ # been merged on top of it. |
447 |
+ mygraph.remove(uninst_task) |
448 |
+ for blocked_pkg in parent_nodes: |
449 |
+ mygraph.add(blocked_pkg, uninst_task, |
450 |
+ priority=BlockerDepPriority.instance) |
451 |
else: |
452 |
# None of the Uninstall tasks are acceptable, so |
453 |
# the corresponding blockers are unresolvable. |
454 |
@@ -3857,12 +3993,12 @@ |
455 |
ignored_uninstall_tasks.add(node) |
456 |
break |
457 |
|
458 |
- # After dropping an Uninstall task, reset |
459 |
- # the state variables for leaf node selection and |
460 |
- # continue trying to select leaf nodes. |
461 |
- prefer_asap = True |
462 |
- accept_root_node = False |
463 |
- continue |
464 |
+ # After dropping an Uninstall task, reset |
465 |
+ # the state variables for leaf node selection and |
466 |
+ # continue trying to select leaf nodes. |
467 |
+ prefer_asap = True |
468 |
+ accept_root_node = False |
469 |
+ continue |
470 |
|
471 |
if not selected_nodes: |
472 |
self._circular_deps_for_display = mygraph |
473 |
@@ -4023,6 +4159,8 @@ |
474 |
verbosity = ("--quiet" in self.myopts and 1 or \ |
475 |
"--verbose" in self.myopts and 3 or 2) |
476 |
favorites_set = InternalPackageSet(favorites) |
477 |
+ oneshot = "--oneshot" in self.myopts or \ |
478 |
+ "--onlydeps" in self.myopts |
479 |
changelogs=[] |
480 |
p=[] |
481 |
blockers = [] |
482 |
@@ -4251,24 +4389,35 @@ |
483 |
fetch=" " |
484 |
indent = " " * depth |
485 |
|
486 |
- if x[0]=="blocks": |
487 |
- addl=""+red("B")+" "+fetch+" " |
488 |
+ if isinstance(x, Blocker): |
489 |
+ if x.satisfied: |
490 |
+ blocker_style = "PKG_BLOCKER_SATISFIED" |
491 |
+ addl = "%s %s " % (colorize(blocker_style, "b"), fetch) |
492 |
+ else: |
493 |
+ blocker_style = "PKG_BLOCKER" |
494 |
+ addl = "%s %s " % (colorize(blocker_style, "B"), fetch) |
495 |
if ordered: |
496 |
counters.blocks += 1 |
497 |
+ if x.satisfied: |
498 |
+ counters.blocks_satisfied += 1 |
499 |
resolved = portage.key_expand( |
500 |
pkg_key, mydb=vardb, settings=pkgsettings) |
501 |
if "--columns" in self.myopts and "--quiet" in self.myopts: |
502 |
- addl = addl + " " + red(resolved) |
503 |
+ addl += " " + colorize(blocker_style, resolved) |
504 |
else: |
505 |
- addl = "[blocks " + addl + "] " + indent + red(resolved) |
506 |
+ addl = "[%s %s] %s%s" % \ |
507 |
+ (colorize(blocker_style, "blocks"), |
508 |
+ addl, indent, colorize(blocker_style, resolved)) |
509 |
block_parents = self._blocker_parents.parent_nodes(x) |
510 |
block_parents = set([pnode[2] for pnode in block_parents]) |
511 |
block_parents = ", ".join(block_parents) |
512 |
if resolved!=x[2]: |
513 |
- addl += bad(" (\"%s\" is blocking %s)") % \ |
514 |
+ addl += colorize(blocker_style, |
515 |
+ " (\"%s\" is blocking %s)") % \ |
516 |
(pkg_key, block_parents) |
517 |
else: |
518 |
- addl += bad(" (is blocking %s)") % block_parents |
519 |
+ addl += colorize(blocker_style, |
520 |
+ " (is blocking %s)") % block_parents |
521 |
if isinstance(x, Blocker) and x.satisfied: |
522 |
p.append(addl) |
523 |
else: |
524 |
@@ -4573,7 +4722,8 @@ |
525 |
try: |
526 |
pkg_system = system_set.findAtomForPackage(pkg_key, metadata) |
527 |
pkg_world = world_set.findAtomForPackage(pkg_key, metadata) |
528 |
- if not pkg_world and myroot == self.target_root and \ |
529 |
+ if not (oneshot or pkg_world) and \ |
530 |
+ myroot == self.target_root and \ |
531 |
favorites_set.findAtomForPackage(pkg_key, metadata): |
532 |
# Maybe it will be added to world now. |
533 |
if create_world_atom(pkg_key, metadata, |
534 |
@@ -4936,7 +5086,7 @@ |
535 |
pkg_type, myroot, pkg_key, action = x |
536 |
if pkg_type not in self.pkg_tree_map: |
537 |
continue |
538 |
- if action not in ("merge", "uninstall"): |
539 |
+ if action != "merge": |
540 |
continue |
541 |
mydb = trees[myroot][self.pkg_tree_map[pkg_type]].dbapi |
542 |
try: |
543 |
@@ -5319,6 +5469,7 @@ |
544 |
self.reinst = 0 |
545 |
self.uninst = 0 |
546 |
self.blocks = 0 |
547 |
+ self.blocks_satisfied = 0 |
548 |
self.totalsize = 0 |
549 |
self.restrict_fetch = 0 |
550 |
self.restrict_fetch_satisfied = 0 |
551 |
@@ -5354,10 +5505,6 @@ |
552 |
details.append("%s uninstall" % self.uninst) |
553 |
if self.uninst > 1: |
554 |
details[-1] += "s" |
555 |
- if self.blocks > 0: |
556 |
- details.append("%s block" % self.blocks) |
557 |
- if self.blocks > 1: |
558 |
- details[-1] += "s" |
559 |
myoutput.append(", ".join(details)) |
560 |
if total_installs != 0: |
561 |
myoutput.append(")") |
562 |
@@ -5370,6 +5517,14 @@ |
563 |
if self.restrict_fetch_satisfied < self.restrict_fetch: |
564 |
myoutput.append(bad(" (%s unsatisfied)") % \ |
565 |
(self.restrict_fetch - self.restrict_fetch_satisfied)) |
566 |
+ if self.blocks > 0: |
567 |
+ myoutput.append("\nConflict: %s block" % \ |
568 |
+ self.blocks) |
569 |
+ if self.blocks > 1: |
570 |
+ myoutput.append("s") |
571 |
+ if self.blocks_satisfied < self.blocks: |
572 |
+ myoutput.append(bad(" (%s unsatisfied)") % \ |
573 |
+ (self.blocks - self.blocks_satisfied)) |
574 |
return "".join(myoutput) |
575 |
|
576 |
class MergeTask(object): |
577 |
@@ -5383,13 +5538,36 @@ |
578 |
if settings.get("PORTAGE_DEBUG", "") == "1": |
579 |
self.edebug = 1 |
580 |
self.pkgsettings = {} |
581 |
+ self._blocker_db = {} |
582 |
for root in trees: |
583 |
self.pkgsettings[root] = portage.config( |
584 |
clone=trees[root]["vartree"].settings) |
585 |
+ self._blocker_db[root] = BlockerDB( |
586 |
+ trees[root]["vartree"], |
587 |
+ trees[root]["porttree"].dbapi) |
588 |
self.curval = 0 |
589 |
self._spawned_pids = [] |
590 |
- self._uninstall_queue = [] |
591 |
|
592 |
+ def _find_blockers(self, new_pkg): |
593 |
+ for opt in ("--buildpkgonly", "--nodeps", |
594 |
+ "--fetchonly", "--fetch-all-uri", "--pretend"): |
595 |
+ if opt in self.myopts: |
596 |
+ return None |
597 |
+ |
598 |
+ blocker_dblinks = [] |
599 |
+ for blocking_pkg in self._blocker_db[ |
600 |
+ new_pkg.root].findInstalledBlockers(new_pkg): |
601 |
+ if new_pkg.slot_atom == blocking_pkg.slot_atom: |
602 |
+ continue |
603 |
+ if new_pkg.cpv == blocking_pkg.cpv: |
604 |
+ continue |
605 |
+ blocker_dblinks.append(portage.dblink( |
606 |
+ blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root, |
607 |
+ self.pkgsettings[blocking_pkg.root], treetype="vartree", |
608 |
+ vartree=self.trees[blocking_pkg.root]["vartree"])) |
609 |
+ |
610 |
+ return blocker_dblinks |
611 |
+ |
612 |
def merge(self, mylist, favorites, mtimedb): |
613 |
try: |
614 |
return self._merge(mylist, favorites, mtimedb) |
615 |
@@ -5418,17 +5596,6 @@ |
616 |
pass |
617 |
spawned_pids.remove(pid) |
618 |
|
619 |
- def _dequeue_uninstall_tasks(self, mtimedb): |
620 |
- if not self._uninstall_queue: |
621 |
- return |
622 |
- for uninst_task in self._uninstall_queue: |
623 |
- root_config = self.trees[uninst_task.root]["root_config"] |
624 |
- unmerge(root_config, self.myopts, "unmerge", |
625 |
- [uninst_task.cpv], mtimedb["ldpath"], clean_world=0) |
626 |
- del mtimedb["resume"]["mergelist"][0] |
627 |
- mtimedb.commit() |
628 |
- del self._uninstall_queue[:] |
629 |
- |
630 |
def _merge(self, mylist, favorites, mtimedb): |
631 |
from portage.elog import elog_process |
632 |
from portage.elog.filtering import filter_mergephases |
633 |
@@ -5481,7 +5648,7 @@ |
634 |
world_set = root_config.sets["world"] |
635 |
|
636 |
mtimedb["resume"]["mergelist"] = [list(x) for x in mylist \ |
637 |
- if isinstance(x, Package)] |
638 |
+ if isinstance(x, Package) and x.operation == "merge"] |
639 |
mtimedb.commit() |
640 |
|
641 |
mymergelist = mylist |
642 |
@@ -5569,7 +5736,8 @@ |
643 |
metadata = pkg.metadata |
644 |
if pkg.installed: |
645 |
if not (buildpkgonly or fetchonly or pretend): |
646 |
- self._uninstall_queue.append(pkg) |
647 |
+ unmerge(root_config, self.myopts, "unmerge", |
648 |
+ [pkg.cpv], mtimedb["ldpath"], clean_world=0) |
649 |
continue |
650 |
|
651 |
if x[0]=="blocks": |
652 |
@@ -5670,20 +5838,22 @@ |
653 |
return retval |
654 |
bintree = self.trees[myroot]["bintree"] |
655 |
bintree.inject(pkg_key, filename=binpkg_tmpfile) |
656 |
- self._dequeue_uninstall_tasks(mtimedb) |
657 |
+ |
658 |
if "--buildpkgonly" not in self.myopts: |
659 |
msg = " === (%s of %s) Merging (%s::%s)" % \ |
660 |
(mergecount, len(mymergelist), pkg_key, y) |
661 |
short_msg = "emerge: (%s of %s) %s Merge" % \ |
662 |
(mergecount, len(mymergelist), pkg_key) |
663 |
emergelog(xterm_titles, msg, short_msg=short_msg) |
664 |
+ |
665 |
retval = portage.merge(pkgsettings["CATEGORY"], |
666 |
pkgsettings["PF"], pkgsettings["D"], |
667 |
os.path.join(pkgsettings["PORTAGE_BUILDDIR"], |
668 |
"build-info"), myroot, pkgsettings, |
669 |
myebuild=pkgsettings["EBUILD"], |
670 |
mytree="porttree", mydbapi=portdb, |
671 |
- vartree=vartree, prev_mtimes=ldpath_mtimes) |
672 |
+ vartree=vartree, prev_mtimes=ldpath_mtimes, |
673 |
+ blockers=self._find_blockers(pkg)) |
674 |
if retval != os.EX_OK: |
675 |
return retval |
676 |
elif "noclean" not in pkgsettings.features: |
677 |
@@ -5702,14 +5872,15 @@ |
678 |
prev_mtimes=ldpath_mtimes) |
679 |
if retval != os.EX_OK: |
680 |
return retval |
681 |
- self._dequeue_uninstall_tasks(mtimedb) |
682 |
+ |
683 |
retval = portage.merge(pkgsettings["CATEGORY"], |
684 |
pkgsettings["PF"], pkgsettings["D"], |
685 |
os.path.join(pkgsettings["PORTAGE_BUILDDIR"], |
686 |
"build-info"), myroot, pkgsettings, |
687 |
myebuild=pkgsettings["EBUILD"], |
688 |
mytree="porttree", mydbapi=portdb, |
689 |
- vartree=vartree, prev_mtimes=ldpath_mtimes) |
690 |
+ vartree=vartree, prev_mtimes=ldpath_mtimes, |
691 |
+ blockers=self._find_blockers(pkg)) |
692 |
if retval != os.EX_OK: |
693 |
return retval |
694 |
finally: |
695 |
@@ -5731,7 +5902,6 @@ |
696 |
portage.locks.unlockdir(catdir_lock) |
697 |
|
698 |
elif x[0]=="binary": |
699 |
- self._dequeue_uninstall_tasks(mtimedb) |
700 |
#merge the tbz2 |
701 |
mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key) |
702 |
if "--getbinpkg" in self.myopts: |
703 |
@@ -5787,7 +5957,8 @@ |
704 |
retval = portage.pkgmerge(mytbz2, x[1], pkgsettings, |
705 |
mydbapi=bindb, |
706 |
vartree=self.trees[myroot]["vartree"], |
707 |
- prev_mtimes=ldpath_mtimes) |
708 |
+ prev_mtimes=ldpath_mtimes, |
709 |
+ blockers=self._find_blockers(pkg)) |
710 |
if retval != os.EX_OK: |
711 |
return retval |
712 |
#need to check for errors |
713 |
@@ -7951,6 +8122,7 @@ |
714 |
fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts |
715 |
ask = "--ask" in myopts |
716 |
nodeps = "--nodeps" in myopts |
717 |
+ oneshot = "--oneshot" in myopts or "--onlydeps" in myopts |
718 |
tree = "--tree" in myopts |
719 |
if nodeps and tree: |
720 |
tree = False |
721 |
@@ -8122,7 +8294,7 @@ |
722 |
mergecount += 1 |
723 |
|
724 |
if mergecount==0: |
725 |
- if "--noreplace" in myopts and favorites: |
726 |
+ if "--noreplace" in myopts and not oneshot and favorites: |
727 |
print |
728 |
for x in favorites: |
729 |
print " %s %s" % (good("*"), x) |
730 |
|
731 |
Modified: main/branches/prefix/pym/portage/__init__.py |
732 |
=================================================================== |
733 |
--- main/branches/prefix/pym/portage/__init__.py 2008-05-08 19:14:22 UTC (rev 10243) |
734 |
+++ main/branches/prefix/pym/portage/__init__.py 2008-05-08 19:18:20 UTC (rev 10244) |
735 |
@@ -5364,13 +5364,13 @@ |
736 |
return newmtime |
737 |
|
738 |
def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None, |
739 |
- mytree=None, mydbapi=None, vartree=None, prev_mtimes=None): |
740 |
+ mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None): |
741 |
if not os.access(myroot + EPREFIX_LSTRIP, os.W_OK): |
742 |
writemsg("Permission denied: access('%s', W_OK)\n" % |
743 |
(myroot + EPREFIX_LSTRIP), noiselevel=-1) |
744 |
return errno.EACCES |
745 |
mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree, |
746 |
- vartree=vartree) |
747 |
+ vartree=vartree, blockers=blockers) |
748 |
return mylink.merge(pkgloc, infloc, myroot, myebuild, |
749 |
mydbapi=mydbapi, prev_mtimes=prev_mtimes) |
750 |
|
751 |
@@ -6235,7 +6235,8 @@ |
752 |
"""Returns keys for all packages within pkgdir""" |
753 |
return self.portdb.cp_list(self.cp, mytree=self.mytree) |
754 |
|
755 |
-def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None): |
756 |
+def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, |
757 |
+ vartree=None, prev_mtimes=None, blockers=None): |
758 |
"""will merge a .tbz2 file, returning a list of runtime dependencies |
759 |
that must be satisfied, or None if there was a merge error. This |
760 |
code assumes the package exists.""" |
761 |
@@ -6365,7 +6366,7 @@ |
762 |
pkgloc = os.path.join(builddir, "image") |
763 |
|
764 |
mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree, |
765 |
- treetype="bintree") |
766 |
+ treetype="bintree", blockers=blockers) |
767 |
retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0, |
768 |
mydbapi=mydbapi, prev_mtimes=prev_mtimes) |
769 |
did_merge_phase = True |
770 |
|
771 |
Modified: main/branches/prefix/pym/portage/dbapi/vartree.py |
772 |
=================================================================== |
773 |
--- main/branches/prefix/pym/portage/dbapi/vartree.py 2008-05-08 19:14:22 UTC (rev 10243) |
774 |
+++ main/branches/prefix/pym/portage/dbapi/vartree.py 2008-05-08 19:18:20 UTC (rev 10244) |
775 |
@@ -1019,7 +1019,7 @@ |
776 |
} |
777 |
|
778 |
def __init__(self, cat, pkg, myroot, mysettings, treetype=None, |
779 |
- vartree=None): |
780 |
+ vartree=None, blockers=None): |
781 |
""" |
782 |
Creates a DBlink object for a given CPV. |
783 |
The given CPV may not be present in the database already. |
784 |
@@ -1048,6 +1048,7 @@ |
785 |
from portage import db |
786 |
vartree = db[myroot]["vartree"] |
787 |
self.vartree = vartree |
788 |
+ self._blockers = blockers |
789 |
|
790 |
self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH)) |
791 |
self.dbcatdir = self.dbroot+"/"+cat |
792 |
@@ -1124,6 +1125,11 @@ |
793 |
if os.path.exists(self.dbdir+"/CONTENTS"): |
794 |
os.unlink(self.dbdir+"/CONTENTS") |
795 |
|
796 |
+ def _clear_contents_cache(self): |
797 |
+ self.contentscache = None |
798 |
+ self._contents_inodes = None |
799 |
+ self._contents_basenames = None |
800 |
+ |
801 |
def getcontents(self): |
802 |
""" |
803 |
Get the installed files of a given package (aka what that package installed) |
804 |
@@ -2006,6 +2012,7 @@ |
805 |
""" |
806 |
|
807 |
srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep |
808 |
+ destroot = normalize_path(destroot).rstrip(os.path.sep) + os.path.sep |
809 |
|
810 |
if not os.path.isdir(srcroot): |
811 |
writemsg("!!! Directory Not Found: D='%s'\n" % srcroot, |
812 |
@@ -2146,8 +2153,11 @@ |
813 |
self._preserve_libs(srcroot, destroot, myfilelist+mylinklist, counter, inforoot) |
814 |
|
815 |
# check for package collisions |
816 |
- collisions = self._collision_protect(srcroot, destroot, others_in_slot, |
817 |
- myfilelist+mylinklist) |
818 |
+ blockers = self._blockers |
819 |
+ if blockers is None: |
820 |
+ blockers = [] |
821 |
+ collisions = self._collision_protect(srcroot, destroot, |
822 |
+ others_in_slot + blockers, myfilelist + mylinklist) |
823 |
|
824 |
# Make sure the ebuild environment is initialized and that ${T}/elog |
825 |
# exists for logging of collision-protect eerror messages. |
826 |
@@ -2367,6 +2377,42 @@ |
827 |
self.dbdir = self.dbpkgdir |
828 |
self.delete() |
829 |
_movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings) |
830 |
+ |
831 |
+ # Check for file collisions with blocking packages |
832 |
+ # and remove any colliding files from their CONTENTS |
833 |
+ # since they now belong to this package. |
834 |
+ self._clear_contents_cache() |
835 |
+ contents = self.getcontents() |
836 |
+ destroot_len = len(destroot) - 1 |
837 |
+ for blocker in blockers: |
838 |
+ blocker_contents = blocker.getcontents() |
839 |
+ collisions = [] |
840 |
+ for filename in blocker_contents: |
841 |
+ relative_filename = filename[destroot_len:] |
842 |
+ if self.isowner(relative_filename, destroot): |
843 |
+ collisions.append(filename) |
844 |
+ if not collisions: |
845 |
+ continue |
846 |
+ for filename in collisions: |
847 |
+ del blocker_contents[filename] |
848 |
+ f = atomic_ofstream(os.path.join(blocker.dbdir, "CONTENTS")) |
849 |
+ for filename in sorted(blocker_contents): |
850 |
+ entry_data = blocker_contents[filename] |
851 |
+ entry_type = entry_data[0] |
852 |
+ relative_filename = filename[destroot_len:] |
853 |
+ if entry_type == "obj": |
854 |
+ entry_type, mtime, md5sum = entry_data |
855 |
+ line = "%s %s %s %s\n" % \ |
856 |
+ (entry_type, relative_filename, md5sum, mtime) |
857 |
+ elif entry_type == "sym": |
858 |
+ entry_type, mtime, link = entry_data |
859 |
+ line = "%s %s -> %s %s\n" % \ |
860 |
+ (entry_type, relative_filename, link, mtime) |
861 |
+ else: # dir, dev, fif |
862 |
+ line = "%s %s\n" % (entry_type, relative_filename) |
863 |
+ f.write(line) |
864 |
+ f.close() |
865 |
+ |
866 |
# Due to mtime granularity, mtime checks do not always properly |
867 |
# invalidate vardbapi caches. |
868 |
self.vartree.dbapi.mtdircache.pop(self.cat, None) |
869 |
|
870 |
Modified: main/branches/prefix/pym/portage/output.py |
871 |
=================================================================== |
872 |
--- main/branches/prefix/pym/portage/output.py 2008-05-08 19:14:22 UTC (rev 10243) |
873 |
+++ main/branches/prefix/pym/portage/output.py 2008-05-08 19:18:20 UTC (rev 10244) |
874 |
@@ -148,6 +148,8 @@ |
875 |
codes["UNMERGE_WARN"] = codes["red"] |
876 |
codes["SECURITY_WARN"] = codes["red"] |
877 |
codes["MERGE_LIST_PROGRESS"] = codes["yellow"] |
878 |
+codes["PKG_BLOCKER"] = codes["red"] |
879 |
+codes["PKG_BLOCKER_SATISFIED"] = codes["darkblue"] |
880 |
codes["PKG_MERGE"] = codes["darkgreen"] |
881 |
codes["PKG_MERGE_SYSTEM"] = codes["darkgreen"] |
882 |
codes["PKG_MERGE_WORLD"] = codes["green"] |
883 |
|
884 |
-- |
885 |
gentoo-commits@l.g.o mailing list |