Gentoo Archives: gentoo-commits

From: "Zac Medico (zmedico)" <zmedico@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] portage r10227 - in main/branches/2.1.2: bin doc/dependency_resolution pym
Date: Thu, 08 May 2008 08:18:15
Message-Id: E1Ju1KN-0003pu-P4@stork.gentoo.org
1 Author: zmedico
2 Date: 2008-05-08 08:18:10 +0000 (Thu, 08 May 2008)
3 New Revision: 10227
4
5 Modified:
6 main/branches/2.1.2/bin/emerge
7 main/branches/2.1.2/doc/dependency_resolution/task_scheduling.docbook
8 main/branches/2.1.2/pym/portage.py
9 Log:
10 Instead of doing automatic uninstalls in advance, install conflicting
11 packages first and then do the uninstall afterwards. This requires
12 special handling when file collisions occur, but it's preferred
13 because it ensures that package files remain installed in a usable
14 state whenever possible.
15
16 When file collisions occur between conflicting packages, the contents
17 entries for those files are removed from the packages that are
18 scheduled for uninstallation. This prevents uninstallation operations
19 from removing overlapping files that have been claimed by conflicting
20 packages.
21
22 (trunk r10225)
23
24
25 Modified: main/branches/2.1.2/bin/emerge
26 ===================================================================
27 --- main/branches/2.1.2/bin/emerge 2008-05-08 07:55:30 UTC (rev 10226)
28 +++ main/branches/2.1.2/bin/emerge 2008-05-08 08:18:10 UTC (rev 10227)
29 @@ -1414,13 +1414,22 @@
30 __slots__ = ("built", "cpv", "depth",
31 "installed", "metadata", "onlydeps", "operation",
32 "root", "type_name",
33 - "cp", "cpv_slot", "pv_split", "slot_atom")
34 + "category", "cp", "cpv_slot", "pf", "pv_split", "slot_atom")
35 +
36 + metadata_keys = [
37 + "CHOST", "COUNTER", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
38 + "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
39 + "repository", "RESTRICT", "SLOT", "USE"]
40 +
41 def __init__(self, **kwargs):
42 Task.__init__(self, **kwargs)
43 self.cp = portage.cpv_getkey(self.cpv)
44 self.slot_atom = "%s:%s" % (self.cp, self.metadata["SLOT"])
45 self.cpv_slot = "%s:%s" % (self.cpv, self.metadata["SLOT"])
46 - self.pv_split = portage.catpkgsplit(self.cpv)[1:]
47 + cpv_parts = portage.catpkgsplit(self.cpv)
48 + self.category = cpv_parts[0]
49 + self.pv_split = cpv_parts[1:]
50 + self.pf = self.cpv.replace(self.category + "/", "", 1)
51
52 def _get_hash_key(self):
53 hash_key = getattr(self, "_hash_key", None)
54 @@ -1507,6 +1516,9 @@
55 2) the old-style virtuals have changed
56 """
57 class BlockerData(object):
58 +
59 + __slots__ = ("__weakref__", "atoms", "counter")
60 +
61 def __init__(self, counter, atoms):
62 self.counter = counter
63 self.atoms = atoms
64 @@ -1648,6 +1660,84 @@
65 an AttributeError."""
66 return list(self)
67
68 +class BlockerDB(object):
69 +
70 + def __init__(self, vartree, portdb):
71 + self._vartree = vartree
72 + self._portdb = portdb
73 + self._blocker_cache = \
74 + BlockerCache(self._vartree.root, vartree.dbapi)
75 + self._dep_check_trees = { self._vartree.root : {
76 + "porttree" : self._vartree,
77 + "vartree" : self._vartree,
78 + }}
79 + self._installed_pkgs = None
80 +
81 + def findInstalledBlockers(self, new_pkg):
82 + self._update_cache()
83 + blocker_parents = digraph()
84 + blocker_atoms = []
85 + for pkg in self._installed_pkgs:
86 + for blocker_atom in self._blocker_cache[pkg.cpv].atoms:
87 + blocker_atom = blocker_atom[1:]
88 + blocker_atoms.append(blocker_atom)
89 + blocker_parents.add(blocker_atom, pkg)
90 +
91 + blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
92 + blocking_pkgs = set()
93 + for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
94 + blocking_pkgs.update(blocker_parents.parent_nodes(atom))
95 + return blocking_pkgs
96 +
97 + def _update_cache(self):
98 + blocker_cache = self._blocker_cache
99 + dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
100 + dep_check_trees = self._dep_check_trees
101 + settings = self._vartree.settings
102 + stale_cache = set(blocker_cache)
103 + fake_vartree = \
104 + FakeVartree(self._vartree,
105 + self._portdb, Package.metadata_keys, {})
106 + vardb = fake_vartree.dbapi
107 + self._installed_pkgs = list(vardb)
108 +
109 + for inst_pkg in self._installed_pkgs:
110 + stale_cache.discard(inst_pkg.cpv)
111 + cached_blockers = blocker_cache.get(inst_pkg.cpv)
112 + if cached_blockers is not None and \
113 + cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
114 + cached_blockers = None
115 + if cached_blockers is not None:
116 + blocker_atoms = cached_blockers.atoms
117 + else:
118 + myuse = inst_pkg.metadata["USE"].split()
119 + # Use aux_get() to trigger FakeVartree global
120 + # updates on *DEPEND when appropriate.
121 + depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
122 + try:
123 + portage.dep._dep_check_strict = False
124 + success, atoms = portage.dep_check(depstr,
125 + vardb, settings, myuse=myuse,
126 + trees=dep_check_trees, myroot=inst_pkg.root)
127 + finally:
128 + portage.dep._dep_check_strict = True
129 + if not success:
130 + pkg_location = os.path.join(inst_pkg.root,
131 + portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
132 + portage.writemsg("!!! %s/*DEPEND: %s\n" % \
133 + (pkg_location, atoms), noiselevel=-1)
134 + continue
135 +
136 + blocker_atoms = [atom for atom in atoms \
137 + if atom.startswith("!")]
138 + blocker_atoms.sort()
139 + counter = long(inst_pkg.metadata["COUNTER"])
140 + blocker_cache[inst_pkg.cpv] = \
141 + blocker_cache.BlockerData(counter, blocker_atoms)
142 + for cpv in stale_cache:
143 + del blocker_cache[cpv]
144 + blocker_cache.flush()
145 +
146 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
147
148 from formatter import AbstractFormatter, DumbWriter
149 @@ -1801,10 +1891,7 @@
150 "binary":"bintree",
151 "installed":"vartree"}
152
153 - _mydbapi_keys = [
154 - "CHOST", "COUNTER", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
155 - "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
156 - "repository", "RESTRICT", "SLOT", "USE"]
157 + _mydbapi_keys = Package.metadata_keys
158
159 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
160
161 @@ -3635,6 +3722,9 @@
162 return -1
163 myblocker_uninstalls = self._blocker_uninstalls.copy()
164 retlist=[]
165 + # Contains uninstall tasks that have been scheduled to
166 + # occur after overlapping blockers have been installed.
167 + scheduled_uninstalls = set()
168 # Contains any Uninstall tasks that have been ignored
169 # in order to avoid the circular deps code path. These
170 # correspond to blocker conflicts that could not be
171 @@ -3849,10 +3939,16 @@
172 selected_nodes = list(selected_nodes)
173 selected_nodes.sort(cmp_circular_bias)
174
175 + if not selected_nodes and scheduled_uninstalls:
176 + selected_nodes = set()
177 + for node in scheduled_uninstalls:
178 + if not mygraph.child_nodes(node):
179 + selected_nodes.add(node)
180 + scheduled_uninstalls.difference_update(selected_nodes)
181 +
182 if not selected_nodes and not myblocker_uninstalls.is_empty():
183 # An Uninstall task needs to be executed in order to
184 # avoid conflict if possible.
185 -
186 min_parent_deps = None
187 uninst_task = None
188 for task in myblocker_uninstalls.leaf_nodes():
189 @@ -3970,7 +4066,20 @@
190 uninst_task = task
191
192 if uninst_task is not None:
193 - selected_nodes = [uninst_task]
194 + # The uninstall is performed only after blocking
195 + # packages have been merged on top of it. File
196 + # collisions between blocking packages are detected
197 + # and removed from the list of files to be uninstalled.
198 + scheduled_uninstalls.add(uninst_task)
199 + parent_nodes = mygraph.parent_nodes(uninst_task)
200 +
201 + # Reverse the parent -> uninstall edges since we want
202 + # to do the uninstall after blocking packages have
203 + # been merged on top of it.
204 + mygraph.remove(uninst_task)
205 + for blocked_pkg in parent_nodes:
206 + mygraph.add(blocked_pkg, uninst_task,
207 + priority=BlockerDepPriority.instance)
208 else:
209 # None of the Uninstall tasks are acceptable, so
210 # the corresponding blockers are unresolvable.
211 @@ -3987,12 +4096,12 @@
212 ignored_uninstall_tasks.add(node)
213 break
214
215 - # After dropping an Uninstall task, reset
216 - # the state variables for leaf node selection and
217 - # continue trying to select leaf nodes.
218 - prefer_asap = True
219 - accept_root_node = False
220 - continue
221 + # After dropping an Uninstall task, reset
222 + # the state variables for leaf node selection and
223 + # continue trying to select leaf nodes.
224 + prefer_asap = True
225 + accept_root_node = False
226 + continue
227
228 if not selected_nodes:
229 self._circular_deps_for_display = mygraph
230 @@ -4153,6 +4262,8 @@
231 verbosity = ("--quiet" in self.myopts and 1 or \
232 "--verbose" in self.myopts and 3 or 2)
233 favorites_set = InternalPackageSet(favorites)
234 + oneshot = "--oneshot" in self.myopts or \
235 + "--onlydeps" in self.myopts
236 changelogs=[]
237 p=[]
238 blockers = []
239 @@ -4718,7 +4829,8 @@
240 try:
241 pkg_system = system_set.findAtomForPackage(pkg_key, metadata)
242 pkg_world = world_set.findAtomForPackage(pkg_key, metadata)
243 - if not pkg_world and myroot == self.target_root and \
244 + if not (oneshot or pkg_world) and \
245 + myroot == self.target_root and \
246 favorites_set.findAtomForPackage(pkg_key, metadata):
247 # Maybe it will be added to world now.
248 if create_world_atom(pkg_key, metadata,
249 @@ -5530,14 +5642,36 @@
250 if settings.get("PORTAGE_DEBUG", "") == "1":
251 self.edebug = 1
252 self.pkgsettings = {}
253 - self.pkgsettings[self.target_root] = portage.config(clone=settings)
254 - if self.target_root != "/":
255 - self.pkgsettings["/"] = \
256 - portage.config(clone=trees["/"]["vartree"].settings)
257 + self._blocker_db = {}
258 + for root in trees:
259 + self.pkgsettings[root] = portage.config(
260 + clone=trees[root]["vartree"].settings)
261 + self._blocker_db[root] = BlockerDB(
262 + trees[root]["vartree"],
263 + trees[root]["porttree"].dbapi)
264 self.curval = 0
265 self._spawned_pids = []
266 - self._uninstall_queue = []
267
268 + def _find_blockers(self, new_pkg):
269 + for opt in ("--buildpkgonly", "--nodeps",
270 + "--fetchonly", "--fetch-all-uri", "--pretend"):
271 + if opt in self.myopts:
272 + return None
273 +
274 + blocker_dblinks = []
275 + for blocking_pkg in self._blocker_db[
276 + new_pkg.root].findInstalledBlockers(new_pkg):
277 + if new_pkg.slot_atom == blocking_pkg.slot_atom:
278 + continue
279 + if new_pkg.cpv == blocking_pkg.cpv:
280 + continue
281 + blocker_dblinks.append(portage.dblink(
282 + blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
283 + self.pkgsettings[blocking_pkg.root], treetype="vartree",
284 + vartree=self.trees[blocking_pkg.root]["vartree"]))
285 +
286 + return blocker_dblinks
287 +
288 def merge(self, mylist, favorites, mtimedb):
289 try:
290 return self._merge(mylist, favorites, mtimedb)
291 @@ -5565,18 +5699,6 @@
292 pass
293 spawned_pids.remove(pid)
294
295 - def _dequeue_uninstall_tasks(self, mtimedb):
296 - if not self._uninstall_queue:
297 - return
298 - for uninst_task in self._uninstall_queue:
299 - root_config = self.trees[uninst_task.root]["root_config"]
300 - unmerge(root_config.settings, self.myopts,
301 - root_config.trees["vartree"], "unmerge",
302 - [uninst_task.cpv], mtimedb["ldpath"], clean_world=0)
303 - del mtimedb["resume"]["mergelist"][0]
304 - mtimedb.commit()
305 - del self._uninstall_queue[:]
306 -
307 def _merge(self, mylist, favorites, mtimedb):
308 failed_fetches = []
309 buildpkgonly = "--buildpkgonly" in self.myopts
310 @@ -5715,7 +5837,11 @@
311 metadata = pkg.metadata
312 if pkg.installed:
313 if not (buildpkgonly or fetchonly or pretend):
314 - self._uninstall_queue.append(pkg)
315 + unmerge(root_config.settings, self.myopts,
316 + root_config.trees["vartree"], "unmerge",
317 + [pkg.cpv], mtimedb["ldpath"], clean_world=0)
318 + del mtimedb["resume"]["mergelist"][0]
319 + mtimedb.commit()
320 continue
321
322 if x[0]=="blocks":
323 @@ -5812,7 +5938,7 @@
324 bintree = self.trees[myroot]["bintree"]
325 if bintree.populated:
326 bintree.inject(pkg_key)
327 - self._dequeue_uninstall_tasks(mtimedb)
328 +
329 if "--buildpkgonly" not in self.myopts:
330 msg = " === (%s of %s) Merging (%s::%s)" % \
331 (mergecount, len(mymergelist), pkg_key, y)
332 @@ -5825,7 +5951,8 @@
333 "build-info"), myroot, pkgsettings,
334 myebuild=pkgsettings["EBUILD"],
335 mytree="porttree", mydbapi=portdb,
336 - vartree=vartree, prev_mtimes=ldpath_mtimes)
337 + vartree=vartree, prev_mtimes=ldpath_mtimes,
338 + blockers=self._find_blockers(pkg))
339 if retval != os.EX_OK:
340 return retval
341 elif "noclean" not in pkgsettings.features:
342 @@ -5844,14 +5971,15 @@
343 prev_mtimes=ldpath_mtimes)
344 if retval != os.EX_OK:
345 return retval
346 - self._dequeue_uninstall_tasks(mtimedb)
347 +
348 retval = portage.merge(pkgsettings["CATEGORY"],
349 pkgsettings["PF"], pkgsettings["D"],
350 os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
351 "build-info"), myroot, pkgsettings,
352 myebuild=pkgsettings["EBUILD"],
353 mytree="porttree", mydbapi=portdb,
354 - vartree=vartree, prev_mtimes=ldpath_mtimes)
355 + vartree=vartree, prev_mtimes=ldpath_mtimes,
356 + blockers=self._find_blockers(pkg))
357 if retval != os.EX_OK:
358 return retval
359 finally:
360 @@ -5873,7 +6001,6 @@
361 portage_locks.unlockdir(catdir_lock)
362
363 elif x[0]=="binary":
364 - self._dequeue_uninstall_tasks(mtimedb)
365 #merge the tbz2
366 mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key)
367 if "--getbinpkg" in self.myopts:
368 @@ -5929,7 +6056,8 @@
369 retval = portage.pkgmerge(mytbz2, x[1], pkgsettings,
370 mydbapi=bindb,
371 vartree=self.trees[myroot]["vartree"],
372 - prev_mtimes=ldpath_mtimes)
373 + prev_mtimes=ldpath_mtimes,
374 + blockers=self._find_blockers(pkg))
375 if retval != os.EX_OK:
376 return retval
377 #need to check for errors
378 @@ -7906,6 +8034,7 @@
379 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
380 ask = "--ask" in myopts
381 nodeps = "--nodeps" in myopts
382 + oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
383 tree = "--tree" in myopts
384 if nodeps and tree:
385 tree = False
386 @@ -8077,7 +8206,7 @@
387 mergecount += 1
388
389 if mergecount==0:
390 - if "--noreplace" in myopts and favorites:
391 + if "--noreplace" in myopts and not oneshot and favorites:
392 print
393 for x in favorites:
394 print " %s %s" % (good("*"), x)
395
396 Modified: main/branches/2.1.2/doc/dependency_resolution/task_scheduling.docbook
397 ===================================================================
398 --- main/branches/2.1.2/doc/dependency_resolution/task_scheduling.docbook 2008-05-08 07:55:30 UTC (rev 10226)
399 +++ main/branches/2.1.2/doc/dependency_resolution/task_scheduling.docbook 2008-05-08 08:18:10 UTC (rev 10227)
400 @@ -21,7 +21,7 @@
401 </para>
402 <para>
403 In order to avoid a conflict, a package may need to be uninstalled
404 - in advance, rather than through replacement. The following constraints
405 + rather than replaced. The following constraints
406 protect inappropriate packages from being chosen for automatic
407 uninstallation:
408 <itemizedlist>
409 @@ -46,6 +46,16 @@
410 </listitem>
411 </itemizedlist>
412 </para>
413 + <para>
414 + In order to ensure that package files remain installed in a usable state
415 + whenever possible, uninstallation operations are not executed
416 + until after all associated conflicting packages have been installed.
417 + When file collisions occur between conflicting packages, the contents
418 + entries for those files are removed from the packages
419 + that are scheduled for uninstallation. This prevents
420 + uninstallation operations from removing overlapping files that
421 + have been claimed by conflicting packages.
422 + </para>
423 </sect1>
424 <sect1 id='dependency-resolution-task-scheduling-circular-dependencies'>
425 <title>Circular Dependencies</title>
426
427 Modified: main/branches/2.1.2/pym/portage.py
428 ===================================================================
429 --- main/branches/2.1.2/pym/portage.py 2008-05-08 07:55:30 UTC (rev 10226)
430 +++ main/branches/2.1.2/pym/portage.py 2008-05-08 08:18:10 UTC (rev 10227)
431 @@ -5310,13 +5310,13 @@
432 return newmtime
433
434 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
435 - mytree=None, mydbapi=None, vartree=None, prev_mtimes=None):
436 + mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None):
437 if not os.access(myroot, os.W_OK):
438 writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
439 noiselevel=-1)
440 return errno.EACCES
441 mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
442 - vartree=vartree)
443 + vartree=vartree, blockers=blockers)
444 return mylink.merge(pkgloc, infloc, myroot, myebuild,
445 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
446
447 @@ -8389,7 +8389,7 @@
448 }
449
450 def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
451 - vartree=None):
452 + vartree=None, blockers=None):
453 """
454 Creates a DBlink object for a given CPV.
455 The given CPV may not be present in the database already.
456 @@ -8418,6 +8418,7 @@
457 global db
458 vartree = db[myroot]["vartree"]
459 self.vartree = vartree
460 + self._blockers = blockers
461
462 self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
463 self.dbcatdir = self.dbroot+"/"+cat
464 @@ -8504,6 +8505,11 @@
465 if os.path.exists(self.dbdir+"/CONTENTS"):
466 os.unlink(self.dbdir+"/CONTENTS")
467
468 + def _clear_contents_cache(self):
469 + self.contentscache = None
470 + self._contents_inodes = None
471 + self._contents_basenames = None
472 +
473 def getcontents(self):
474 """
475 Get the installed files of a given package (aka what that package installed)
476 @@ -9167,6 +9173,7 @@
477 """
478
479 srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
480 + destroot = normalize_path(destroot).rstrip(os.path.sep) + os.path.sep
481
482 if not os.path.isdir(srcroot):
483 writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
484 @@ -9291,6 +9298,9 @@
485 return 1
486
487 # check for package collisions
488 + blockers = self._blockers
489 + if blockers is None:
490 + blockers = []
491 if True:
492 collision_ignore = set([normalize_path(myignore) for myignore in \
493 self.settings.get("COLLISION_IGNORE", "").split()])
494 @@ -9343,7 +9353,7 @@
495 if f[0] != "/":
496 f="/"+f
497 isowned = False
498 - for ver in [self] + others_in_slot:
499 + for ver in [self] + others_in_slot + blockers:
500 if (ver.isowner(f, destroot) or ver.isprotected(f)):
501 isowned = True
502 break
503 @@ -9581,6 +9591,44 @@
504 self.dbdir = self.dbpkgdir
505 self.delete()
506 _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
507 +
508 + # Check for file collisions with blocking packages
509 + # and remove any colliding files from their CONTENTS
510 + # since they now belong to this package.
511 + self._clear_contents_cache()
512 + contents = self.getcontents()
513 + destroot_len = len(destroot) - 1
514 + for blocker in blockers:
515 + blocker_contents = blocker.getcontents()
516 + collisions = []
517 + for filename in blocker_contents:
518 + relative_filename = filename[destroot_len:]
519 + if self.isowner(relative_filename, destroot):
520 + collisions.append(filename)
521 + if not collisions:
522 + continue
523 + for filename in collisions:
524 + del blocker_contents[filename]
525 + f = atomic_ofstream(os.path.join(blocker.dbdir, "CONTENTS"))
526 + try:
527 + for filename in sorted(blocker_contents):
528 + entry_data = blocker_contents[filename]
529 + entry_type = entry_data[0]
530 + relative_filename = filename[destroot_len:]
531 + if entry_type == "obj":
532 + entry_type, mtime, md5sum = entry_data
533 + line = "%s %s %s %s\n" % \
534 + (entry_type, relative_filename, md5sum, mtime)
535 + elif entry_type == "sym":
536 + entry_type, mtime, link = entry_data
537 + line = "%s %s -> %s %s\n" % \
538 + (entry_type, relative_filename, link, mtime)
539 + else: # dir, dev, fif
540 + line = "%s %s\n" % (entry_type, relative_filename)
541 + f.write(line)
542 + finally:
543 + f.close()
544 +
545 # Due to mtime granularity, mtime checks do not always properly
546 # invalidate vardbapi caches.
547 self.vartree.dbapi.mtdircache.pop(self.cat, None)
548 @@ -10022,7 +10070,8 @@
549 """Returns keys for all packages within pkgdir"""
550 return self.portdb.cp_list(self.cp, mytree=self.mytree)
551
552 -def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None):
553 +def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None,
554 + vartree=None, prev_mtimes=None, blockers=None):
555 """will merge a .tbz2 file, returning a list of runtime dependencies
556 that must be satisfied, or None if there was a merge error. This
557 code assumes the package exists."""
558 @@ -10115,7 +10164,7 @@
559 #tbz2_lock = None
560
561 mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
562 - treetype="bintree")
563 + treetype="bintree", blockers=blockers)
564 retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
565 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
566 did_merge_phase = True
567
568 --
569 gentoo-commits@l.g.o mailing list