Gentoo Archives: gentoo-commits

From: "Zac Medico (zmedico)" <zmedico@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] portage r10225 - in main/trunk: doc/dependency_resolution pym/_emerge pym/portage pym/portage/dbapi
Date: Thu, 08 May 2008 07:49:04
Message-Id: E1Ju0s9-0003fm-5P@stork.gentoo.org
1 Author: zmedico
2 Date: 2008-05-08 07:48:59 +0000 (Thu, 08 May 2008)
3 New Revision: 10225
4
5 Modified:
6 main/trunk/doc/dependency_resolution/task_scheduling.docbook
7 main/trunk/pym/_emerge/__init__.py
8 main/trunk/pym/portage/__init__.py
9 main/trunk/pym/portage/dbapi/vartree.py
10 Log:
11 Instead of doing automatic uninstalls in advance, install conflicting
12 packages first and then do the uninstall afterwards. This requires
13 special handling when file collisions occur, but it's preferred
14 because it ensures that package files remain installed in a usable
15 state whenever possible.
16
17 When file collisions occur between conflicting packages, the contents
18 entries for those files are removed from the packages that are
19 scheduled for uninstallation. This prevents uninstallation operations
20 from removing overlapping files that have been claimed by conflicting
21 packages.
22
23
24 Modified: main/trunk/doc/dependency_resolution/task_scheduling.docbook
25 ===================================================================
26 --- main/trunk/doc/dependency_resolution/task_scheduling.docbook 2008-05-07 18:49:19 UTC (rev 10224)
27 +++ main/trunk/doc/dependency_resolution/task_scheduling.docbook 2008-05-08 07:48:59 UTC (rev 10225)
28 @@ -21,7 +21,7 @@
29 </para>
30 <para>
31 In order to avoid a conflict, a package may need to be uninstalled
32 - in advance, rather than through replacement. The following constraints
33 + rather than replaced. The following constraints
34 protect inappropriate packages from being chosen for automatic
35 uninstallation:
36 <itemizedlist>
37 @@ -46,6 +46,16 @@
38 </listitem>
39 </itemizedlist>
40 </para>
41 + <para>
42 + In order to ensure that package files remain installed in a usable state
43 + whenever possible, uninstallation operations are not executed
44 + until after all associated conflicting packages have been installed.
45 + When file collisions occur between conflicting packages, the contents
46 + entries for those files are removed from the packages
47 + that are scheduled for uninstallation. This prevents
48 + uninstallation operations from removing overlapping files that
49 + have been claimed by conflicting packages.
50 + </para>
51 </sect1>
52 <sect1 id='dependency-resolution-task-scheduling-circular-dependencies'>
53 <title>Circular Dependencies</title>
54
55 Modified: main/trunk/pym/_emerge/__init__.py
56 ===================================================================
57 --- main/trunk/pym/_emerge/__init__.py 2008-05-07 18:49:19 UTC (rev 10224)
58 +++ main/trunk/pym/_emerge/__init__.py 2008-05-08 07:48:59 UTC (rev 10225)
59 @@ -729,7 +729,6 @@
60 result = ""
61 return result
62
63 -
64 class RootConfig(object):
65 """This is used internally by depgraph to track information about a
66 particular $ROOT."""
67 @@ -1273,13 +1272,22 @@
68 __slots__ = ("built", "cpv", "depth",
69 "installed", "metadata", "onlydeps", "operation",
70 "root", "type_name",
71 - "cp", "cpv_slot", "pv_split", "slot_atom")
72 + "category", "cp", "cpv_slot", "pf", "pv_split", "slot_atom")
73 +
74 + metadata_keys = [
75 + "CHOST", "COUNTER", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
76 + "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
77 + "repository", "RESTRICT", "SLOT", "USE"]
78 +
79 def __init__(self, **kwargs):
80 Task.__init__(self, **kwargs)
81 self.cp = portage.cpv_getkey(self.cpv)
82 self.slot_atom = "%s:%s" % (self.cp, self.metadata["SLOT"])
83 self.cpv_slot = "%s:%s" % (self.cpv, self.metadata["SLOT"])
84 - self.pv_split = portage.catpkgsplit(self.cpv)[1:]
85 + cpv_parts = portage.catpkgsplit(self.cpv)
86 + self.category = cpv_parts[0]
87 + self.pv_split = cpv_parts[1:]
88 + self.pf = self.cpv.replace(self.category + "/", "", 1)
89
90 def _get_hash_key(self):
91 hash_key = getattr(self, "_hash_key", None)
92 @@ -1366,6 +1374,9 @@
93 2) the old-style virtuals have changed
94 """
95 class BlockerData(object):
96 +
97 + __slots__ = ("__weakref__", "atoms", "counter")
98 +
99 def __init__(self, counter, atoms):
100 self.counter = counter
101 self.atoms = atoms
102 @@ -1507,6 +1518,84 @@
103 an AttributeError."""
104 return list(self)
105
106 +class BlockerDB(object):
107 +
108 + def __init__(self, vartree, portdb):
109 + self._vartree = vartree
110 + self._portdb = portdb
111 + self._blocker_cache = \
112 + BlockerCache(self._vartree.root, vartree.dbapi)
113 + self._dep_check_trees = { self._vartree.root : {
114 + "porttree" : self._vartree,
115 + "vartree" : self._vartree,
116 + }}
117 + self._installed_pkgs = None
118 +
119 + def findInstalledBlockers(self, new_pkg):
120 + self._update_cache()
121 + blocker_parents = digraph()
122 + blocker_atoms = []
123 + for pkg in self._installed_pkgs:
124 + for blocker_atom in self._blocker_cache[pkg.cpv].atoms:
125 + blocker_atom = blocker_atom[1:]
126 + blocker_atoms.append(blocker_atom)
127 + blocker_parents.add(blocker_atom, pkg)
128 +
129 + blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
130 + blocking_pkgs = set()
131 + for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
132 + blocking_pkgs.update(blocker_parents.parent_nodes(atom))
133 + return blocking_pkgs
134 +
135 + def _update_cache(self):
136 + blocker_cache = self._blocker_cache
137 + dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
138 + dep_check_trees = self._dep_check_trees
139 + settings = self._vartree.settings
140 + stale_cache = set(blocker_cache)
141 + fake_vartree = \
142 + FakeVartree(self._vartree,
143 + self._portdb, Package.metadata_keys, {})
144 + vardb = fake_vartree.dbapi
145 + self._installed_pkgs = list(vardb)
146 +
147 + for inst_pkg in self._installed_pkgs:
148 + stale_cache.discard(inst_pkg.cpv)
149 + cached_blockers = blocker_cache.get(inst_pkg.cpv)
150 + if cached_blockers is not None and \
151 + cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
152 + cached_blockers = None
153 + if cached_blockers is not None:
154 + blocker_atoms = cached_blockers.atoms
155 + else:
156 + myuse = inst_pkg.metadata["USE"].split()
157 + # Use aux_get() to trigger FakeVartree global
158 + # updates on *DEPEND when appropriate.
159 + depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
160 + try:
161 + portage.dep._dep_check_strict = False
162 + success, atoms = portage.dep_check(depstr,
163 + vardb, settings, myuse=myuse,
164 + trees=dep_check_trees, myroot=inst_pkg.root)
165 + finally:
166 + portage.dep._dep_check_strict = True
167 + if not success:
168 + pkg_location = os.path.join(inst_pkg.root,
169 + portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
170 + portage.writemsg("!!! %s/*DEPEND: %s\n" % \
171 + (pkg_location, atoms), noiselevel=-1)
172 + continue
173 +
174 + blocker_atoms = [atom for atom in atoms \
175 + if atom.startswith("!")]
176 + blocker_atoms.sort()
177 + counter = long(inst_pkg.metadata["COUNTER"])
178 + blocker_cache[inst_pkg.cpv] = \
179 + blocker_cache.BlockerData(counter, blocker_atoms)
180 + for cpv in stale_cache:
181 + del blocker_cache[cpv]
182 + blocker_cache.flush()
183 +
184 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
185
186 from formatter import AbstractFormatter, DumbWriter
187 @@ -1660,10 +1749,7 @@
188 "binary":"bintree",
189 "installed":"vartree"}
190
191 - _mydbapi_keys = [
192 - "CHOST", "COUNTER", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
193 - "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
194 - "repository", "RESTRICT", "SLOT", "USE"]
195 + _mydbapi_keys = Package.metadata_keys
196
197 _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
198
199 @@ -3484,6 +3570,9 @@
200 return -1
201 myblocker_uninstalls = self._blocker_uninstalls.copy()
202 retlist=[]
203 + # Contains uninstall tasks that have been scheduled to
204 + # occur after overlapping blockers have been installed.
205 + scheduled_uninstalls = set()
206 # Contains any Uninstall tasks that have been ignored
207 # in order to avoid the circular deps code path. These
208 # correspond to blocker conflicts that could not be
209 @@ -3698,10 +3787,16 @@
210 selected_nodes = list(selected_nodes)
211 selected_nodes.sort(cmp_circular_bias)
212
213 + if not selected_nodes and scheduled_uninstalls:
214 + selected_nodes = set()
215 + for node in scheduled_uninstalls:
216 + if not mygraph.child_nodes(node):
217 + selected_nodes.add(node)
218 + scheduled_uninstalls.difference_update(selected_nodes)
219 +
220 if not selected_nodes and not myblocker_uninstalls.is_empty():
221 # An Uninstall task needs to be executed in order to
222 # avoid conflict if possible.
223 -
224 min_parent_deps = None
225 uninst_task = None
226 for task in myblocker_uninstalls.leaf_nodes():
227 @@ -3819,7 +3914,20 @@
228 uninst_task = task
229
230 if uninst_task is not None:
231 - selected_nodes = [uninst_task]
232 + # The uninstall is performed only after blocking
233 + # packages have been merged on top of it. File
234 + # collisions between blocking packages are detected
235 + # and removed from the list of files to be uninstalled.
236 + scheduled_uninstalls.add(uninst_task)
237 + parent_nodes = mygraph.parent_nodes(uninst_task)
238 +
239 + # Reverse the parent -> uninstall edges since we want
240 + # to do the uninstall after blocking packages have
241 + # been merged on top of it.
242 + mygraph.remove(uninst_task)
243 + for blocked_pkg in parent_nodes:
244 + mygraph.add(blocked_pkg, uninst_task,
245 + priority=BlockerDepPriority.instance)
246 else:
247 # None of the Uninstall tasks are acceptable, so
248 # the corresponding blockers are unresolvable.
249 @@ -3836,12 +3944,12 @@
250 ignored_uninstall_tasks.add(node)
251 break
252
253 - # After dropping an Uninstall task, reset
254 - # the state variables for leaf node selection and
255 - # continue trying to select leaf nodes.
256 - prefer_asap = True
257 - accept_root_node = False
258 - continue
259 + # After dropping an Uninstall task, reset
260 + # the state variables for leaf node selection and
261 + # continue trying to select leaf nodes.
262 + prefer_asap = True
263 + accept_root_node = False
264 + continue
265
266 if not selected_nodes:
267 self._circular_deps_for_display = mygraph
268 @@ -4002,6 +4110,8 @@
269 verbosity = ("--quiet" in self.myopts and 1 or \
270 "--verbose" in self.myopts and 3 or 2)
271 favorites_set = InternalPackageSet(favorites)
272 + oneshot = "--oneshot" in self.myopts or \
273 + "--onlydeps" in self.myopts
274 changelogs=[]
275 p=[]
276 blockers = []
277 @@ -4558,7 +4668,8 @@
278 try:
279 pkg_system = system_set.findAtomForPackage(pkg_key, metadata)
280 pkg_world = world_set.findAtomForPackage(pkg_key, metadata)
281 - if not pkg_world and myroot == self.target_root and \
282 + if not (oneshot or pkg_world) and \
283 + myroot == self.target_root and \
284 favorites_set.findAtomForPackage(pkg_key, metadata):
285 # Maybe it will be added to world now.
286 if create_world_atom(pkg_key, metadata,
287 @@ -5368,13 +5479,36 @@
288 if settings.get("PORTAGE_DEBUG", "") == "1":
289 self.edebug = 1
290 self.pkgsettings = {}
291 + self._blocker_db = {}
292 for root in trees:
293 self.pkgsettings[root] = portage.config(
294 clone=trees[root]["vartree"].settings)
295 + self._blocker_db[root] = BlockerDB(
296 + trees[root]["vartree"],
297 + trees[root]["porttree"].dbapi)
298 self.curval = 0
299 self._spawned_pids = []
300 - self._uninstall_queue = []
301
302 + def _find_blockers(self, new_pkg):
303 + for opt in ("--buildpkgonly", "--nodeps",
304 + "--fetchonly", "--fetch-all-uri", "--pretend"):
305 + if opt in self.myopts:
306 + return None
307 +
308 + blocker_dblinks = []
309 + for blocking_pkg in self._blocker_db[
310 + new_pkg.root].findInstalledBlockers(new_pkg):
311 + if new_pkg.slot_atom == blocking_pkg.slot_atom:
312 + continue
313 + if new_pkg.cpv == blocking_pkg.cpv:
314 + continue
315 + blocker_dblinks.append(portage.dblink(
316 + blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
317 + self.pkgsettings[blocking_pkg.root], treetype="vartree",
318 + vartree=self.trees[blocking_pkg.root]["vartree"]))
319 +
320 + return blocker_dblinks
321 +
322 def merge(self, mylist, favorites, mtimedb):
323 try:
324 return self._merge(mylist, favorites, mtimedb)
325 @@ -5403,17 +5537,6 @@
326 pass
327 spawned_pids.remove(pid)
328
329 - def _dequeue_uninstall_tasks(self, mtimedb):
330 - if not self._uninstall_queue:
331 - return
332 - for uninst_task in self._uninstall_queue:
333 - root_config = self.trees[uninst_task.root]["root_config"]
334 - unmerge(root_config, self.myopts, "unmerge",
335 - [uninst_task.cpv], mtimedb["ldpath"], clean_world=0)
336 - del mtimedb["resume"]["mergelist"][0]
337 - mtimedb.commit()
338 - del self._uninstall_queue[:]
339 -
340 def _merge(self, mylist, favorites, mtimedb):
341 from portage.elog import elog_process
342 from portage.elog.filtering import filter_mergephases
343 @@ -5554,7 +5677,10 @@
344 metadata = pkg.metadata
345 if pkg.installed:
346 if not (buildpkgonly or fetchonly or pretend):
347 - self._uninstall_queue.append(pkg)
348 + unmerge(root_config, self.myopts, "unmerge",
349 + [pkg.cpv], mtimedb["ldpath"], clean_world=0)
350 + del mtimedb["resume"]["mergelist"][0]
351 + mtimedb.commit()
352 continue
353
354 if x[0]=="blocks":
355 @@ -5655,20 +5781,22 @@
356 return retval
357 bintree = self.trees[myroot]["bintree"]
358 bintree.inject(pkg_key, filename=binpkg_tmpfile)
359 - self._dequeue_uninstall_tasks(mtimedb)
360 +
361 if "--buildpkgonly" not in self.myopts:
362 msg = " === (%s of %s) Merging (%s::%s)" % \
363 (mergecount, len(mymergelist), pkg_key, y)
364 short_msg = "emerge: (%s of %s) %s Merge" % \
365 (mergecount, len(mymergelist), pkg_key)
366 emergelog(xterm_titles, msg, short_msg=short_msg)
367 +
368 retval = portage.merge(pkgsettings["CATEGORY"],
369 pkgsettings["PF"], pkgsettings["D"],
370 os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
371 "build-info"), myroot, pkgsettings,
372 myebuild=pkgsettings["EBUILD"],
373 mytree="porttree", mydbapi=portdb,
374 - vartree=vartree, prev_mtimes=ldpath_mtimes)
375 + vartree=vartree, prev_mtimes=ldpath_mtimes,
376 + blockers=self._find_blockers(pkg))
377 if retval != os.EX_OK:
378 return retval
379 elif "noclean" not in pkgsettings.features:
380 @@ -5687,14 +5815,15 @@
381 prev_mtimes=ldpath_mtimes)
382 if retval != os.EX_OK:
383 return retval
384 - self._dequeue_uninstall_tasks(mtimedb)
385 +
386 retval = portage.merge(pkgsettings["CATEGORY"],
387 pkgsettings["PF"], pkgsettings["D"],
388 os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
389 "build-info"), myroot, pkgsettings,
390 myebuild=pkgsettings["EBUILD"],
391 mytree="porttree", mydbapi=portdb,
392 - vartree=vartree, prev_mtimes=ldpath_mtimes)
393 + vartree=vartree, prev_mtimes=ldpath_mtimes,
394 + blockers=self._find_blockers(pkg))
395 if retval != os.EX_OK:
396 return retval
397 finally:
398 @@ -5716,7 +5845,6 @@
399 portage.locks.unlockdir(catdir_lock)
400
401 elif x[0]=="binary":
402 - self._dequeue_uninstall_tasks(mtimedb)
403 #merge the tbz2
404 mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key)
405 if "--getbinpkg" in self.myopts:
406 @@ -5772,7 +5900,8 @@
407 retval = portage.pkgmerge(mytbz2, x[1], pkgsettings,
408 mydbapi=bindb,
409 vartree=self.trees[myroot]["vartree"],
410 - prev_mtimes=ldpath_mtimes)
411 + prev_mtimes=ldpath_mtimes,
412 + blockers=self._find_blockers(pkg))
413 if retval != os.EX_OK:
414 return retval
415 #need to check for errors
416 @@ -7888,6 +8017,7 @@
417 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
418 ask = "--ask" in myopts
419 nodeps = "--nodeps" in myopts
420 + oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
421 tree = "--tree" in myopts
422 if nodeps and tree:
423 tree = False
424 @@ -8059,7 +8189,7 @@
425 mergecount += 1
426
427 if mergecount==0:
428 - if "--noreplace" in myopts and favorites:
429 + if "--noreplace" in myopts and not oneshot and favorites:
430 print
431 for x in favorites:
432 print " %s %s" % (good("*"), x)
433
434 Modified: main/trunk/pym/portage/__init__.py
435 ===================================================================
436 --- main/trunk/pym/portage/__init__.py 2008-05-07 18:49:19 UTC (rev 10224)
437 +++ main/trunk/pym/portage/__init__.py 2008-05-08 07:48:59 UTC (rev 10225)
438 @@ -5313,13 +5313,13 @@
439 return newmtime
440
441 def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
442 - mytree=None, mydbapi=None, vartree=None, prev_mtimes=None):
443 + mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None):
444 if not os.access(myroot, os.W_OK):
445 writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
446 noiselevel=-1)
447 return errno.EACCES
448 mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
449 - vartree=vartree)
450 + vartree=vartree, blockers=blockers)
451 return mylink.merge(pkgloc, infloc, myroot, myebuild,
452 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
453
454 @@ -6184,7 +6184,8 @@
455 """Returns keys for all packages within pkgdir"""
456 return self.portdb.cp_list(self.cp, mytree=self.mytree)
457
458 -def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None):
459 +def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None,
460 + vartree=None, prev_mtimes=None, blockers=None):
461 """will merge a .tbz2 file, returning a list of runtime dependencies
462 that must be satisfied, or None if there was a merge error. This
463 code assumes the package exists."""
464 @@ -6277,7 +6278,7 @@
465 #tbz2_lock = None
466
467 mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
468 - treetype="bintree")
469 + treetype="bintree", blockers=blockers)
470 retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
471 mydbapi=mydbapi, prev_mtimes=prev_mtimes)
472 did_merge_phase = True
473
474 Modified: main/trunk/pym/portage/dbapi/vartree.py
475 ===================================================================
476 --- main/trunk/pym/portage/dbapi/vartree.py 2008-05-07 18:49:19 UTC (rev 10224)
477 +++ main/trunk/pym/portage/dbapi/vartree.py 2008-05-08 07:48:59 UTC (rev 10225)
478 @@ -932,7 +932,7 @@
479 }
480
481 def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
482 - vartree=None):
483 + vartree=None, blockers=None):
484 """
485 Creates a DBlink object for a given CPV.
486 The given CPV may not be present in the database already.
487 @@ -961,6 +961,7 @@
488 from portage import db
489 vartree = db[myroot]["vartree"]
490 self.vartree = vartree
491 + self._blockers = blockers
492
493 self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
494 self.dbcatdir = self.dbroot+"/"+cat
495 @@ -1037,6 +1038,11 @@
496 if os.path.exists(self.dbdir+"/CONTENTS"):
497 os.unlink(self.dbdir+"/CONTENTS")
498
499 + def _clear_contents_cache(self):
500 + self.contentscache = None
501 + self._contents_inodes = None
502 + self._contents_basenames = None
503 +
504 def getcontents(self):
505 """
506 Get the installed files of a given package (aka what that package installed)
507 @@ -1923,6 +1929,7 @@
508 """
509
510 srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
511 + destroot = normalize_path(destroot).rstrip(os.path.sep) + os.path.sep
512
513 if not os.path.isdir(srcroot):
514 writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
515 @@ -2063,8 +2070,11 @@
516 self._preserve_libs(srcroot, destroot, myfilelist+mylinklist, counter, inforoot)
517
518 # check for package collisions
519 - collisions = self._collision_protect(srcroot, destroot, others_in_slot,
520 - myfilelist+mylinklist)
521 + blockers = self._blockers
522 + if blockers is None:
523 + blockers = []
524 + collisions = self._collision_protect(srcroot, destroot,
525 + others_in_slot + blockers, myfilelist + mylinklist)
526
527 # Make sure the ebuild environment is initialized and that ${T}/elog
528 # exists for logging of collision-protect eerror messages.
529 @@ -2284,6 +2294,44 @@
530 self.dbdir = self.dbpkgdir
531 self.delete()
532 _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
533 +
534 + # Check for file collisions with blocking packages
535 + # and remove any colliding files from their CONTENTS
536 + # since they now belong to this package.
537 + self._clear_contents_cache()
538 + contents = self.getcontents()
539 + destroot_len = len(destroot) - 1
540 + for blocker in blockers:
541 + blocker_contents = blocker.getcontents()
542 + collisions = []
543 + for filename in blocker_contents:
544 + relative_filename = filename[destroot_len:]
545 + if self.isowner(relative_filename, destroot):
546 + collisions.append(filename)
547 + if not collisions:
548 + continue
549 + for filename in collisions:
550 + del blocker_contents[filename]
551 + f = atomic_ofstream(os.path.join(blocker.dbdir, "CONTENTS"))
552 + try:
553 + for filename in sorted(blocker_contents):
554 + entry_data = blocker_contents[filename]
555 + entry_type = entry_data[0]
556 + relative_filename = filename[destroot_len:]
557 + if entry_type == "obj":
558 + entry_type, mtime, md5sum = entry_data
559 + line = "%s %s %s %s\n" % \
560 + (entry_type, relative_filename, md5sum, mtime)
561 + elif entry_type == "sym":
562 + entry_type, mtime, link = entry_data
563 + line = "%s %s -> %s %s\n" % \
564 + (entry_type, relative_filename, link, mtime)
565 + else: # dir, dev, fif
566 + line = "%s %s\n" % (entry_type, relative_filename)
567 + f.write(line)
568 + finally:
569 + f.close()
570 +
571 # Due to mtime granularity, mtime checks do not always properly
572 # invalidate vardbapi caches.
573 self.vartree.dbapi.mtdircache.pop(self.cat, None)
574
575 --
576 gentoo-commits@l.g.o mailing list