Gentoo Archives: gentoo-commits

From: "Fabian Groffen (grobian)" <grobian@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] portage r10880 - in main/branches/prefix: bin man pym/_emerge pym/portage pym/portage/cache pym/portage/dbapi
Date: Tue, 01 Jul 2008 17:22:37
Message-Id: E1KDjYk-0006b8-BU@stork.gentoo.org
1 Author: grobian
2 Date: 2008-07-01 17:22:29 +0000 (Tue, 01 Jul 2008)
3 New Revision: 10880
4
5 Modified:
6 main/branches/prefix/bin/repoman
7 main/branches/prefix/man/portage.5
8 main/branches/prefix/pym/_emerge/__init__.py
9 main/branches/prefix/pym/portage/__init__.py
10 main/branches/prefix/pym/portage/cache/mappings.py
11 main/branches/prefix/pym/portage/checksum.py
12 main/branches/prefix/pym/portage/dbapi/porttree.py
13 main/branches/prefix/pym/portage/dbapi/vartree.py
14 Log:
15 Merged from trunk 10853:10869
16
17 | 10854 | Avoid python-2.6 deprecation warnings for md5 and sha |
18 | zmedico | modules by trying to import hashlib first and then falling |
19 | | back to the deprecated modules if necessary. Thanks to |
20 | | ColdWind for reporting. |
21
22 | 10855 | Reimplement parallel-fetch by spawning the `ebuild fetch` |
23 | zmedico | command for each ebuild. The benefit of using this approach |
24 | | is that it can be integrated together with parallel build |
25 | | scheduling that's planned. Parallel-fetch support for |
26 | | binhost is not implemented yet, though it worked previously. |
27
28 | 10856 | Clear the self._task_queue to avoid duplicate parallel-fetch |
29 | zmedico | tasks in --keep-going mode. |
30
31 | 10857 | Add "(no inline comments)" to qualify "comments begin with |
32 | zmedico | #" statements. |
33
34 | 10858 | Bug #230245 - Pass the correct directory when calling `svn |
35 | zmedico | list` and `svn status` since repoman supports category-level |
36 | | and repo-level commits. |
37
38 | 10859 | Bug #230245 - Use os.path.basename() on paths returned from |
39 | zmedico | `svn list` and `svn status`. |
40
41 | 10860 | Bug #230249 - Disable the "ebuild.notadded" check when not |
42 | zmedico | in commit mode and running `svn list` and `svn status` calls |
43 | | in every package dir will be too expensive. |
44
45 | 10861 | Fix typo. |
46 | zmedico | |
47
48 | 10862 | add a call to pruneNonExisting() at the end of |
49 | zmedico | dbapi.vartree.PreservedLibsRegistry.__init__() |
50
51 | 10864 | Split out a write_contents() function and a |
52 | zmedico | vardbapi.removeFromContents() function. This is refactoring |
53 | | of code from the blocker file collision contents handling in |
54 | | dblink.treewalk(). Also, there is a new |
55 | | dblink._match_contents() method derived from isowner(). It |
56 | | returns the exact path from the contents file that matches |
57 | | the given path, regardless of path differences due to things |
58 | | such as symlinks. |
59
60 | 10865 | Handle potential errors in PreservedLibsRegistry.store() now |
61 | zmedico | that it can be called via pruneNonExisting(), due to things |
62 | | such as portageq calls where the user may not have write |
63 | | permission to the registry. |
64
65 | 10866 | Also avoid sandbox violations in |
66 | zmedico | PreservedLibsRegistry.store(), for running portage inside |
67 | | ebuild phases. |
68
69 | 10867 | Never do realpath() on an empty string for |
70 | zmedico | portdbapi.porttree_root since otherwise it can evaluate to |
71 | | $CWD which leads to undesirable results. |
72
73 | 10868 | Add a new BinpkgFetcherAsync class and use it to implement |
74 | zmedico | parallel-fetch for --getbinpkg. |
75
76 | 10869 | Add a "prefix" keyword parameter to slot_dict_class() which |
77 | zmedico | controls the prefix used when mapping attribute names from |
78 | | keys. Use this to change the syntax from files["foo"] to |
79 | | files.foo (it's fewer characters to look at). |
80
81
82 Modified: main/branches/prefix/bin/repoman
83 ===================================================================
84 --- main/branches/prefix/bin/repoman 2008-07-01 17:09:13 UTC (rev 10879)
85 +++ main/branches/prefix/bin/repoman 2008-07-01 17:22:29 UTC (rev 10880)
86 @@ -769,6 +769,12 @@
87 arch_xmatch_caches = {}
88 shared_xmatch_caches = {"cp-list":{}}
89
90 +# Disable the "ebuild.notadded" check when not in commit mode and
91 +# running `svn list` and `svn status` calls in every package dir
92 +# will be too expensive.
93 +check_ebuild_notadded = not \
94 + (vcs == "svn" and repolevel < 3 and options.mode != "commit")
95 +
96 for x in scanlist:
97 #ebuilds and digests added to cvs respectively.
98 logging.info("checking package %s" % x)
99 @@ -865,12 +871,12 @@
100 if not os.path.isdir(os.path.join(checkdir, "files")):
101 has_filesdir = False
102
103 - if vcs:
104 + if vcs and check_ebuild_notadded:
105 try:
106 if vcs == "cvs":
107 myf=open(checkdir+"/CVS/Entries","r")
108 if vcs == "svn":
109 - myf=os.popen("svn list")
110 + myf = os.popen("svn list " + checkdir)
111 myl=myf.readlines()
112 myf.close()
113 for l in myl:
114 @@ -887,16 +893,16 @@
115 if l[-1:] == "/":
116 continue
117 if l[-7:] == ".ebuild":
118 - eadded.append(l[:-7])
119 + eadded.append(os.path.basename(l[:-7]))
120 if vcs == "svn":
121 - myf=os.popen("svn status")
122 + myf = os.popen("svn status " + checkdir)
123 myl=myf.readlines()
124 myf.close()
125 for l in myl:
126 if l[0] == "A":
127 l = l.rstrip().split(' ')[-1]
128 if l[-7:] == ".ebuild":
129 - eadded.append(l[:-7])
130 + eadded.append(os.path.basename(l[:-7]))
131 except IOError:
132 if options.mode == 'commit' and vcs == "cvs":
133 stats["CVS/Entries.IO_error"] += 1
134 @@ -1068,7 +1074,7 @@
135 if stat.S_IMODE(os.stat(full_path).st_mode) & 0111:
136 stats["file.executable"] += 1
137 fails["file.executable"].append(x+"/"+y+".ebuild")
138 - if vcs and y not in eadded:
139 + if vcs and check_ebuild_notadded and y not in eadded:
140 #ebuild not added to vcs
141 stats["ebuild.notadded"]=stats["ebuild.notadded"]+1
142 fails["ebuild.notadded"].append(x+"/"+y+".ebuild")
143
144 Modified: main/branches/prefix/man/portage.5
145 ===================================================================
146 --- main/branches/prefix/man/portage.5 2008-07-01 17:09:13 UTC (rev 10879)
147 +++ main/branches/prefix/man/portage.5 2008-07-01 17:22:29 UTC (rev 10880)
148 @@ -187,7 +187,7 @@
149
150 .I Format:
151 .nf
152 -\- comments begin with #
153 +\- comments begin with # (no inline comments)
154 \- one DEPEND atom per line
155 \- packages to be added to the system set begin with a *
156 .fi
157 @@ -234,7 +234,7 @@
158
159 .I Format:
160 .nf
161 -\- comments begin with #
162 +\- comments begin with # (no inline comments)
163 \- one DEPEND atom per line
164 \- relational operators are not allowed
165 \- must include a version
166 @@ -262,7 +262,7 @@
167
168 .I Format:
169 .nf
170 -\- comments begin with #
171 +\- comments begin with # (no inline comments)
172 \- one DEPEND atom per line with space-delimited USE flags
173 .fi
174
175 @@ -284,7 +284,7 @@
176
177 .I Format:
178 .nf
179 -\- comments begin with #
180 +\- comments begin with # (no inline comments)
181 \- one DEPEND atom per line with space-delimited USE flags
182 .fi
183
184 @@ -318,7 +318,7 @@
185
186 .I Format:
187 .nf
188 -\- comments begin with #
189 +\- comments begin with # (no inline comments)
190 \- one USE flag per line
191 .fi
192 .TP
193 @@ -334,7 +334,7 @@
194
195 .I Format:
196 .nf
197 -\- comments begin with #
198 +\- comments begin with # (no inline comments)
199 \- one USE flag per line
200 .fi
201 .TP
202 @@ -348,7 +348,7 @@
203
204 .I Format:
205 .nf
206 -\- comments begin with #
207 +\- comments begin with # (no inline comments)
208 \- one virtual and DEPEND atom base pair per line
209 .fi
210
211 @@ -478,7 +478,7 @@
212
213 .I Format:
214 .nf
215 -\- comments begin with #
216 +\- comments begin with # (no inline comments)
217 \- one DEPEND atom per line with space-delimited USE flags
218 .fi
219
220 @@ -500,7 +500,7 @@
221
222 .I Format:
223 .nf
224 -\- comments begin with #
225 +\- comments begin with # (no inline comments)
226 \- mirror type followed by a list of hosts
227 .fi
228
229 @@ -585,7 +585,7 @@
230
231 .I Format:
232 .nf
233 -\- comments begin with #
234 +\- comments begin with # (no inline comments)
235 \- one DEPEND atom per line
236 .fi
237
238 @@ -606,7 +606,7 @@
239
240 .I Format:
241 .nf
242 -\- comments begin with #
243 +\- comments begin with # (no inline comments)
244 \- one profile list per line in format: arch dir status
245 \- arch must be listed in arch.list
246 \- dir is relative to profiles.desc
247 @@ -631,7 +631,7 @@
248
249 .I Format:
250 .nf
251 -\- comments begin with #
252 +\- comments begin with # (no inline comments)
253 \- mirror type followed by a list of hosts
254 .fi
255
256 @@ -649,7 +649,7 @@
257
258 .I Format:
259 .nf
260 -\- comments begin with #
261 +\- comments begin with # (no inline comments)
262 \- use flag \- some description
263 .fi
264
265 @@ -666,7 +666,7 @@
266
267 .nf
268 .I Format:
269 -\- comments begin with #
270 +\- comments begin with # (no inline comments)
271 \- package:use flag \- description
272
273 .I Example:
274
275 Modified: main/branches/prefix/pym/_emerge/__init__.py
276 ===================================================================
277 --- main/branches/prefix/pym/_emerge/__init__.py 2008-07-01 17:09:13 UTC (rev 10879)
278 +++ main/branches/prefix/pym/_emerge/__init__.py 2008-07-01 17:22:29 UTC (rev 10880)
279 @@ -21,7 +21,11 @@
280 sys.exit(1)
281
282 import array
283 +import fcntl
284 import select
285 +import shlex
286 +import urlparse
287 +import weakref
288 import gc
289 import os, stat
290 import platform
291 @@ -1460,26 +1464,141 @@
292 v = 0
293 self._pkg.mtime = v
294
295 -class EbuildFetcher(Task):
296 +class EbuildFetcher(SlotObject):
297
298 __slots__ = ("fetch_all", "pkg", "pretend", "settings")
299
300 - def _get_hash_key(self):
301 - hash_key = getattr(self, "_hash_key", None)
302 - if hash_key is None:
303 - self._hash_key = ("EbuildFetcher", self.pkg._get_hash_key())
304 - return self._hash_key
305 -
306 def execute(self):
307 portdb = self.pkg.root_config.trees["porttree"].dbapi
308 ebuild_path = portdb.findname(self.pkg.cpv)
309 debug = self.settings.get("PORTAGE_DEBUG") == "1"
310 +
311 retval = portage.doebuild(ebuild_path, "fetch",
312 - self.settings["ROOT"], self.settings, debug,
313 - self.pretend, fetchonly=1, fetchall=self.fetch_all,
314 + self.settings["ROOT"], self.settings, debug=debug,
315 + listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
316 mydbapi=portdb, tree="porttree")
317 return retval
318
319 +class EbuildFetcherAsync(SlotObject):
320 +
321 + __slots__ = ("log_file", "fd_pipes", "pkg",
322 + "register", "unregister",
323 + "pid", "returncode", "files")
324 +
325 + _file_names = ("fetcher", "out")
326 + _files_dict = slot_dict_class(_file_names, prefix="")
327 + _bufsize = 4096
328 +
329 + def start(self):
330 + # flush any pending output
331 + fd_pipes = self.fd_pipes
332 + if fd_pipes is None:
333 + fd_pipes = {
334 + 0 : sys.stdin.fileno(),
335 + 1 : sys.stdout.fileno(),
336 + 2 : sys.stderr.fileno(),
337 + }
338 +
339 + log_file = self.log_file
340 + self.files = self._files_dict()
341 + files = self.files
342 +
343 + if log_file is not None:
344 + files.out = open(log_file, "a")
345 + portage.util.apply_secpass_permissions(log_file,
346 + uid=portage.portage_uid, gid=portage.portage_gid,
347 + mode=0660)
348 + else:
349 + for fd in fd_pipes.itervalues():
350 + if fd == sys.stdout.fileno():
351 + sys.stdout.flush()
352 + if fd == sys.stderr.fileno():
353 + sys.stderr.flush()
354 +
355 + files.out = os.fdopen(os.dup(fd_pipes[1]), 'w')
356 +
357 + master_fd, slave_fd = os.pipe()
358 +
359 + import fcntl
360 + fcntl.fcntl(master_fd, fcntl.F_SETFL,
361 + fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
362 +
363 + fd_pipes.setdefault(0, sys.stdin.fileno())
364 + fd_pipes_orig = fd_pipes.copy()
365 + fd_pipes[0] = fd_pipes_orig[0]
366 + fd_pipes[1] = slave_fd
367 + fd_pipes[2] = slave_fd
368 +
369 + root_config = self.pkg.root_config
370 + portdb = root_config.trees["porttree"].dbapi
371 + ebuild_path = portdb.findname(self.pkg.cpv)
372 + settings = root_config.settings
373 +
374 + fetch_env = dict((k, settings[k]) for k in settings)
375 + fetch_env["FEATURES"] = fetch_env.get("FEATURES", "") + " -cvs"
376 + fetch_env["PORTAGE_NICENESS"] = "0"
377 + fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
378 +
379 + ebuild_binary = os.path.join(
380 + settings["EBUILD_BIN_PATH"], "ebuild")
381 +
382 + fetch_args = [ebuild_binary, ebuild_path, "fetch"]
383 + debug = settings.get("PORTAGE_DEBUG") == "1"
384 + if debug:
385 + fetch_args.append("--debug")
386 +
387 + retval = portage.process.spawn(fetch_args, env=fetch_env,
388 + fd_pipes=fd_pipes, returnpid=True)
389 +
390 + self.pid = retval[0]
391 +
392 + os.close(slave_fd)
393 + files.fetcher = os.fdopen(master_fd, 'r')
394 + self.register(files.fetcher.fileno(),
395 + select.POLLIN, self._output_handler)
396 +
397 + def _output_handler(self, fd, event):
398 + files = self.files
399 + buf = array.array('B')
400 + try:
401 + buf.fromfile(files.fetcher, self._bufsize)
402 + except EOFError:
403 + pass
404 + if buf:
405 + buf.tofile(files.out)
406 + files.out.flush()
407 + else:
408 + self.unregister(files.fetcher.fileno())
409 + for f in files.values():
410 + f.close()
411 +
412 + def poll(self):
413 + if self.returncode is not None:
414 + return self.returncode
415 + retval = os.waitpid(self.pid, os.WNOHANG)
416 + if retval == (0, 0):
417 + return None
418 + self._set_returncode(retval)
419 + return self.returncode
420 +
421 + def wait(self):
422 + if self.returncode is not None:
423 + return self.returncode
424 + self._set_returncode(os.waitpid(self.pid, 0))
425 + return self.returncode
426 +
427 + def _set_returncode(self, wait_retval):
428 +
429 + retval = wait_retval[1]
430 + portage.process.spawned_pids.remove(self.pid)
431 + if retval != os.EX_OK:
432 + if retval & 0xff:
433 + retval = (retval & 0xff) << 8
434 + else:
435 + retval = retval >> 8
436 +
437 + self.returncode = retval
438 +
439 class EbuildBuildDir(SlotObject):
440
441 __slots__ = ("pkg", "settings",
442 @@ -1593,9 +1712,12 @@
443 ebuild_phase = EbuildPhase(fd_pipes=fd_pipes,
444 pkg=self.pkg, phase=mydo, register=self.register,
445 settings=settings, unregister=self.unregister)
446 +
447 ebuild_phase.start()
448 - self.schedule()
449 - retval = ebuild_phase.wait()
450 + retval = None
451 + while retval is None:
452 + self.schedule()
453 + retval = ebuild_phase.poll()
454
455 portage._post_phase_userpriv_perms(settings)
456 if mydo == "install":
457 @@ -1615,7 +1737,7 @@
458 "pid", "returncode", "files")
459
460 _file_names = ("log", "stdout", "ebuild")
461 - _files_dict = slot_dict_class(_file_names)
462 + _files_dict = slot_dict_class(_file_names, prefix="")
463 _bufsize = 4096
464
465 def start(self):
466 @@ -1690,33 +1812,48 @@
467
468 if logfile:
469 os.close(slave_fd)
470 - files["log"] = open(logfile, 'a')
471 - files["stdout"] = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
472 - files["ebuild"] = os.fdopen(master_fd, 'r')
473 - self.register(files["ebuild"].fileno(),
474 + files.log = open(logfile, 'a')
475 + files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
476 + files.ebuild = os.fdopen(master_fd, 'r')
477 + self.register(files.ebuild.fileno(),
478 select.POLLIN, self._output_handler)
479
480 def _output_handler(self, fd, event):
481 files = self.files
482 buf = array.array('B')
483 try:
484 - buf.fromfile(files["ebuild"], self._bufsize)
485 + buf.fromfile(files.ebuild, self._bufsize)
486 except EOFError:
487 pass
488 if buf:
489 - buf.tofile(files["stdout"])
490 - files["stdout"].flush()
491 - buf.tofile(files["log"])
492 - files["log"].flush()
493 + buf.tofile(files.stdout)
494 + files.stdout.flush()
495 + buf.tofile(files.log)
496 + files.log.flush()
497 else:
498 - self.unregister(files["ebuild"].fileno())
499 + self.unregister(files.ebuild.fileno())
500 for f in files.values():
501 f.close()
502
503 + def poll(self):
504 + if self.returncode is not None:
505 + return self.returncode
506 + retval = os.waitpid(self.pid, os.WNOHANG)
507 + if retval == (0, 0):
508 + return None
509 + self._set_returncode(retval)
510 + return self.returncode
511 +
512 def wait(self):
513 - pid = self.pid
514 - retval = os.waitpid(pid, 0)[1]
515 - portage.process.spawned_pids.remove(pid)
516 + if self.returncode is not None:
517 + return self.returncode
518 + self._set_returncode(os.waitpid(self.pid, 0))
519 + return self.returncode
520 +
521 + def _set_returncode(self, wait_retval):
522 +
523 + retval = wait_retval[1]
524 + portage.process.spawned_pids.remove(self.pid)
525 if retval != os.EX_OK:
526 if retval & 0xff:
527 retval = (retval & 0xff) << 8
528 @@ -1733,7 +1870,6 @@
529 eerror(l, phase=self.phase, key=self.pkg.cpv)
530
531 self.returncode = retval
532 - return self.returncode
533
534 class EbuildBinpkg(Task):
535 """
536 @@ -1886,6 +2022,195 @@
537 rval = 1
538 return rval
539
540 +class BinpkgFetcherAsync(SlotObject):
541 +
542 + __slots__ = ("cancelled", "log_file", "fd_pipes", "pkg",
543 + "register", "unregister",
544 + "locked", "files", "pid", "pkg_path", "returncode", "_lock_obj")
545 +
546 + _file_names = ("fetcher", "out")
547 + _files_dict = slot_dict_class(_file_names, prefix="")
548 + _bufsize = 4096
549 +
550 + def __init__(self, **kwargs):
551 + SlotObject.__init__(self, **kwargs)
552 + pkg = self.pkg
553 + self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
554 +
555 + def start(self):
556 +
557 + if self.cancelled:
558 + self.pid = -1
559 + return
560 +
561 + fd_pipes = self.fd_pipes
562 + if fd_pipes is None:
563 + fd_pipes = {
564 + 0 : sys.stdin.fileno(),
565 + 1 : sys.stdout.fileno(),
566 + 2 : sys.stderr.fileno(),
567 + }
568 +
569 + log_file = self.log_file
570 + self.files = self._files_dict()
571 + files = self.files
572 +
573 + if log_file is not None:
574 + files.out = open(log_file, "a")
575 + portage.util.apply_secpass_permissions(log_file,
576 + uid=portage.portage_uid, gid=portage.portage_gid,
577 + mode=0660)
578 + else:
579 + # flush any pending output
580 + for fd in fd_pipes.itervalues():
581 + if fd == sys.stdout.fileno():
582 + sys.stdout.flush()
583 + if fd == sys.stderr.fileno():
584 + sys.stderr.flush()
585 +
586 + files.out = os.fdopen(os.dup(fd_pipes[1]), 'w')
587 +
588 + master_fd, slave_fd = os.pipe()
589 + fcntl.fcntl(master_fd, fcntl.F_SETFL,
590 + fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
591 +
592 + fd_pipes.setdefault(0, sys.stdin.fileno())
593 + fd_pipes_orig = fd_pipes.copy()
594 + fd_pipes[0] = fd_pipes_orig[0]
595 + fd_pipes[1] = slave_fd
596 + fd_pipes[2] = slave_fd
597 +
598 + pkg = self.pkg
599 + bintree = pkg.root_config.trees["bintree"]
600 + settings = bintree.settings
601 + use_locks = "distlocks" in settings.features
602 + pkg_path = self.pkg_path
603 + resume = os.path.exists(pkg_path)
604 +
605 + # urljoin doesn't work correctly with
606 + # unrecognized protocols like sftp
607 + if bintree._remote_has_index:
608 + rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
609 + if not rel_uri:
610 + rel_uri = pkg.cpv + ".tbz2"
611 + uri = bintree._remote_base_uri.rstrip("/") + \
612 + "/" + rel_uri.lstrip("/")
613 + else:
614 + uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
615 + "/" + pkg.pf + ".tbz2"
616 +
617 + protocol = urlparse.urlparse(uri)[0]
618 + fcmd_prefix = "FETCHCOMMAND"
619 + if resume:
620 + fcmd_prefix = "RESUMECOMMAND"
621 + fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
622 + if not fcmd:
623 + fcmd = settings.get(fcmd_prefix)
624 +
625 + fcmd_vars = {
626 + "DISTDIR" : os.path.dirname(pkg_path),
627 + "URI" : uri,
628 + "FILE" : os.path.basename(pkg_path)
629 + }
630 +
631 + fetch_env = dict((k, settings[k]) for k in settings)
632 + fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
633 + for x in shlex.split(fcmd)]
634 +
635 + portage.util.ensure_dirs(os.path.dirname(pkg_path))
636 + if use_locks:
637 + self.lock()
638 +
639 + retval = portage.process.spawn(fetch_args, env=fetch_env,
640 + fd_pipes=fd_pipes, returnpid=True)
641 +
642 + self.pid = retval[0]
643 +
644 + os.close(slave_fd)
645 + files.fetcher = os.fdopen(master_fd, 'r')
646 + self.register(files.fetcher.fileno(),
647 + select.POLLIN, self._output_handler)
648 +
649 + def _output_handler(self, fd, event):
650 + files = self.files
651 + buf = array.array('B')
652 + try:
653 + buf.fromfile(files.fetcher, self._bufsize)
654 + except EOFError:
655 + pass
656 + if buf:
657 + buf.tofile(files.out)
658 + files.out.flush()
659 + else:
660 + self.unregister(files.fetcher.fileno())
661 + for f in files.values():
662 + f.close()
663 + if self.locked:
664 + self.unlock()
665 +
666 + def lock(self):
667 + """
668 + This raises an AlreadyLocked exception if lock() is called
669 + while a lock is already held. In order to avoid this, call
670 + unlock() or check whether the "locked" attribute is True
671 + or False before calling lock().
672 + """
673 + if self._lock_obj is not None:
674 + raise self.AlreadyLocked((self._lock_obj,))
675 +
676 + self._lock_obj = portage.locks.lockfile(
677 + self.pkg_path, wantnewlockfile=1)
678 + self.locked = True
679 +
680 + class AlreadyLocked(portage.exception.PortageException):
681 + pass
682 +
683 + def unlock(self):
684 + if self._lock_obj is None:
685 + return
686 + portage.locks.unlockfile(self._lock_obj)
687 + self._lock_obj = None
688 + self.locked = False
689 +
690 + def poll(self):
691 + if self.returncode is not None:
692 + return self.returncode
693 + retval = os.waitpid(self.pid, os.WNOHANG)
694 + if retval == (0, 0):
695 + return None
696 + self._set_returncode(retval)
697 + return self.returncode
698 +
699 + def cancel(self):
700 + if self.isAlive():
701 + os.kill(self.pid, signal.SIGTERM)
702 + self.cancelled = True
703 + if self.pid is not None:
704 + self.wait()
705 + return self.returncode
706 +
707 + def isAlive(self):
708 + return self.pid is not None and \
709 + self.returncode is None
710 +
711 + def wait(self):
712 + if self.returncode is not None:
713 + return self.returncode
714 + self._set_returncode(os.waitpid(self.pid, 0))
715 + return self.returncode
716 +
717 + def _set_returncode(self, wait_retval):
718 +
719 + retval = wait_retval[1]
720 + portage.process.spawned_pids.remove(self.pid)
721 + if retval != os.EX_OK:
722 + if retval & 0xff:
723 + retval = (retval & 0xff) << 8
724 + else:
725 + retval = retval >> 8
726 +
727 + self.returncode = retval
728 +
729 class BinpkgMerge(Task):
730
731 __slots__ = ("find_blockers", "ldpath_mtimes",
732 @@ -6368,6 +6693,8 @@
733 "--fetchonly", "--fetch-all-uri",
734 "--nodeps", "--pretend"])
735
736 + _fetch_log = EPREFIX + "/var/log/emerge-fetch.log"
737 +
738 def __init__(self, settings, trees, mtimedb, myopts,
739 spinner, mergelist, favorites, digraph):
740 self.settings = settings
741 @@ -6386,10 +6713,39 @@
742 self.pkgsettings[root] = portage.config(
743 clone=trees[root]["vartree"].settings)
744 self.curval = 0
745 - self._spawned_pids = []
746 self._poll_event_handlers = {}
747 self._poll = select.poll()
748 + from collections import deque
749 + self._task_queue = deque()
750 + self._running_tasks = set()
751 + self._max_jobs = 1
752 + self._parallel_fetch = False
753 + features = self.settings.features
754 + if "parallel-fetch" in features and \
755 + not ("--pretend" in self.myopts or \
756 + "--fetch-all-uri" in self.myopts or \
757 + "--fetchonly" in self.myopts):
758 + if "distlocks" not in features:
759 + portage.writemsg(red("!!!")+"\n", noiselevel=-1)
760 + portage.writemsg(red("!!!")+" parallel-fetching " + \
761 + "requires the distlocks feature enabled"+"\n",
762 + noiselevel=-1)
763 + portage.writemsg(red("!!!")+" you have it disabled, " + \
764 + "thus parallel-fetching is being disabled"+"\n",
765 + noiselevel=-1)
766 + portage.writemsg(red("!!!")+"\n", noiselevel=-1)
767 + elif len(mergelist) > 1:
768 + self._parallel_fetch = True
769
770 + # clear out existing fetch log if it exists
771 + try:
772 + open(self._fetch_log, 'w')
773 + except EnvironmentError:
774 + pass
775 +
776 + def _add_task(self, task):
777 + self._task_queue.append(task)
778 +
779 class _pkg_failure(portage.exception.PortageException):
780 """
781 An instance of this class is raised by unmerge() when
782 @@ -6441,20 +6797,17 @@
783 def merge(self):
784
785 keep_going = "--keep-going" in self.myopts
786 + running_tasks = self._running_tasks
787
788 while True:
789 try:
790 rval = self._merge()
791 finally:
792 - spawned_pids = self._spawned_pids
793 - while spawned_pids:
794 - pid = spawned_pids.pop()
795 - try:
796 - if os.waitpid(pid, os.WNOHANG) == (0, 0):
797 - os.kill(pid, signal.SIGTERM)
798 - os.waitpid(pid, 0)
799 - except OSError:
800 - pass # cleaned up elsewhere.
801 + # clean up child process if necessary
802 + self._task_queue.clear()
803 + while running_tasks:
804 + task = running_tasks.pop()
805 + task.cancel()
806
807 if rval == os.EX_OK or not keep_going:
808 break
809 @@ -6534,25 +6887,6 @@
810 mydepgraph.break_refs(dropped_tasks)
811 return (mylist, dropped_tasks)
812
813 - def _poll_child_processes(self):
814 - """
815 - After each merge, collect status from child processes
816 - in order to clean up zombies (such as the parallel-fetch
817 - process).
818 - """
819 - spawned_pids = self._spawned_pids
820 - if not spawned_pids:
821 - return
822 - for pid in list(spawned_pids):
823 - try:
824 - if os.waitpid(pid, os.WNOHANG) == (0, 0):
825 - continue
826 - except OSError:
827 - # This pid has been cleaned up elsewhere,
828 - # so remove it from our list.
829 - pass
830 - spawned_pids.remove(pid)
831 -
832 def _register(self, f, eventmask, handler):
833 self._poll_event_handlers[f] = handler
834 self._poll.register(f, eventmask)
835 @@ -6560,12 +6894,46 @@
836 def _unregister(self, f):
837 self._poll.unregister(f)
838 del self._poll_event_handlers[f]
839 + self._schedule_tasks()
840
841 def _schedule(self):
842 - while self._poll_event_handlers:
843 - for f, event in self._poll.poll():
844 - self._poll_event_handlers[f](f, event)
845 + event_handlers = self._poll_event_handlers
846 + running_tasks = self._running_tasks
847 + poll = self._poll.poll
848
849 + self._schedule_tasks()
850 +
851 + while event_handlers:
852 + for f, event in poll():
853 + event_handlers[f](f, event)
854 +
855 + if len(event_handlers) <= len(running_tasks):
856 + # Assuming one handler per task, this
857 + # means the caller has unregistered it's
858 + # handler, so it's time to yield.
859 + break
860 +
861 + def _schedule_tasks(self):
862 + task_queue = self._task_queue
863 + running_tasks = self._running_tasks
864 + max_jobs = self._max_jobs
865 + state_changed = False
866 +
867 + for task in list(running_tasks):
868 + if task.poll() is not None:
869 + running_tasks.remove(task)
870 + state_changed = True
871 +
872 + while task_queue and (len(running_tasks) < max_jobs):
873 + task = task_queue.popleft()
874 + cancelled = getattr(task, "cancelled", None)
875 + if not cancelled:
876 + task.start()
877 + running_tasks.add(task)
878 + state_changed = True
879 +
880 + return state_changed
881 +
882 def _merge(self):
883 mylist = self._mergelist
884 favorites = self._favorites
885 @@ -6592,6 +6960,28 @@
886 if isinstance(x, Package) and x.operation == "merge"]
887 mtimedb.commit()
888
889 + prefetchers = weakref.WeakValueDictionary()
890 + getbinpkg = "--getbinpkg" in self.myopts
891 +
892 + if self._parallel_fetch:
893 + for pkg in mylist:
894 + if not isinstance(pkg, Package):
895 + continue
896 + if pkg.type_name == "ebuild":
897 + self._add_task(EbuildFetcherAsync(
898 + log_file=self._fetch_log,
899 + pkg=pkg, register=self._register,
900 + unregister=self._unregister))
901 + elif pkg.type_name == "binary" and getbinpkg and \
902 + pkg.root_config.trees["bintree"].isremote(pkg.cpv):
903 + prefetcher = BinpkgFetcherAsync(
904 + log_file=self._fetch_log,
905 + pkg=pkg, register=self._register,
906 + unregister=self._unregister)
907 + prefetchers[pkg] = prefetcher
908 + self._add_task(prefetcher)
909 + del prefetcher
910 +
911 # Verify all the manifests now so that the user is notified of failure
912 # as soon as possible.
913 if "--fetchonly" not in self.myopts and \
914 @@ -6625,49 +7015,6 @@
915 myfeat = self.settings.features[:]
916 bad_resume_opts = set(["--ask", "--changelog", "--skipfirst",
917 "--resume"])
918 - if "parallel-fetch" in myfeat and \
919 - not ("--pretend" in self.myopts or \
920 - "--fetch-all-uri" in self.myopts or \
921 - "--fetchonly" in self.myopts):
922 - if "distlocks" not in myfeat:
923 - print red("!!!")
924 - print red("!!!")+" parallel-fetching requires the distlocks feature enabled"
925 - print red("!!!")+" you have it disabled, thus parallel-fetching is being disabled"
926 - print red("!!!")
927 - elif len(mymergelist) > 1:
928 - fetch_log = EPREFIX+"/var/log/emerge-fetch.log"
929 - logfile = open(fetch_log, "w")
930 - fd_pipes = {1:logfile.fileno(), 2:logfile.fileno()}
931 - portage.util.apply_secpass_permissions(fetch_log,
932 - uid=portage.portage_uid, gid=portage.portage_gid,
933 - mode=0660)
934 - fetch_env = os.environ.copy()
935 - fetch_env["FEATURES"] = fetch_env.get("FEATURES", "") + " -cvs"
936 - fetch_env["PORTAGE_NICENESS"] = "0"
937 - fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
938 - fetch_args = [sys.argv[0], "--resume",
939 - "--fetchonly", "--nodeps"]
940 - resume_opts = self.myopts.copy()
941 - # For automatic resume, we need to prevent
942 - # any of bad_resume_opts from leaking in
943 - # via EMERGE_DEFAULT_OPTS.
944 - resume_opts["--ignore-default-opts"] = True
945 - for myopt, myarg in resume_opts.iteritems():
946 - if myopt not in bad_resume_opts:
947 - if myarg is True:
948 - fetch_args.append(myopt)
949 - else:
950 - fetch_args.append(myopt +"="+ myarg)
951 - self._spawned_pids.extend(
952 - portage.process.spawn(
953 - fetch_args, env=fetch_env,
954 - fd_pipes=fd_pipes, returnpid=True))
955 - logfile.close() # belongs to the spawned process
956 - del fetch_log, logfile, fd_pipes, fetch_env, fetch_args, \
957 - resume_opts
958 - print ">>> starting parallel fetching pid %d" % \
959 - self._spawned_pids[-1]
960 -
961 metadata_keys = [k for k in portage.auxdbkeys \
962 if not k.startswith("UNUSED_")] + ["USE"]
963
964 @@ -6704,14 +7051,15 @@
965 self._execute_task(bad_resume_opts,
966 failed_fetches,
967 mydbapi, mergecount,
968 - myfeat, mymergelist, x, xterm_titles)
969 + myfeat, mymergelist, x,
970 + prefetchers, xterm_titles)
971 except self._pkg_failure, e:
972 return e.status
973 return self._post_merge(mtimedb, xterm_titles, failed_fetches)
974
975 def _execute_task(self, bad_resume_opts,
976 failed_fetches, mydbapi, mergecount, myfeat,
977 - mymergelist, pkg, xterm_titles):
978 + mymergelist, pkg, prefetchers, xterm_titles):
979 favorites = self._favorites
980 mtimedb = self._mtimedb
981 from portage.elog import elog_process
982 @@ -6862,8 +7210,27 @@
983 phasefilter=filter_mergephases)
984 build_dir.unlock()
985
986 - elif x[0]=="binary":
987 - #merge the tbz2
988 + elif x.type_name == "binary":
989 + # The prefetcher have already completed or it
990 + # could be running now. If it's running now,
991 + # wait for it to complete since it holds
992 + # a lock on the file being fetched. The
993 + # portage.locks functions are only designed
994 + # to work between separate processes. Since
995 + # the lock is held by the current process,
996 + # use the scheduler and fetcher methods to
997 + # synchronize with the fetcher.
998 + prefetcher = prefetchers.get(pkg)
999 + if prefetcher is not None:
1000 + if not prefetcher.isAlive():
1001 + prefetcher.cancel()
1002 + else:
1003 + retval = None
1004 + while retval is None:
1005 + self._schedule()
1006 + retval = prefetcher.poll()
1007 + del prefetcher
1008 +
1009 fetcher = BinpkgFetcher(pkg=pkg, pretend=pretend,
1010 use_locks=("distlocks" in pkgsettings.features))
1011 mytbz2 = fetcher.pkg_path
1012 @@ -6967,7 +7334,6 @@
1013 # due to power failure, SIGKILL, etc...
1014 mtimedb.commit()
1015 self.curval += 1
1016 - self._poll_child_processes()
1017
1018 def _post_merge(self, mtimedb, xterm_titles, failed_fetches):
1019 if "--pretend" not in self.myopts:
1020
1021 Modified: main/branches/prefix/pym/portage/__init__.py
1022 ===================================================================
1023 --- main/branches/prefix/pym/portage/__init__.py 2008-07-01 17:09:13 UTC (rev 10879)
1024 +++ main/branches/prefix/pym/portage/__init__.py 2008-07-01 17:22:29 UTC (rev 10880)
1025 @@ -3294,8 +3294,9 @@
1026 # file size. The parent process will verify their checksums prior to
1027 # the unpack phase.
1028
1029 - parallel_fetchonly = fetchonly and \
1030 - "PORTAGE_PARALLEL_FETCHONLY" in mysettings
1031 + parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
1032 + if parallel_fetchonly:
1033 + fetchonly = 1
1034
1035 check_config_instance(mysettings)
1036
1037
1038 Modified: main/branches/prefix/pym/portage/cache/mappings.py
1039 ===================================================================
1040 --- main/branches/prefix/pym/portage/cache/mappings.py 2008-07-01 17:09:13 UTC (rev 10879)
1041 +++ main/branches/prefix/pym/portage/cache/mappings.py 2008-07-01 17:22:29 UTC (rev 10880)
1042 @@ -104,14 +104,17 @@
1043
1044 _slot_dict_classes = weakref.WeakValueDictionary()
1045
1046 -def slot_dict_class(keys):
1047 +def slot_dict_class(keys, prefix="_val_"):
1048 """
1049 Generates mapping classes that behave similar to a dict but store values
1050 as object attributes that are allocated via __slots__. Instances of these
1051 objects have a smaller memory footprint than a normal dict object.
1052
1053 @param keys: Fixed set of allowed keys
1054 - @type keys: iterable
1055 + @type keys: Iterable
1056 + @param prefix: a prefix to use when mapping
1057 + attribute names from keys
1058 + @type prefix: String
1059 @rtype: SlotDict
1060 @returns: A class that constructs SlotDict instances
1061 having the specified keys.
1062 @@ -120,14 +123,15 @@
1063 keys_set = keys
1064 else:
1065 keys_set = frozenset(keys)
1066 - v = _slot_dict_classes.get(keys_set)
1067 + v = _slot_dict_classes.get((keys_set, prefix))
1068 if v is None:
1069
1070 class SlotDict(object):
1071
1072 allowed_keys = keys_set
1073 + _prefix = prefix
1074 __slots__ = ("__weakref__",) + \
1075 - tuple("_val_" + k for k in allowed_keys)
1076 + tuple(prefix + k for k in allowed_keys)
1077
1078 def __iter__(self):
1079 for k, v in self.iteritems():
1080 @@ -145,7 +149,7 @@
1081 def iteritems(self):
1082 for k in self.allowed_keys:
1083 try:
1084 - yield (k, getattr(self, "_val_" + k))
1085 + yield (k, getattr(self, self._prefix + k))
1086 except AttributeError:
1087 pass
1088
1089 @@ -161,12 +165,12 @@
1090
1091 def __delitem__(self, k):
1092 try:
1093 - delattr(self, "_val_" + k)
1094 + delattr(self, self._prefix + k)
1095 except AttributeError:
1096 raise KeyError(k)
1097
1098 def __setitem__(self, k, v):
1099 - setattr(self, "_val_" + k, v)
1100 + setattr(self, self._prefix + k, v)
1101
1102 def setdefault(self, key, default=None):
1103 try:
1104 @@ -186,7 +190,7 @@
1105
1106 def __getitem__(self, k):
1107 try:
1108 - return getattr(self, "_val_" + k)
1109 + return getattr(self, self._prefix + k)
1110 except AttributeError:
1111 raise KeyError(k)
1112
1113 @@ -197,7 +201,7 @@
1114 return default
1115
1116 def __contains__(self, k):
1117 - return hasattr(self, "_val_" + k)
1118 + return hasattr(self, self._prefix + k)
1119
1120 def has_key(self, k):
1121 return k in self
1122 @@ -232,7 +236,7 @@
1123 def clear(self):
1124 for k in self.allowed_keys:
1125 try:
1126 - delattr(self, "_val_" + k)
1127 + delattr(self, self._prefix + k)
1128 except AttributeError:
1129 pass
1130
1131
1132 Modified: main/branches/prefix/pym/portage/checksum.py
1133 ===================================================================
1134 --- main/branches/prefix/pym/portage/checksum.py 2008-07-01 17:09:13 UTC (rev 10879)
1135 +++ main/branches/prefix/pym/portage/checksum.py 2008-07-01 17:22:29 UTC (rev 10880)
1136 @@ -11,7 +11,6 @@
1137 import portage.exception
1138 import portage.process
1139 import commands
1140 -import md5, sha
1141
1142 #dict of all available hash functions
1143 hashfunc_map = {}
1144 @@ -46,9 +45,20 @@
1145 # override earlier ones
1146
1147 # Use the internal modules as last fallback
1148 -md5hash = _generate_hash_function("MD5", md5.new, origin="internal")
1149 -sha1hash = _generate_hash_function("SHA1", sha.new, origin="internal")
1150 +try:
1151 + from hashlib import md5 as _new_md5
1152 +except ImportError:
1153 + from md5 import new as _new_md5
1154
1155 +md5hash = _generate_hash_function("MD5", _new_md5, origin="internal")
1156 +
1157 +try:
1158 + from hashlib import sha1 as _new_sha1
1159 +except ImportError:
1160 + from sha import new as _new_sha1
1161 +
1162 +sha1hash = _generate_hash_function("SHA1", _new_sha1, origin="internal")
1163 +
1164 # Use pycrypto when available, prefer it over the internal fallbacks
1165 try:
1166 from Crypto.Hash import MD5, SHA, SHA256, RIPEMD
1167
1168 Modified: main/branches/prefix/pym/portage/dbapi/porttree.py
1169 ===================================================================
1170 --- main/branches/prefix/pym/portage/dbapi/porttree.py 2008-07-01 17:09:13 UTC (rev 10879)
1171 +++ main/branches/prefix/pym/portage/dbapi/porttree.py 2008-07-01 17:22:29 UTC (rev 10880)
1172 @@ -63,7 +63,9 @@
1173 self.manifestVerifier = portage.gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
1174
1175 #self.root=settings["PORTDIR"]
1176 - self.porttree_root = os.path.realpath(porttree_root)
1177 + self.porttree_root = porttree_root
1178 + if porttree_root:
1179 + self.porttree_root = os.path.realpath(porttree_root)
1180
1181 self.depcachedir = os.path.realpath(self.mysettings.depcachedir)
1182
1183
1184 Modified: main/branches/prefix/pym/portage/dbapi/vartree.py
1185 ===================================================================
1186 --- main/branches/prefix/pym/portage/dbapi/vartree.py 2008-07-01 17:09:13 UTC (rev 10879)
1187 +++ main/branches/prefix/pym/portage/dbapi/vartree.py 2008-07-01 17:22:29 UTC (rev 10880)
1188 @@ -46,7 +46,8 @@
1189 self._filename = filename
1190 self._autocommit = autocommit
1191 self.load()
1192 -
1193 + self.pruneNonExisting()
1194 +
1195 def load(self):
1196 """ Reload the registry data from file """
1197 try:
1198 @@ -63,9 +64,15 @@
1199 """ Store the registry data to file. No need to call this if autocommit
1200 was enabled.
1201 """
1202 - f = atomic_ofstream(self._filename)
1203 - cPickle.dump(self._data, f)
1204 - f.close()
1205 + if os.environ.get("SANDBOX_ON") == "1":
1206 + return
1207 + try:
1208 + f = atomic_ofstream(self._filename)
1209 + cPickle.dump(self._data, f)
1210 + f.close()
1211 + except EnvironmentError, e:
1212 + if e.errno != PermissionDenied.errno:
1213 + writemsg("!!! %s %s\n" % (e, self._filename), noiselevel=-1)
1214
1215 def register(self, cpv, slot, counter, paths):
1216 """ Register new objects in the registry. If there is a record with the
1217 @@ -1030,6 +1037,35 @@
1218 return dblink(category, pf, self.root,
1219 self.settings, vartree=self.vartree)
1220
1221 + def removeFromContents(self, pkg, paths, relative_paths=True):
1222 + """
1223 + @param pkg: cpv for an installed package
1224 + @type pkg: string
1225 + @param paths: paths of files to remove from contents
1226 + @type paths: iterable
1227 + """
1228 + if not hasattr(pkg, "getcontents"):
1229 + pkg = self._dblink(pkg)
1230 + root = self.root
1231 + root_len = len(root) - 1
1232 + new_contents = pkg.getcontents().copy()
1233 + contents_key = None
1234 +
1235 + for filename in paths:
1236 + filename = normalize_path(filename)
1237 + if relative_paths:
1238 + relative_filename = filename
1239 + else:
1240 + relative_filename = filename[root_len:]
1241 + contents_key = pkg._match_contents(relative_filename, root)
1242 + if contents_key:
1243 + del new_contents[contents_key]
1244 +
1245 + if contents_key:
1246 + f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
1247 + write_contents(new_contents, root, f)
1248 + f.close()
1249 +
1250 class _owners_cache(object):
1251 """
1252 This class maintains a hash table that serves to index package
1253 @@ -2061,7 +2097,7 @@
1254 #remove self from vartree database so that our own virtual gets zapped if we're the last node
1255 self.vartree.zap(self.mycpv)
1256
1257 - def isowner(self,filename, destroot):
1258 + def isowner(self, filename, destroot):
1259 """
1260 Check if a file belongs to this package. This may
1261 result in a stat call for the parent directory of
1262 @@ -2080,12 +2116,25 @@
1263 1. True if this package owns the file.
1264 2. False if this package does not own the file.
1265 """
1266 + return bool(self._match_contents(filename, destroot))
1267 +
1268 + def _match_contents(self, filename, destroot):
1269 + """
1270 + The matching contents entry is returned, which is useful
1271 + since the path may differ from the one given by the caller,
1272 + due to symlinks.
1273 +
1274 + @rtype: String
1275 + @return: the contents entry corresponding to the given path, or False
1276 + if the file is not owned by this package.
1277 + """
1278 +
1279 destfile = normalize_path(
1280 os.path.join(destroot, filename.lstrip(os.path.sep)))
1281
1282 pkgfiles = self.getcontents()
1283 if pkgfiles and destfile in pkgfiles:
1284 - return True
1285 + return destfile
1286 if pkgfiles:
1287 basename = os.path.basename(destfile)
1288 if self._contents_basenames is None:
1289 @@ -2135,7 +2184,7 @@
1290 for p_path in p_path_list:
1291 x = os.path.join(p_path, basename)
1292 if x in pkgfiles:
1293 - return True
1294 + return x
1295
1296 return False
1297
1298 @@ -2803,33 +2852,8 @@
1299 contents = self.getcontents()
1300 destroot_len = len(destroot) - 1
1301 for blocker in blockers:
1302 - blocker_contents = blocker.getcontents()
1303 - collisions = []
1304 - for filename in blocker_contents:
1305 - relative_filename = filename[destroot_len:]
1306 - if self.isowner(relative_filename, destroot):
1307 - collisions.append(filename)
1308 - if not collisions:
1309 - continue
1310 - for filename in collisions:
1311 - del blocker_contents[filename]
1312 - f = atomic_ofstream(os.path.join(blocker.dbdir, "CONTENTS"))
1313 - for filename in sorted(blocker_contents):
1314 - entry_data = blocker_contents[filename]
1315 - entry_type = entry_data[0]
1316 - relative_filename = filename[destroot_len:]
1317 - if entry_type == "obj":
1318 - entry_type, mtime, md5sum = entry_data
1319 - line = "%s %s %s %s\n" % \
1320 - (entry_type, relative_filename, md5sum, mtime)
1321 - elif entry_type == "sym":
1322 - entry_type, mtime, link = entry_data
1323 - line = "%s %s -> %s %s\n" % \
1324 - (entry_type, relative_filename, link, mtime)
1325 - else: # dir, dev, fif
1326 - line = "%s %s\n" % (entry_type, relative_filename)
1327 - f.write(line)
1328 - f.close()
1329 + self.vartree.dbapi.removeFromContents(blocker, iter(contents),
1330 + relative_paths=False)
1331
1332 self.vartree.dbapi._add(self)
1333 contents = self.getcontents()
1334 @@ -3240,6 +3264,27 @@
1335 "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
1336 return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
1337
1338 +def write_contents(contents, root, f):
1339 + """
1340 + Write contents to any file-like object. The file will be left open.
1341 + """
1342 + root_len = len(root) - 1
1343 + for filename in sorted(contents):
1344 + entry_data = contents[filename]
1345 + entry_type = entry_data[0]
1346 + relative_filename = filename[root_len:]
1347 + if entry_type == "obj":
1348 + entry_type, mtime, md5sum = entry_data
1349 + line = "%s %s %s %s\n" % \
1350 + (entry_type, relative_filename, md5sum, mtime)
1351 + elif entry_type == "sym":
1352 + entry_type, mtime, link = entry_data
1353 + line = "%s %s -> %s %s\n" % \
1354 + (entry_type, relative_filename, link, mtime)
1355 + else: # dir, dev, fif
1356 + line = "%s %s\n" % (entry_type, relative_filename)
1357 + f.write(line)
1358 +
1359 def tar_contents(contents, root, tar, protect=None, onProgress=None):
1360 from portage.util import normalize_path
1361 import tarfile
1362
1363 --
1364 gentoo-commits@l.g.o mailing list