Gentoo Archives: gentoo-commits

From: "Zac Medico (zmedico)" <zmedico@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] portage r15425 - in main/trunk/pym: _emerge portage portage/dbapi portage/package/ebuild
Date: Mon, 22 Feb 2010 04:56:39
Message-Id: E1NjQLP-0001DC-Hv@stork.gentoo.org
1 Author: zmedico
2 Date: 2010-02-22 04:56:30 +0000 (Mon, 22 Feb 2010)
3 New Revision: 15425
4
5 Added:
6 main/trunk/pym/portage/package/ebuild/fetch.py
7 Modified:
8 main/trunk/pym/_emerge/BinpkgVerifier.py
9 main/trunk/pym/portage/__init__.py
10 main/trunk/pym/portage/dbapi/bintree.py
11 Log:
12 Move portage.fetch() to portage.package.ebuild.fetch.fetch().
13
14
15 Modified: main/trunk/pym/_emerge/BinpkgVerifier.py
16 ===================================================================
17 --- main/trunk/pym/_emerge/BinpkgVerifier.py 2010-02-22 04:13:28 UTC (rev 15424)
18 +++ main/trunk/pym/_emerge/BinpkgVerifier.py 2010-02-22 04:56:30 UTC (rev 15425)
19 @@ -9,6 +9,7 @@
20 from portage import os
21 from portage import _encodings
22 from portage import _unicode_encode
23 +from portage.package.ebuild.fetch import _checksum_failure_temp_file
24 import codecs
25
26 class BinpkgVerifier(AsynchronousTask):
27 @@ -77,7 +78,7 @@
28 else:
29 pkg_path = bintree.getname(pkg.cpv)
30 head, tail = os.path.split(pkg_path)
31 - temp_filename = portage._checksum_failure_temp_file(head, tail)
32 + temp_filename = _checksum_failure_temp_file(head, tail)
33 writemsg("File renamed to '%s'\n" % (temp_filename,),
34 noiselevel=-1)
35 finally:
36
37 Modified: main/trunk/pym/portage/__init__.py
38 ===================================================================
39 --- main/trunk/pym/portage/__init__.py 2010-02-22 04:13:28 UTC (rev 15424)
40 +++ main/trunk/pym/portage/__init__.py 2010-02-22 04:56:30 UTC (rev 15425)
41 @@ -43,7 +43,6 @@
42 from StringIO import StringIO
43
44 from time import sleep
45 - from random import shuffle
46 from itertools import chain
47 import platform
48 import warnings
49 @@ -104,6 +103,7 @@
50 'portage.output:bold,colorize',
51 'portage.package.ebuild.config:autouse,best_from_dict,' + \
52 'check_config_instance,config',
53 + 'portage.package.ebuild.fetch:fetch',
54 'portage.process',
55 'portage.process:atexit_register,run_exitfuncs',
56 'portage.update:dep_transform,fixdbentries,grab_updates,' + \
57 @@ -1377,1108 +1377,6 @@
58 return retval >> 8
59 return retval
60
61 -_userpriv_spawn_kwargs = (
62 - ("uid", portage_uid),
63 - ("gid", portage_gid),
64 - ("groups", userpriv_groups),
65 - ("umask", 0o02),
66 -)
67 -
68 -def _spawn_fetch(settings, args, **kwargs):
69 - """
70 - Spawn a process with appropriate settings for fetching, including
71 - userfetch and selinux support.
72 - """
73 -
74 - global _userpriv_spawn_kwargs
75 -
76 - # Redirect all output to stdout since some fetchers like
77 - # wget pollute stderr (if portage detects a problem then it
78 - # can send it's own message to stderr).
79 - if "fd_pipes" not in kwargs:
80 -
81 - kwargs["fd_pipes"] = {
82 - 0 : sys.stdin.fileno(),
83 - 1 : sys.stdout.fileno(),
84 - 2 : sys.stdout.fileno(),
85 - }
86 -
87 - if "userfetch" in settings.features and \
88 - os.getuid() == 0 and portage_gid and portage_uid:
89 - kwargs.update(_userpriv_spawn_kwargs)
90 -
91 - spawn_func = portage.process.spawn
92 -
93 - if settings.selinux_enabled():
94 - spawn_func = selinux.spawn_wrapper(spawn_func,
95 - settings["PORTAGE_FETCH_T"])
96 -
97 - # bash is an allowed entrypoint, while most binaries are not
98 - if args[0] != BASH_BINARY:
99 - args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
100 -
101 - rval = spawn_func(args, env=settings.environ(), **kwargs)
102 -
103 - return rval
104 -
105 -_userpriv_test_write_file_cache = {}
106 -_userpriv_test_write_cmd_script = "touch %(file_path)s 2>/dev/null ; rval=$? ; " + \
107 - "rm -f %(file_path)s ; exit $rval"
108 -
109 -def _userpriv_test_write_file(settings, file_path):
110 - """
111 - Drop privileges and try to open a file for writing. The file may or
112 - may not exist, and the parent directory is assumed to exist. The file
113 - is removed before returning.
114 -
115 - @param settings: A config instance which is passed to _spawn_fetch()
116 - @param file_path: A file path to open and write.
117 - @return: True if write succeeds, False otherwise.
118 - """
119 -
120 - global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
121 - rval = _userpriv_test_write_file_cache.get(file_path)
122 - if rval is not None:
123 - return rval
124 -
125 - args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
126 - {"file_path" : _shell_quote(file_path)}]
127 -
128 - returncode = _spawn_fetch(settings, args)
129 -
130 - rval = returncode == os.EX_OK
131 - _userpriv_test_write_file_cache[file_path] = rval
132 - return rval
133 -
134 -def _checksum_failure_temp_file(distdir, basename):
135 - """
136 - First try to find a duplicate temp file with the same checksum and return
137 - that filename if available. Otherwise, use mkstemp to create a new unique
138 - filename._checksum_failure_.$RANDOM, rename the given file, and return the
139 - new filename. In any case, filename will be renamed or removed before this
140 - function returns a temp filename.
141 - """
142 -
143 - filename = os.path.join(distdir, basename)
144 - size = os.stat(filename).st_size
145 - checksum = None
146 - tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
147 - for temp_filename in os.listdir(distdir):
148 - if not tempfile_re.match(temp_filename):
149 - continue
150 - temp_filename = os.path.join(distdir, temp_filename)
151 - try:
152 - if size != os.stat(temp_filename).st_size:
153 - continue
154 - except OSError:
155 - continue
156 - try:
157 - temp_checksum = portage.checksum.perform_md5(temp_filename)
158 - except portage.exception.FileNotFound:
159 - # Apparently the temp file disappeared. Let it go.
160 - continue
161 - if checksum is None:
162 - checksum = portage.checksum.perform_md5(filename)
163 - if checksum == temp_checksum:
164 - os.unlink(filename)
165 - return temp_filename
166 -
167 - from tempfile import mkstemp
168 - fd, temp_filename = mkstemp("", basename + "._checksum_failure_.", distdir)
169 - os.close(fd)
170 - os.rename(filename, temp_filename)
171 - return temp_filename
172 -
173 -def _check_digests(filename, digests, show_errors=1):
174 - """
175 - Check digests and display a message if an error occurs.
176 - @return True if all digests match, False otherwise.
177 - """
178 - verified_ok, reason = portage.checksum.verify_all(filename, digests)
179 - if not verified_ok:
180 - if show_errors:
181 - writemsg(_("!!! Previously fetched"
182 - " file: '%s'\n") % filename, noiselevel=-1)
183 - writemsg(_("!!! Reason: %s\n") % reason[0],
184 - noiselevel=-1)
185 - writemsg(_("!!! Got: %s\n"
186 - "!!! Expected: %s\n") % \
187 - (reason[1], reason[2]), noiselevel=-1)
188 - return False
189 - return True
190 -
191 -def _check_distfile(filename, digests, eout, show_errors=1):
192 - """
193 - @return a tuple of (match, stat_obj) where match is True if filename
194 - matches all given digests (if any) and stat_obj is a stat result, or
195 - None if the file does not exist.
196 - """
197 - if digests is None:
198 - digests = {}
199 - size = digests.get("size")
200 - if size is not None and len(digests) == 1:
201 - digests = None
202 -
203 - try:
204 - st = os.stat(filename)
205 - except OSError:
206 - return (False, None)
207 - if size is not None and size != st.st_size:
208 - return (False, st)
209 - if not digests:
210 - if size is not None:
211 - eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
212 - eout.eend(0)
213 - elif st.st_size == 0:
214 - # Zero-byte distfiles are always invalid.
215 - return (False, st)
216 - else:
217 - if _check_digests(filename, digests, show_errors=show_errors):
218 - eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
219 - " ".join(sorted(digests))))
220 - eout.eend(0)
221 - else:
222 - return (False, st)
223 - return (True, st)
224 -
225 -_fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
226 -
227 -_size_suffix_map = {
228 - '' : 0,
229 - 'K' : 10,
230 - 'M' : 20,
231 - 'G' : 30,
232 - 'T' : 40,
233 - 'P' : 50,
234 - 'E' : 60,
235 - 'Z' : 70,
236 - 'Y' : 80,
237 -}
238 -
239 -def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
240 - "fetch files. Will use digest file if available."
241 -
242 - if not myuris:
243 - return 1
244 -
245 - features = mysettings.features
246 - restrict = mysettings.get("PORTAGE_RESTRICT","").split()
247 -
248 - from portage.data import secpass
249 - userfetch = secpass >= 2 and "userfetch" in features
250 - userpriv = secpass >= 2 and "userpriv" in features
251 -
252 - # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
253 - if "mirror" in restrict or \
254 - "nomirror" in restrict:
255 - if ("mirror" in features) and ("lmirror" not in features):
256 - # lmirror should allow you to bypass mirror restrictions.
257 - # XXX: This is not a good thing, and is temporary at best.
258 - print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
259 - return 1
260 -
261 - # Generally, downloading the same file repeatedly from
262 - # every single available mirror is a waste of bandwidth
263 - # and time, so there needs to be a cap.
264 - checksum_failure_max_tries = 5
265 - v = checksum_failure_max_tries
266 - try:
267 - v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
268 - checksum_failure_max_tries))
269 - except (ValueError, OverflowError):
270 - writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
271 - " contains non-integer value: '%s'\n") % \
272 - mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
273 - writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
274 - "default value: %s\n") % checksum_failure_max_tries,
275 - noiselevel=-1)
276 - v = checksum_failure_max_tries
277 - if v < 1:
278 - writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
279 - " contains value less than 1: '%s'\n") % v, noiselevel=-1)
280 - writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
281 - "default value: %s\n") % checksum_failure_max_tries,
282 - noiselevel=-1)
283 - v = checksum_failure_max_tries
284 - checksum_failure_max_tries = v
285 - del v
286 -
287 - fetch_resume_size_default = "350K"
288 - fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
289 - if fetch_resume_size is not None:
290 - fetch_resume_size = "".join(fetch_resume_size.split())
291 - if not fetch_resume_size:
292 - # If it's undefined or empty, silently use the default.
293 - fetch_resume_size = fetch_resume_size_default
294 - match = _fetch_resume_size_re.match(fetch_resume_size)
295 - if match is None or \
296 - (match.group(2).upper() not in _size_suffix_map):
297 - writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
298 - " contains an unrecognized format: '%s'\n") % \
299 - mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
300 - writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
301 - "default value: %s\n") % fetch_resume_size_default,
302 - noiselevel=-1)
303 - fetch_resume_size = None
304 - if fetch_resume_size is None:
305 - fetch_resume_size = fetch_resume_size_default
306 - match = _fetch_resume_size_re.match(fetch_resume_size)
307 - fetch_resume_size = int(match.group(1)) * \
308 - 2 ** _size_suffix_map[match.group(2).upper()]
309 -
310 - # Behave like the package has RESTRICT="primaryuri" after a
311 - # couple of checksum failures, to increase the probablility
312 - # of success before checksum_failure_max_tries is reached.
313 - checksum_failure_primaryuri = 2
314 - thirdpartymirrors = mysettings.thirdpartymirrors()
315 -
316 - # In the background parallel-fetch process, it's safe to skip checksum
317 - # verification of pre-existing files in $DISTDIR that have the correct
318 - # file size. The parent process will verify their checksums prior to
319 - # the unpack phase.
320 -
321 - parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
322 - if parallel_fetchonly:
323 - fetchonly = 1
324 -
325 - check_config_instance(mysettings)
326 -
327 - custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
328 - CUSTOM_MIRRORS_FILE), recursive=1)
329 -
330 - mymirrors=[]
331 -
332 - if listonly or ("distlocks" not in features):
333 - use_locks = 0
334 -
335 - fetch_to_ro = 0
336 - if "skiprocheck" in features:
337 - fetch_to_ro = 1
338 -
339 - if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
340 - if use_locks:
341 - writemsg(colorize("BAD",
342 - _("!!! For fetching to a read-only filesystem, "
343 - "locking should be turned off.\n")), noiselevel=-1)
344 - writemsg(_("!!! This can be done by adding -distlocks to "
345 - "FEATURES in /etc/make.conf\n"), noiselevel=-1)
346 -# use_locks = 0
347 -
348 - # local mirrors are always added
349 - if "local" in custommirrors:
350 - mymirrors += custommirrors["local"]
351 -
352 - if "nomirror" in restrict or \
353 - "mirror" in restrict:
354 - # We don't add any mirrors.
355 - pass
356 - else:
357 - if try_mirrors:
358 - mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
359 -
360 - skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
361 - pkgdir = mysettings.get("O")
362 - if not (pkgdir is None or skip_manifest):
363 - mydigests = Manifest(
364 - pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
365 - else:
366 - # no digests because fetch was not called for a specific package
367 - mydigests = {}
368 -
369 - ro_distdirs = [x for x in \
370 - util.shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
371 - if os.path.isdir(x)]
372 -
373 - fsmirrors = []
374 - for x in range(len(mymirrors)-1,-1,-1):
375 - if mymirrors[x] and mymirrors[x][0]=='/':
376 - fsmirrors += [mymirrors[x]]
377 - del mymirrors[x]
378 -
379 - restrict_fetch = "fetch" in restrict
380 - custom_local_mirrors = custommirrors.get("local", [])
381 - if restrict_fetch:
382 - # With fetch restriction, a normal uri may only be fetched from
383 - # custom local mirrors (if available). A mirror:// uri may also
384 - # be fetched from specific mirrors (effectively overriding fetch
385 - # restriction, but only for specific mirrors).
386 - locations = custom_local_mirrors
387 - else:
388 - locations = mymirrors
389 -
390 - file_uri_tuples = []
391 - # Check for 'items' attribute since OrderedDict is not a dict.
392 - if hasattr(myuris, 'items'):
393 - for myfile, uri_set in myuris.items():
394 - for myuri in uri_set:
395 - file_uri_tuples.append((myfile, myuri))
396 - else:
397 - for myuri in myuris:
398 - file_uri_tuples.append((os.path.basename(myuri), myuri))
399 -
400 - filedict = OrderedDict()
401 - primaryuri_indexes={}
402 - primaryuri_dict = {}
403 - thirdpartymirror_uris = {}
404 - for myfile, myuri in file_uri_tuples:
405 - if myfile not in filedict:
406 - filedict[myfile]=[]
407 - for y in range(0,len(locations)):
408 - filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
409 - if myuri[:9]=="mirror://":
410 - eidx = myuri.find("/", 9)
411 - if eidx != -1:
412 - mirrorname = myuri[9:eidx]
413 - path = myuri[eidx+1:]
414 -
415 - # Try user-defined mirrors first
416 - if mirrorname in custommirrors:
417 - for cmirr in custommirrors[mirrorname]:
418 - filedict[myfile].append(
419 - cmirr.rstrip("/") + "/" + path)
420 -
421 - # now try the official mirrors
422 - if mirrorname in thirdpartymirrors:
423 - shuffle(thirdpartymirrors[mirrorname])
424 -
425 - uris = [locmirr.rstrip("/") + "/" + path \
426 - for locmirr in thirdpartymirrors[mirrorname]]
427 - filedict[myfile].extend(uris)
428 - thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
429 -
430 - if not filedict[myfile]:
431 - writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
432 - else:
433 - writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
434 - writemsg(" %s\n" % (myuri), noiselevel=-1)
435 - else:
436 - if restrict_fetch:
437 - # Only fetch from specific mirrors is allowed.
438 - continue
439 - if "primaryuri" in restrict:
440 - # Use the source site first.
441 - if myfile in primaryuri_indexes:
442 - primaryuri_indexes[myfile] += 1
443 - else:
444 - primaryuri_indexes[myfile] = 0
445 - filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
446 - else:
447 - filedict[myfile].append(myuri)
448 - primaryuris = primaryuri_dict.get(myfile)
449 - if primaryuris is None:
450 - primaryuris = []
451 - primaryuri_dict[myfile] = primaryuris
452 - primaryuris.append(myuri)
453 -
454 - # Prefer thirdpartymirrors over normal mirrors in cases when
455 - # the file does not yet exist on the normal mirrors.
456 - for myfile, uris in thirdpartymirror_uris.items():
457 - primaryuri_dict.setdefault(myfile, []).extend(uris)
458 -
459 - can_fetch=True
460 -
461 - if listonly:
462 - can_fetch = False
463 -
464 - if can_fetch and not fetch_to_ro:
465 - global _userpriv_test_write_file_cache
466 - dirmode = 0o2070
467 - filemode = 0o60
468 - modemask = 0o2
469 - dir_gid = portage_gid
470 - if "FAKED_MODE" in mysettings:
471 - # When inside fakeroot, directories with portage's gid appear
472 - # to have root's gid. Therefore, use root's gid instead of
473 - # portage's gid to avoid spurrious permissions adjustments
474 - # when inside fakeroot.
475 - dir_gid = 0
476 - distdir_dirs = [""]
477 - if "distlocks" in features:
478 - distdir_dirs.append(".locks")
479 - try:
480 -
481 - for x in distdir_dirs:
482 - mydir = os.path.join(mysettings["DISTDIR"], x)
483 - write_test_file = os.path.join(
484 - mydir, ".__portage_test_write__")
485 -
486 - try:
487 - st = os.stat(mydir)
488 - except OSError:
489 - st = None
490 -
491 - if st is not None and stat.S_ISDIR(st.st_mode):
492 - if not (userfetch or userpriv):
493 - continue
494 - if _userpriv_test_write_file(mysettings, write_test_file):
495 - continue
496 -
497 - _userpriv_test_write_file_cache.pop(write_test_file, None)
498 - if portage.util.ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
499 - if st is None:
500 - # The directory has just been created
501 - # and therefore it must be empty.
502 - continue
503 - writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
504 - noiselevel=-1)
505 - def onerror(e):
506 - raise # bail out on the first error that occurs during recursion
507 - if not apply_recursive_permissions(mydir,
508 - gid=dir_gid, dirmode=dirmode, dirmask=modemask,
509 - filemode=filemode, filemask=modemask, onerror=onerror):
510 - raise portage.exception.OperationNotPermitted(
511 - _("Failed to apply recursive permissions for the portage group."))
512 - except portage.exception.PortageException as e:
513 - if not os.path.isdir(mysettings["DISTDIR"]):
514 - writemsg("!!! %s\n" % str(e), noiselevel=-1)
515 - writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
516 - writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)
517 -
518 - if can_fetch and \
519 - not fetch_to_ro and \
520 - not os.access(mysettings["DISTDIR"], os.W_OK):
521 - writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
522 - noiselevel=-1)
523 - can_fetch = False
524 -
525 - if can_fetch and use_locks and locks_in_subdir:
526 - distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
527 - if not os.access(distlocks_subdir, os.W_OK):
528 - writemsg(_("!!! No write access to write to %s. Aborting.\n") % distlocks_subdir,
529 - noiselevel=-1)
530 - return 0
531 - del distlocks_subdir
532 -
533 - distdir_writable = can_fetch and not fetch_to_ro
534 - failed_files = set()
535 - restrict_fetch_msg = False
536 -
537 - for myfile in filedict:
538 - """
539 - fetched status
540 - 0 nonexistent
541 - 1 partially downloaded
542 - 2 completely downloaded
543 - """
544 - fetched = 0
545 -
546 - orig_digests = mydigests.get(myfile, {})
547 - size = orig_digests.get("size")
548 - if size == 0:
549 - # Zero-byte distfiles are always invalid, so discard their digests.
550 - del mydigests[myfile]
551 - orig_digests.clear()
552 - size = None
553 - pruned_digests = orig_digests
554 - if parallel_fetchonly:
555 - pruned_digests = {}
556 - if size is not None:
557 - pruned_digests["size"] = size
558 -
559 - myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
560 - has_space = True
561 - has_space_superuser = True
562 - file_lock = None
563 - if listonly:
564 - writemsg_stdout("\n", noiselevel=-1)
565 - else:
566 - # check if there is enough space in DISTDIR to completely store myfile
567 - # overestimate the filesize so we aren't bitten by FS overhead
568 - if size is not None and hasattr(os, "statvfs"):
569 - vfs_stat = os.statvfs(mysettings["DISTDIR"])
570 - try:
571 - mysize = os.stat(myfile_path).st_size
572 - except OSError as e:
573 - if e.errno not in (errno.ENOENT, errno.ESTALE):
574 - raise
575 - del e
576 - mysize = 0
577 - if (size - mysize + vfs_stat.f_bsize) >= \
578 - (vfs_stat.f_bsize * vfs_stat.f_bavail):
579 -
580 - if (size - mysize + vfs_stat.f_bsize) >= \
581 - (vfs_stat.f_bsize * vfs_stat.f_bfree):
582 - has_space_superuser = False
583 -
584 - if not has_space_superuser:
585 - has_space = False
586 - elif secpass < 2:
587 - has_space = False
588 - elif userfetch:
589 - has_space = False
590 -
591 - if not has_space:
592 - writemsg(_("!!! Insufficient space to store %s in %s\n") % \
593 - (myfile, mysettings["DISTDIR"]), noiselevel=-1)
594 -
595 - if has_space_superuser:
596 - writemsg(_("!!! Insufficient privileges to use "
597 - "remaining space.\n"), noiselevel=-1)
598 - if userfetch:
599 - writemsg(_("!!! You may set FEATURES=\"-userfetch\""
600 - " in /etc/make.conf in order to fetch with\n"
601 - "!!! superuser privileges.\n"), noiselevel=-1)
602 -
603 - if distdir_writable and use_locks:
604 -
605 - if locks_in_subdir:
606 - lock_file = os.path.join(mysettings["DISTDIR"],
607 - locks_in_subdir, myfile)
608 - else:
609 - lock_file = myfile_path
610 -
611 - lock_kwargs = {}
612 - if fetchonly:
613 - lock_kwargs["flags"] = os.O_NONBLOCK
614 -
615 - try:
616 - file_lock = portage.locks.lockfile(myfile_path,
617 - wantnewlockfile=1, **lock_kwargs)
618 - except portage.exception.TryAgain:
619 - writemsg(_(">>> File '%s' is already locked by "
620 - "another fetcher. Continuing...\n") % myfile,
621 - noiselevel=-1)
622 - continue
623 - try:
624 - if not listonly:
625 -
626 - eout = portage.output.EOutput()
627 - eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
628 - match, mystat = _check_distfile(
629 - myfile_path, pruned_digests, eout)
630 - if match:
631 - if distdir_writable:
632 - try:
633 - apply_secpass_permissions(myfile_path,
634 - gid=portage_gid, mode=0o664, mask=0o2,
635 - stat_cached=mystat)
636 - except portage.exception.PortageException as e:
637 - if not os.access(myfile_path, os.R_OK):
638 - writemsg(_("!!! Failed to adjust permissions:"
639 - " %s\n") % str(e), noiselevel=-1)
640 - del e
641 - continue
642 -
643 - if distdir_writable and mystat is None:
644 - # Remove broken symlinks if necessary.
645 - try:
646 - os.unlink(myfile_path)
647 - except OSError:
648 - pass
649 -
650 - if mystat is not None:
651 - if stat.S_ISDIR(mystat.st_mode):
652 - portage.util.writemsg_level(
653 - _("!!! Unable to fetch file since "
654 - "a directory is in the way: \n"
655 - "!!! %s\n") % myfile_path,
656 - level=logging.ERROR, noiselevel=-1)
657 - return 0
658 -
659 - if mystat.st_size == 0:
660 - if distdir_writable:
661 - try:
662 - os.unlink(myfile_path)
663 - except OSError:
664 - pass
665 - elif distdir_writable:
666 - if mystat.st_size < fetch_resume_size and \
667 - mystat.st_size < size:
668 - # If the file already exists and the size does not
669 - # match the existing digests, it may be that the
670 - # user is attempting to update the digest. In this
671 - # case, the digestgen() function will advise the
672 - # user to use `ebuild --force foo.ebuild manifest`
673 - # in order to force the old digests to be replaced.
674 - # Since the user may want to keep this file, rename
675 - # it instead of deleting it.
676 - writemsg(_(">>> Renaming distfile with size "
677 - "%d (smaller than " "PORTAGE_FETCH_RESU"
678 - "ME_MIN_SIZE)\n") % mystat.st_size)
679 - temp_filename = \
680 - _checksum_failure_temp_file(
681 - mysettings["DISTDIR"], myfile)
682 - writemsg_stdout(_("Refetching... "
683 - "File renamed to '%s'\n\n") % \
684 - temp_filename, noiselevel=-1)
685 - elif mystat.st_size >= size:
686 - temp_filename = \
687 - _checksum_failure_temp_file(
688 - mysettings["DISTDIR"], myfile)
689 - writemsg_stdout(_("Refetching... "
690 - "File renamed to '%s'\n\n") % \
691 - temp_filename, noiselevel=-1)
692 -
693 - if distdir_writable and ro_distdirs:
694 - readonly_file = None
695 - for x in ro_distdirs:
696 - filename = os.path.join(x, myfile)
697 - match, mystat = _check_distfile(
698 - filename, pruned_digests, eout)
699 - if match:
700 - readonly_file = filename
701 - break
702 - if readonly_file is not None:
703 - try:
704 - os.unlink(myfile_path)
705 - except OSError as e:
706 - if e.errno not in (errno.ENOENT, errno.ESTALE):
707 - raise
708 - del e
709 - os.symlink(readonly_file, myfile_path)
710 - continue
711 -
712 - if fsmirrors and not os.path.exists(myfile_path) and has_space:
713 - for mydir in fsmirrors:
714 - mirror_file = os.path.join(mydir, myfile)
715 - try:
716 - shutil.copyfile(mirror_file, myfile_path)
717 - writemsg(_("Local mirror has file: %s\n") % myfile)
718 - break
719 - except (IOError, OSError) as e:
720 - if e.errno not in (errno.ENOENT, errno.ESTALE):
721 - raise
722 - del e
723 -
724 - try:
725 - mystat = os.stat(myfile_path)
726 - except OSError as e:
727 - if e.errno not in (errno.ENOENT, errno.ESTALE):
728 - raise
729 - del e
730 - else:
731 - try:
732 - apply_secpass_permissions(
733 - myfile_path, gid=portage_gid, mode=0o664, mask=0o2,
734 - stat_cached=mystat)
735 - except portage.exception.PortageException as e:
736 - if not os.access(myfile_path, os.R_OK):
737 - writemsg(_("!!! Failed to adjust permissions:"
738 - " %s\n") % str(e), noiselevel=-1)
739 -
740 - # If the file is empty then it's obviously invalid. Remove
741 - # the empty file and try to download if possible.
742 - if mystat.st_size == 0:
743 - if distdir_writable:
744 - try:
745 - os.unlink(myfile_path)
746 - except EnvironmentError:
747 - pass
748 - elif myfile not in mydigests:
749 - # We don't have a digest, but the file exists. We must
750 - # assume that it is fully downloaded.
751 - continue
752 - else:
753 - if mystat.st_size < mydigests[myfile]["size"] and \
754 - not restrict_fetch:
755 - fetched = 1 # Try to resume this download.
756 - elif parallel_fetchonly and \
757 - mystat.st_size == mydigests[myfile]["size"]:
758 - eout = portage.output.EOutput()
759 - eout.quiet = \
760 - mysettings.get("PORTAGE_QUIET") == "1"
761 - eout.ebegin(
762 - "%s size ;-)" % (myfile, ))
763 - eout.eend(0)
764 - continue
765 - else:
766 - verified_ok, reason = portage.checksum.verify_all(
767 - myfile_path, mydigests[myfile])
768 - if not verified_ok:
769 - writemsg(_("!!! Previously fetched"
770 - " file: '%s'\n") % myfile, noiselevel=-1)
771 - writemsg(_("!!! Reason: %s\n") % reason[0],
772 - noiselevel=-1)
773 - writemsg(_("!!! Got: %s\n"
774 - "!!! Expected: %s\n") % \
775 - (reason[1], reason[2]), noiselevel=-1)
776 - if reason[0] == _("Insufficient data for checksum verification"):
777 - return 0
778 - if distdir_writable:
779 - temp_filename = \
780 - _checksum_failure_temp_file(
781 - mysettings["DISTDIR"], myfile)
782 - writemsg_stdout(_("Refetching... "
783 - "File renamed to '%s'\n\n") % \
784 - temp_filename, noiselevel=-1)
785 - else:
786 - eout = portage.output.EOutput()
787 - eout.quiet = \
788 - mysettings.get("PORTAGE_QUIET", None) == "1"
789 - digests = mydigests.get(myfile)
790 - if digests:
791 - digests = list(digests)
792 - digests.sort()
793 - eout.ebegin(
794 - "%s %s ;-)" % (myfile, " ".join(digests)))
795 - eout.eend(0)
796 - continue # fetch any remaining files
797 -
798 - # Create a reversed list since that is optimal for list.pop().
799 - uri_list = filedict[myfile][:]
800 - uri_list.reverse()
801 - checksum_failure_count = 0
802 - tried_locations = set()
803 - while uri_list:
804 - loc = uri_list.pop()
805 - # Eliminate duplicates here in case we've switched to
806 - # "primaryuri" mode on the fly due to a checksum failure.
807 - if loc in tried_locations:
808 - continue
809 - tried_locations.add(loc)
810 - if listonly:
811 - writemsg_stdout(loc+" ", noiselevel=-1)
812 - continue
813 - # allow different fetchcommands per protocol
814 - protocol = loc[0:loc.find("://")]
815 -
816 - missing_file_param = False
817 - fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
818 - fetchcommand = mysettings.get(fetchcommand_var)
819 - if fetchcommand is None:
820 - fetchcommand_var = "FETCHCOMMAND"
821 - fetchcommand = mysettings.get(fetchcommand_var)
822 - if fetchcommand is None:
823 - portage.util.writemsg_level(
824 - _("!!! %s is unset. It should "
825 - "have been defined in\n!!! %s/make.globals.\n") \
826 - % (fetchcommand_var,
827 - portage.const.GLOBAL_CONFIG_PATH),
828 - level=logging.ERROR, noiselevel=-1)
829 - return 0
830 - if "${FILE}" not in fetchcommand:
831 - portage.util.writemsg_level(
832 - _("!!! %s does not contain the required ${FILE}"
833 - " parameter.\n") % fetchcommand_var,
834 - level=logging.ERROR, noiselevel=-1)
835 - missing_file_param = True
836 -
837 - resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
838 - resumecommand = mysettings.get(resumecommand_var)
839 - if resumecommand is None:
840 - resumecommand_var = "RESUMECOMMAND"
841 - resumecommand = mysettings.get(resumecommand_var)
842 - if resumecommand is None:
843 - portage.util.writemsg_level(
844 - _("!!! %s is unset. It should "
845 - "have been defined in\n!!! %s/make.globals.\n") \
846 - % (resumecommand_var,
847 - portage.const.GLOBAL_CONFIG_PATH),
848 - level=logging.ERROR, noiselevel=-1)
849 - return 0
850 - if "${FILE}" not in resumecommand:
851 - portage.util.writemsg_level(
852 - _("!!! %s does not contain the required ${FILE}"
853 - " parameter.\n") % resumecommand_var,
854 - level=logging.ERROR, noiselevel=-1)
855 - missing_file_param = True
856 -
857 - if missing_file_param:
858 - portage.util.writemsg_level(
859 - _("!!! Refer to the make.conf(5) man page for "
860 - "information about how to\n!!! correctly specify "
861 - "FETCHCOMMAND and RESUMECOMMAND.\n"),
862 - level=logging.ERROR, noiselevel=-1)
863 - if myfile != os.path.basename(loc):
864 - return 0
865 -
866 - if not can_fetch:
867 - if fetched != 2:
868 - try:
869 - mysize = os.stat(myfile_path).st_size
870 - except OSError as e:
871 - if e.errno not in (errno.ENOENT, errno.ESTALE):
872 - raise
873 - del e
874 - mysize = 0
875 -
876 - if mysize == 0:
877 - writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
878 - noiselevel=-1)
879 - elif size is None or size > mysize:
880 - writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
881 - noiselevel=-1)
882 - else:
883 - writemsg(_("!!! File %s is incorrect size, "
884 - "but unable to retry.\n") % myfile, noiselevel=-1)
885 - return 0
886 - else:
887 - continue
888 -
889 - if fetched != 2 and has_space:
890 - #we either need to resume or start the download
891 - if fetched == 1:
892 - try:
893 - mystat = os.stat(myfile_path)
894 - except OSError as e:
895 - if e.errno not in (errno.ENOENT, errno.ESTALE):
896 - raise
897 - del e
898 - fetched = 0
899 - else:
900 - if mystat.st_size < fetch_resume_size:
901 - writemsg(_(">>> Deleting distfile with size "
902 - "%d (smaller than " "PORTAGE_FETCH_RESU"
903 - "ME_MIN_SIZE)\n") % mystat.st_size)
904 - try:
905 - os.unlink(myfile_path)
906 - except OSError as e:
907 - if e.errno not in \
908 - (errno.ENOENT, errno.ESTALE):
909 - raise
910 - del e
911 - fetched = 0
912 - if fetched == 1:
913 - #resume mode:
914 - writemsg(_(">>> Resuming download...\n"))
915 - locfetch=resumecommand
916 - command_var = resumecommand_var
917 - else:
918 - #normal mode:
919 - locfetch=fetchcommand
920 - command_var = fetchcommand_var
921 - writemsg_stdout(_(">>> Downloading '%s'\n") % \
922 - re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
923 - variables = {
924 - "DISTDIR": mysettings["DISTDIR"],
925 - "URI": loc,
926 - "FILE": myfile
927 - }
928 -
929 - myfetch = util.shlex_split(locfetch)
930 - myfetch = [varexpand(x, mydict=variables) for x in myfetch]
931 - myret = -1
932 - try:
933 -
934 - myret = _spawn_fetch(mysettings, myfetch)
935 -
936 - finally:
937 - try:
938 - apply_secpass_permissions(myfile_path,
939 - gid=portage_gid, mode=0o664, mask=0o2)
940 - except portage.exception.FileNotFound as e:
941 - pass
942 - except portage.exception.PortageException as e:
943 - if not os.access(myfile_path, os.R_OK):
944 - writemsg(_("!!! Failed to adjust permissions:"
945 - " %s\n") % str(e), noiselevel=-1)
946 -
947 - # If the file is empty then it's obviously invalid. Don't
948 - # trust the return value from the fetcher. Remove the
949 - # empty file and try to download again.
950 - try:
951 - if os.stat(myfile_path).st_size == 0:
952 - os.unlink(myfile_path)
953 - fetched = 0
954 - continue
955 - except EnvironmentError:
956 - pass
957 -
958 - if mydigests is not None and myfile in mydigests:
959 - try:
960 - mystat = os.stat(myfile_path)
961 - except OSError as e:
962 - if e.errno not in (errno.ENOENT, errno.ESTALE):
963 - raise
964 - del e
965 - fetched = 0
966 - else:
967 -
968 - if stat.S_ISDIR(mystat.st_mode):
969 - # This can happen if FETCHCOMMAND erroneously
970 - # contains wget's -P option where it should
971 - # instead have -O.
972 - portage.util.writemsg_level(
973 - _("!!! The command specified in the "
974 - "%s variable appears to have\n!!! "
975 - "created a directory instead of a "
976 - "normal file.\n") % command_var,
977 - level=logging.ERROR, noiselevel=-1)
978 - portage.util.writemsg_level(
979 - _("!!! Refer to the make.conf(5) "
980 - "man page for information about how "
981 - "to\n!!! correctly specify "
982 - "FETCHCOMMAND and RESUMECOMMAND.\n"),
983 - level=logging.ERROR, noiselevel=-1)
984 - return 0
985 -
986 - # no exception? file exists. let digestcheck() report
987 - # an appropriate message for size or checksum errors
988 -
989 - # If the fetcher reported success and the file is
990 - # too small, it's probably because the digest is
991 - # bad (upstream changed the distfile). In this
992 - # case we don't want to attempt to resume. Show a
993 - # digest verification failure so that the user gets
994 - # a clue about what just happened.
995 - if myret != os.EX_OK and \
996 - mystat.st_size < mydigests[myfile]["size"]:
997 - # Fetch failed... Try the next one... Kill 404 files though.
998 - if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
999 - html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
1000 - if html404.search(codecs.open(
1001 - _unicode_encode(myfile_path,
1002 - encoding=_encodings['fs'], errors='strict'),
1003 - mode='r', encoding=_encodings['content'], errors='replace'
1004 - ).read()):
1005 - try:
1006 - os.unlink(mysettings["DISTDIR"]+"/"+myfile)
1007 - writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
1008 - fetched = 0
1009 - continue
1010 - except (IOError, OSError):
1011 - pass
1012 - fetched = 1
1013 - continue
1014 - if True:
1015 - # File is the correct size--check the checksums for the fetched
1016 - # file NOW, for those users who don't have a stable/continuous
1017 - # net connection. This way we have a chance to try to download
1018 - # from another mirror...
1019 - verified_ok,reason = portage.checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
1020 - if not verified_ok:
1021 - print(reason)
1022 - writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
1023 - noiselevel=-1)
1024 - writemsg(_("!!! Reason: %s\n") % reason[0],
1025 - noiselevel=-1)
1026 - writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
1027 - (reason[1], reason[2]), noiselevel=-1)
1028 - if reason[0] == _("Insufficient data for checksum verification"):
1029 - return 0
1030 - temp_filename = \
1031 - _checksum_failure_temp_file(
1032 - mysettings["DISTDIR"], myfile)
1033 - writemsg_stdout(_("Refetching... "
1034 - "File renamed to '%s'\n\n") % \
1035 - temp_filename, noiselevel=-1)
1036 - fetched=0
1037 - checksum_failure_count += 1
1038 - if checksum_failure_count == \
1039 - checksum_failure_primaryuri:
1040 - # Switch to "primaryuri" mode in order
1041 - # to increase the probability
1042 - # of success.
1043 - primaryuris = \
1044 - primaryuri_dict.get(myfile)
1045 - if primaryuris:
1046 - uri_list.extend(
1047 - reversed(primaryuris))
1048 - if checksum_failure_count >= \
1049 - checksum_failure_max_tries:
1050 - break
1051 - else:
1052 - eout = portage.output.EOutput()
1053 - eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
1054 - digests = mydigests.get(myfile)
1055 - if digests:
1056 - eout.ebegin("%s %s ;-)" % \
1057 - (myfile, " ".join(sorted(digests))))
1058 - eout.eend(0)
1059 - fetched=2
1060 - break
1061 - else:
1062 - if not myret:
1063 - fetched=2
1064 - break
1065 - elif mydigests!=None:
1066 - writemsg(_("No digest file available and download failed.\n\n"),
1067 - noiselevel=-1)
1068 - finally:
1069 - if use_locks and file_lock:
1070 - portage.locks.unlockfile(file_lock)
1071 -
1072 - if listonly:
1073 - writemsg_stdout("\n", noiselevel=-1)
1074 - if fetched != 2:
1075 - if restrict_fetch and not restrict_fetch_msg:
1076 - restrict_fetch_msg = True
1077 - msg = _("\n!!! %s/%s"
1078 - " has fetch restriction turned on.\n"
1079 - "!!! This probably means that this "
1080 - "ebuild's files must be downloaded\n"
1081 - "!!! manually. See the comments in"
1082 - " the ebuild for more information.\n\n") % \
1083 - (mysettings["CATEGORY"], mysettings["PF"])
1084 - portage.util.writemsg_level(msg,
1085 - level=logging.ERROR, noiselevel=-1)
1086 - have_builddir = "PORTAGE_BUILDDIR" in mysettings and \
1087 - os.path.isdir(mysettings["PORTAGE_BUILDDIR"])
1088 -
1089 - global_tmpdir = mysettings["PORTAGE_TMPDIR"]
1090 - private_tmpdir = None
1091 - if not parallel_fetchonly and not have_builddir:
1092 - # When called by digestgen(), it's normal that
1093 - # PORTAGE_BUILDDIR doesn't exist. It's helpful
1094 - # to show the pkg_nofetch output though, so go
1095 - # ahead and create a temporary PORTAGE_BUILDDIR.
1096 - # Use a temporary config instance to avoid altering
1097 - # the state of the one that's been passed in.
1098 - mysettings = config(clone=mysettings)
1099 - from tempfile import mkdtemp
1100 - try:
1101 - private_tmpdir = mkdtemp("", "._portage_fetch_.",
1102 - global_tmpdir)
1103 - except OSError as e:
1104 - if e.errno != portage.exception.PermissionDenied.errno:
1105 - raise
1106 - raise portage.exception.PermissionDenied(global_tmpdir)
1107 - mysettings["PORTAGE_TMPDIR"] = private_tmpdir
1108 - mysettings.backup_changes("PORTAGE_TMPDIR")
1109 - debug = mysettings.get("PORTAGE_DEBUG") == "1"
1110 - portage.doebuild_environment(mysettings["EBUILD"], "fetch",
1111 - mysettings["ROOT"], mysettings, debug, 1, None)
1112 - prepare_build_dirs(mysettings["ROOT"], mysettings, 0)
1113 - have_builddir = True
1114 -
1115 - if not parallel_fetchonly and have_builddir:
1116 - # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1117 - # ensuring sane $PWD (bug #239560) and storing elog
1118 - # messages. Therefore, calling code needs to ensure that
1119 - # PORTAGE_BUILDDIR is already clean and locked here.
1120 -
1121 - # All the pkg_nofetch goes to stderr since it's considered
1122 - # to be an error message.
1123 - fd_pipes = {
1124 - 0 : sys.stdin.fileno(),
1125 - 1 : sys.stderr.fileno(),
1126 - 2 : sys.stderr.fileno(),
1127 - }
1128 -
1129 - ebuild_phase = mysettings.get("EBUILD_PHASE")
1130 - try:
1131 - mysettings["EBUILD_PHASE"] = "nofetch"
1132 - spawn(_shell_quote(EBUILD_SH_BINARY) + \
1133 - " nofetch", mysettings, fd_pipes=fd_pipes)
1134 - finally:
1135 - if ebuild_phase is None:
1136 - mysettings.pop("EBUILD_PHASE", None)
1137 - else:
1138 - mysettings["EBUILD_PHASE"] = ebuild_phase
1139 - if private_tmpdir is not None:
1140 - shutil.rmtree(private_tmpdir)
1141 -
1142 - elif restrict_fetch:
1143 - pass
1144 - elif listonly:
1145 - pass
1146 - elif not filedict[myfile]:
1147 - writemsg(_("Warning: No mirrors available for file"
1148 - " '%s'\n") % (myfile), noiselevel=-1)
1149 - else:
1150 - writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
1151 - noiselevel=-1)
1152 -
1153 - if listonly:
1154 - continue
1155 - elif fetchonly:
1156 - failed_files.add(myfile)
1157 - continue
1158 - return 0
1159 - if failed_files:
1160 - return 0
1161 - return 1
1162 -
1163 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
1164 """
1165 Generates a digest file if missing. Assumes all files are available.
1166
1167 Modified: main/trunk/pym/portage/dbapi/bintree.py
1168 ===================================================================
1169 --- main/trunk/pym/portage/dbapi/bintree.py 2010-02-22 04:13:28 UTC (rev 15424)
1170 +++ main/trunk/pym/portage/dbapi/bintree.py 2010-02-22 04:56:30 UTC (rev 15425)
1171 @@ -21,11 +21,12 @@
1172 PermissionDenied, PortageException
1173 from portage.localization import _
1174
1175 -from portage import dep_expand, listdir, _check_distfile, _movefile
1176 +from portage import dep_expand, listdir, _movefile
1177 from portage import os
1178 from portage import _encodings
1179 from portage import _unicode_decode
1180 from portage import _unicode_encode
1181 +from portage.package.ebuild.fetch import _check_distfile
1182
1183 import codecs
1184 import errno
1185
1186 Added: main/trunk/pym/portage/package/ebuild/fetch.py
1187 ===================================================================
1188 --- main/trunk/pym/portage/package/ebuild/fetch.py (rev 0)
1189 +++ main/trunk/pym/portage/package/ebuild/fetch.py 2010-02-22 04:56:30 UTC (rev 15425)
1190 @@ -0,0 +1,1123 @@
1191 +# Copyright 2010 Gentoo Foundation
1192 +# Distributed under the terms of the GNU General Public License v2
1193 +# $Id$
1194 +
1195 +from __future__ import print_function
1196 +
1197 +__all__ = ['fetch']
1198 +
1199 +import codecs
1200 +import errno
1201 +import logging
1202 +import random
1203 +import re
1204 +import shutil
1205 +import stat
1206 +import sys
1207 +
1208 +from portage import check_config_instance, doebuild_environment, OrderedDict, os, prepare_build_dirs, selinux, _encodings, _shell_quote, _unicode_encode
1209 +from portage.checksum import perform_md5, verify_all
1210 +from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, EBUILD_SH_BINARY, GLOBAL_CONFIG_PATH
1211 +from portage.data import portage_gid, portage_uid, secpass, userpriv_groups
1212 +from portage.exception import FileNotFound, OperationNotPermitted, PermissionDenied, PortageException, TryAgain
1213 +from portage.localization import _
1214 +from portage.locks import lockfile, unlockfile
1215 +from portage.manifest import Manifest
1216 +from portage.output import colorize, EOutput
1217 +from portage.package.ebuild.config import config
1218 +from portage.util import apply_recursive_permissions, apply_secpass_permissions, ensure_dirs, grabdict, shlex_split, varexpand, writemsg, writemsg_level, writemsg_stdout
1219 +from portage.process import spawn
1220 +
1221 +_userpriv_spawn_kwargs = (
1222 + ("uid", portage_uid),
1223 + ("gid", portage_gid),
1224 + ("groups", userpriv_groups),
1225 + ("umask", 0o02),
1226 +)
1227 +
1228 +def _spawn_fetch(settings, args, **kwargs):
1229 + """
1230 + Spawn a process with appropriate settings for fetching, including
1231 + userfetch and selinux support.
1232 + """
1233 +
1234 + global _userpriv_spawn_kwargs
1235 +
1236 + # Redirect all output to stdout since some fetchers like
1237 + # wget pollute stderr (if portage detects a problem then it
1238 + # can send its own message to stderr).
1239 + if "fd_pipes" not in kwargs:
1240 +
1241 + kwargs["fd_pipes"] = {
1242 + 0 : sys.stdin.fileno(),
1243 + 1 : sys.stdout.fileno(),
1244 + 2 : sys.stdout.fileno(),
1245 + }
1246 +
1247 + if "userfetch" in settings.features and \
1248 + os.getuid() == 0 and portage_gid and portage_uid:
1249 + kwargs.update(_userpriv_spawn_kwargs)
1250 +
1251 + spawn_func = spawn
1252 +
1253 + if settings.selinux_enabled():
1254 + spawn_func = selinux.spawn_wrapper(spawn_func,
1255 + settings["PORTAGE_FETCH_T"])
1256 +
1257 + # bash is an allowed entrypoint, while most binaries are not
1258 + if args[0] != BASH_BINARY:
1259 + args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
1260 +
1261 + rval = spawn_func(args, env=settings.environ(), **kwargs)
1262 +
1263 + return rval
1264 +
1265 +_userpriv_test_write_file_cache = {}
1266 +_userpriv_test_write_cmd_script = "touch %(file_path)s 2>/dev/null ; rval=$? ; " + \
1267 + "rm -f %(file_path)s ; exit $rval"
1268 +
1269 +def _userpriv_test_write_file(settings, file_path):
1270 + """
1271 + Drop privileges and try to open a file for writing. The file may or
1272 + may not exist, and the parent directory is assumed to exist. The file
1273 + is removed before returning.
1274 +
1275 + @param settings: A config instance which is passed to _spawn_fetch()
1276 + @param file_path: A file path to open and write.
1277 + @return: True if write succeeds, False otherwise.
1278 + """
1279 +
1280 + global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
1281 + rval = _userpriv_test_write_file_cache.get(file_path)
1282 + if rval is not None:
1283 + return rval
1284 +
1285 + args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
1286 + {"file_path" : _shell_quote(file_path)}]
1287 +
1288 + returncode = _spawn_fetch(settings, args)
1289 +
1290 + rval = returncode == os.EX_OK
1291 + _userpriv_test_write_file_cache[file_path] = rval
1292 + return rval
1293 +
1294 +def _checksum_failure_temp_file(distdir, basename):
1295 + """
1296 + First try to find a duplicate temp file with the same checksum and return
1297 + that filename if available. Otherwise, use mkstemp to create a new unique
1298 + filename._checksum_failure_.$RANDOM, rename the given file, and return the
1299 + new filename. In any case, filename will be renamed or removed before this
1300 + function returns a temp filename.
1301 + """
1302 +
1303 + filename = os.path.join(distdir, basename)
1304 + size = os.stat(filename).st_size
1305 + checksum = None
1306 + tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
1307 + for temp_filename in os.listdir(distdir):
1308 + if not tempfile_re.match(temp_filename):
1309 + continue
1310 + temp_filename = os.path.join(distdir, temp_filename)
1311 + try:
1312 + if size != os.stat(temp_filename).st_size:
1313 + continue
1314 + except OSError:
1315 + continue
1316 + try:
1317 + temp_checksum = perform_md5(temp_filename)
1318 + except FileNotFound:
1319 + # Apparently the temp file disappeared. Let it go.
1320 + continue
1321 + if checksum is None:
1322 + checksum = perform_md5(filename)
1323 + if checksum == temp_checksum:
1324 + os.unlink(filename)
1325 + return temp_filename
1326 +
1327 + from tempfile import mkstemp
1328 + fd, temp_filename = mkstemp("", basename + "._checksum_failure_.", distdir)
1329 + os.close(fd)
1330 + os.rename(filename, temp_filename)
1331 + return temp_filename
1332 +
1333 +def _check_digests(filename, digests, show_errors=1):
1334 + """
1335 + Check digests and display a message if an error occurs.
1336 + @return True if all digests match, False otherwise.
1337 + """
1338 + verified_ok, reason = verify_all(filename, digests)
1339 + if not verified_ok:
1340 + if show_errors:
1341 + writemsg(_("!!! Previously fetched"
1342 + " file: '%s'\n") % filename, noiselevel=-1)
1343 + writemsg(_("!!! Reason: %s\n") % reason[0],
1344 + noiselevel=-1)
1345 + writemsg(_("!!! Got: %s\n"
1346 + "!!! Expected: %s\n") % \
1347 + (reason[1], reason[2]), noiselevel=-1)
1348 + return False
1349 + return True
1350 +
1351 +def _check_distfile(filename, digests, eout, show_errors=1):
1352 + """
1353 + @return a tuple of (match, stat_obj) where match is True if filename
1354 + matches all given digests (if any) and stat_obj is a stat result, or
1355 + None if the file does not exist.
1356 + """
1357 + if digests is None:
1358 + digests = {}
1359 + size = digests.get("size")
1360 + if size is not None and len(digests) == 1:
1361 + digests = None
1362 +
1363 + try:
1364 + st = os.stat(filename)
1365 + except OSError:
1366 + return (False, None)
1367 + if size is not None and size != st.st_size:
1368 + return (False, st)
1369 + if not digests:
1370 + if size is not None:
1371 + eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
1372 + eout.eend(0)
1373 + elif st.st_size == 0:
1374 + # Zero-byte distfiles are always invalid.
1375 + return (False, st)
1376 + else:
1377 + if _check_digests(filename, digests, show_errors=show_errors):
1378 + eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
1379 + " ".join(sorted(digests))))
1380 + eout.eend(0)
1381 + else:
1382 + return (False, st)
1383 + return (True, st)
1384 +
1385 +_fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
1386 +
1387 +_size_suffix_map = {
1388 + '' : 0,
1389 + 'K' : 10,
1390 + 'M' : 20,
1391 + 'G' : 30,
1392 + 'T' : 40,
1393 + 'P' : 50,
1394 + 'E' : 60,
1395 + 'Z' : 70,
1396 + 'Y' : 80,
1397 +}
1398 +
1399 +def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
1400 + "fetch files. Will use digest file if available."
1401 +
1402 + if not myuris:
1403 + return 1
1404 +
1405 + features = mysettings.features
1406 + restrict = mysettings.get("PORTAGE_RESTRICT","").split()
1407 +
1408 + userfetch = secpass >= 2 and "userfetch" in features
1409 + userpriv = secpass >= 2 and "userpriv" in features
1410 +
1411 + # 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
1412 + if "mirror" in restrict or \
1413 + "nomirror" in restrict:
1414 + if ("mirror" in features) and ("lmirror" not in features):
1415 + # lmirror should allow you to bypass mirror restrictions.
1416 + # XXX: This is not a good thing, and is temporary at best.
1417 + print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
1418 + return 1
1419 +
1420 + # Generally, downloading the same file repeatedly from
1421 + # every single available mirror is a waste of bandwidth
1422 + # and time, so there needs to be a cap.
1423 + checksum_failure_max_tries = 5
1424 + v = checksum_failure_max_tries
1425 + try:
1426 + v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
1427 + checksum_failure_max_tries))
1428 + except (ValueError, OverflowError):
1429 + writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
1430 + " contains non-integer value: '%s'\n") % \
1431 + mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
1432 + writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
1433 + "default value: %s\n") % checksum_failure_max_tries,
1434 + noiselevel=-1)
1435 + v = checksum_failure_max_tries
1436 + if v < 1:
1437 + writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
1438 + " contains value less than 1: '%s'\n") % v, noiselevel=-1)
1439 + writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
1440 + "default value: %s\n") % checksum_failure_max_tries,
1441 + noiselevel=-1)
1442 + v = checksum_failure_max_tries
1443 + checksum_failure_max_tries = v
1444 + del v
1445 +
1446 + fetch_resume_size_default = "350K"
1447 + fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
1448 + if fetch_resume_size is not None:
1449 + fetch_resume_size = "".join(fetch_resume_size.split())
1450 + if not fetch_resume_size:
1451 + # If it's undefined or empty, silently use the default.
1452 + fetch_resume_size = fetch_resume_size_default
1453 + match = _fetch_resume_size_re.match(fetch_resume_size)
1454 + if match is None or \
1455 + (match.group(2).upper() not in _size_suffix_map):
1456 + writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
1457 + " contains an unrecognized format: '%s'\n") % \
1458 + mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
1459 + writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
1460 + "default value: %s\n") % fetch_resume_size_default,
1461 + noiselevel=-1)
1462 + fetch_resume_size = None
1463 + if fetch_resume_size is None:
1464 + fetch_resume_size = fetch_resume_size_default
1465 + match = _fetch_resume_size_re.match(fetch_resume_size)
1466 + fetch_resume_size = int(match.group(1)) * \
1467 + 2 ** _size_suffix_map[match.group(2).upper()]
1468 +
1469 + # Behave like the package has RESTRICT="primaryuri" after a
1470 + # couple of checksum failures, to increase the probability
1471 + # of success before checksum_failure_max_tries is reached.
1472 + checksum_failure_primaryuri = 2
1473 + thirdpartymirrors = mysettings.thirdpartymirrors()
1474 +
1475 + # In the background parallel-fetch process, it's safe to skip checksum
1476 + # verification of pre-existing files in $DISTDIR that have the correct
1477 + # file size. The parent process will verify their checksums prior to
1478 + # the unpack phase.
1479 +
1480 + parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
1481 + if parallel_fetchonly:
1482 + fetchonly = 1
1483 +
1484 + check_config_instance(mysettings)
1485 +
1486 + custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
1487 + CUSTOM_MIRRORS_FILE), recursive=1)
1488 +
1489 + mymirrors=[]
1490 +
1491 + if listonly or ("distlocks" not in features):
1492 + use_locks = 0
1493 +
1494 + fetch_to_ro = 0
1495 + if "skiprocheck" in features:
1496 + fetch_to_ro = 1
1497 +
1498 + if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
1499 + if use_locks:
1500 + writemsg(colorize("BAD",
1501 + _("!!! For fetching to a read-only filesystem, "
1502 + "locking should be turned off.\n")), noiselevel=-1)
1503 + writemsg(_("!!! This can be done by adding -distlocks to "
1504 + "FEATURES in /etc/make.conf\n"), noiselevel=-1)
1505 +# use_locks = 0
1506 +
1507 + # local mirrors are always added
1508 + if "local" in custommirrors:
1509 + mymirrors += custommirrors["local"]
1510 +
1511 + if "nomirror" in restrict or \
1512 + "mirror" in restrict:
1513 + # We don't add any mirrors.
1514 + pass
1515 + else:
1516 + if try_mirrors:
1517 + mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
1518 +
1519 + skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
1520 + pkgdir = mysettings.get("O")
1521 + if not (pkgdir is None or skip_manifest):
1522 + mydigests = Manifest(
1523 + pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
1524 + else:
1525 + # no digests because fetch was not called for a specific package
1526 + mydigests = {}
1527 +
1528 + ro_distdirs = [x for x in \
1529 + shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
1530 + if os.path.isdir(x)]
1531 +
1532 + fsmirrors = []
1533 + for x in range(len(mymirrors)-1,-1,-1):
1534 + if mymirrors[x] and mymirrors[x][0]=='/':
1535 + fsmirrors += [mymirrors[x]]
1536 + del mymirrors[x]
1537 +
1538 + restrict_fetch = "fetch" in restrict
1539 + custom_local_mirrors = custommirrors.get("local", [])
1540 + if restrict_fetch:
1541 + # With fetch restriction, a normal uri may only be fetched from
1542 + # custom local mirrors (if available). A mirror:// uri may also
1543 + # be fetched from specific mirrors (effectively overriding fetch
1544 + # restriction, but only for specific mirrors).
1545 + locations = custom_local_mirrors
1546 + else:
1547 + locations = mymirrors
1548 +
1549 + file_uri_tuples = []
1550 + # Check for 'items' attribute since OrderedDict is not a dict.
1551 + if hasattr(myuris, 'items'):
1552 + for myfile, uri_set in myuris.items():
1553 + for myuri in uri_set:
1554 + file_uri_tuples.append((myfile, myuri))
1555 + else:
1556 + for myuri in myuris:
1557 + file_uri_tuples.append((os.path.basename(myuri), myuri))
1558 +
1559 + filedict = OrderedDict()
1560 + primaryuri_indexes={}
1561 + primaryuri_dict = {}
1562 + thirdpartymirror_uris = {}
1563 + for myfile, myuri in file_uri_tuples:
1564 + if myfile not in filedict:
1565 + filedict[myfile]=[]
1566 + for y in range(0,len(locations)):
1567 + filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
1568 + if myuri[:9]=="mirror://":
1569 + eidx = myuri.find("/", 9)
1570 + if eidx != -1:
1571 + mirrorname = myuri[9:eidx]
1572 + path = myuri[eidx+1:]
1573 +
1574 + # Try user-defined mirrors first
1575 + if mirrorname in custommirrors:
1576 + for cmirr in custommirrors[mirrorname]:
1577 + filedict[myfile].append(
1578 + cmirr.rstrip("/") + "/" + path)
1579 +
1580 + # now try the official mirrors
1581 + if mirrorname in thirdpartymirrors:
1582 + random.shuffle(thirdpartymirrors[mirrorname])
1583 +
1584 + uris = [locmirr.rstrip("/") + "/" + path \
1585 + for locmirr in thirdpartymirrors[mirrorname]]
1586 + filedict[myfile].extend(uris)
1587 + thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
1588 +
1589 + if not filedict[myfile]:
1590 + writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
1591 + else:
1592 + writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
1593 + writemsg(" %s\n" % (myuri), noiselevel=-1)
1594 + else:
1595 + if restrict_fetch:
1596 + # Only fetch from specific mirrors is allowed.
1597 + continue
1598 + if "primaryuri" in restrict:
1599 + # Use the source site first.
1600 + if myfile in primaryuri_indexes:
1601 + primaryuri_indexes[myfile] += 1
1602 + else:
1603 + primaryuri_indexes[myfile] = 0
1604 + filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
1605 + else:
1606 + filedict[myfile].append(myuri)
1607 + primaryuris = primaryuri_dict.get(myfile)
1608 + if primaryuris is None:
1609 + primaryuris = []
1610 + primaryuri_dict[myfile] = primaryuris
1611 + primaryuris.append(myuri)
1612 +
1613 + # Prefer thirdpartymirrors over normal mirrors in cases when
1614 + # the file does not yet exist on the normal mirrors.
1615 + for myfile, uris in thirdpartymirror_uris.items():
1616 + primaryuri_dict.setdefault(myfile, []).extend(uris)
1617 +
1618 + can_fetch=True
1619 +
1620 + if listonly:
1621 + can_fetch = False
1622 +
1623 + if can_fetch and not fetch_to_ro:
1624 + global _userpriv_test_write_file_cache
1625 + dirmode = 0o2070
1626 + filemode = 0o60
1627 + modemask = 0o2
1628 + dir_gid = portage_gid
1629 + if "FAKED_MODE" in mysettings:
1630 + # When inside fakeroot, directories with portage's gid appear
1631 + # to have root's gid. Therefore, use root's gid instead of
1632 + # portage's gid to avoid spurious permissions adjustments
1633 + # when inside fakeroot.
1634 + dir_gid = 0
1635 + distdir_dirs = [""]
1636 + if "distlocks" in features:
1637 + distdir_dirs.append(".locks")
1638 + try:
1639 +
1640 + for x in distdir_dirs:
1641 + mydir = os.path.join(mysettings["DISTDIR"], x)
1642 + write_test_file = os.path.join(
1643 + mydir, ".__portage_test_write__")
1644 +
1645 + try:
1646 + st = os.stat(mydir)
1647 + except OSError:
1648 + st = None
1649 +
1650 + if st is not None and stat.S_ISDIR(st.st_mode):
1651 + if not (userfetch or userpriv):
1652 + continue
1653 + if _userpriv_test_write_file(mysettings, write_test_file):
1654 + continue
1655 +
1656 + _userpriv_test_write_file_cache.pop(write_test_file, None)
1657 + if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
1658 + if st is None:
1659 + # The directory has just been created
1660 + # and therefore it must be empty.
1661 + continue
1662 + writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
1663 + noiselevel=-1)
1664 + def onerror(e):
1665 + raise # bail out on the first error that occurs during recursion
1666 + if not apply_recursive_permissions(mydir,
1667 + gid=dir_gid, dirmode=dirmode, dirmask=modemask,
1668 + filemode=filemode, filemask=modemask, onerror=onerror):
1669 + raise OperationNotPermitted(
1670 + _("Failed to apply recursive permissions for the portage group."))
1671 + except PortageException as e:
1672 + if not os.path.isdir(mysettings["DISTDIR"]):
1673 + writemsg("!!! %s\n" % str(e), noiselevel=-1)
1674 + writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
1675 + writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)
1676 +
1677 + if can_fetch and \
1678 + not fetch_to_ro and \
1679 + not os.access(mysettings["DISTDIR"], os.W_OK):
1680 + writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
1681 + noiselevel=-1)
1682 + can_fetch = False
1683 +
1684 + if can_fetch and use_locks and locks_in_subdir:
1685 + distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
1686 + if not os.access(distlocks_subdir, os.W_OK):
1687 + writemsg(_("!!! No write access to write to %s. Aborting.\n") % distlocks_subdir,
1688 + noiselevel=-1)
1689 + return 0
1690 + del distlocks_subdir
1691 +
1692 + distdir_writable = can_fetch and not fetch_to_ro
1693 + failed_files = set()
1694 + restrict_fetch_msg = False
1695 +
1696 + for myfile in filedict:
1697 + """
1698 + fetched status
1699 + 0 nonexistent
1700 + 1 partially downloaded
1701 + 2 completely downloaded
1702 + """
1703 + fetched = 0
1704 +
1705 + orig_digests = mydigests.get(myfile, {})
1706 + size = orig_digests.get("size")
1707 + if size == 0:
1708 + # Zero-byte distfiles are always invalid, so discard their digests.
1709 + del mydigests[myfile]
1710 + orig_digests.clear()
1711 + size = None
1712 + pruned_digests = orig_digests
1713 + if parallel_fetchonly:
1714 + pruned_digests = {}
1715 + if size is not None:
1716 + pruned_digests["size"] = size
1717 +
1718 + myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
1719 + has_space = True
1720 + has_space_superuser = True
1721 + file_lock = None
1722 + if listonly:
1723 + writemsg_stdout("\n", noiselevel=-1)
1724 + else:
1725 + # check if there is enough space in DISTDIR to completely store myfile
1726 + # overestimate the filesize so we aren't bitten by FS overhead
1727 + if size is not None and hasattr(os, "statvfs"):
1728 + vfs_stat = os.statvfs(mysettings["DISTDIR"])
1729 + try:
1730 + mysize = os.stat(myfile_path).st_size
1731 + except OSError as e:
1732 + if e.errno not in (errno.ENOENT, errno.ESTALE):
1733 + raise
1734 + del e
1735 + mysize = 0
1736 + if (size - mysize + vfs_stat.f_bsize) >= \
1737 + (vfs_stat.f_bsize * vfs_stat.f_bavail):
1738 +
1739 + if (size - mysize + vfs_stat.f_bsize) >= \
1740 + (vfs_stat.f_bsize * vfs_stat.f_bfree):
1741 + has_space_superuser = False
1742 +
1743 + if not has_space_superuser:
1744 + has_space = False
1745 + elif secpass < 2:
1746 + has_space = False
1747 + elif userfetch:
1748 + has_space = False
1749 +
1750 + if not has_space:
1751 + writemsg(_("!!! Insufficient space to store %s in %s\n") % \
1752 + (myfile, mysettings["DISTDIR"]), noiselevel=-1)
1753 +
1754 + if has_space_superuser:
1755 + writemsg(_("!!! Insufficient privileges to use "
1756 + "remaining space.\n"), noiselevel=-1)
1757 + if userfetch:
1758 + writemsg(_("!!! You may set FEATURES=\"-userfetch\""
1759 + " in /etc/make.conf in order to fetch with\n"
1760 + "!!! superuser privileges.\n"), noiselevel=-1)
1761 +
1762 + if distdir_writable and use_locks:
1763 +
1764 + lock_kwargs = {}
1765 + if fetchonly:
1766 + lock_kwargs["flags"] = os.O_NONBLOCK
1767 +
1768 + try:
1769 + file_lock = lockfile(myfile_path,
1770 + wantnewlockfile=1, **lock_kwargs)
1771 + except TryAgain:
1772 + writemsg(_(">>> File '%s' is already locked by "
1773 + "another fetcher. Continuing...\n") % myfile,
1774 + noiselevel=-1)
1775 + continue
1776 + try:
1777 + if not listonly:
1778 +
1779 + eout = EOutput()
1780 + eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
1781 + match, mystat = _check_distfile(
1782 + myfile_path, pruned_digests, eout)
1783 + if match:
1784 + if distdir_writable:
1785 + try:
1786 + apply_secpass_permissions(myfile_path,
1787 + gid=portage_gid, mode=0o664, mask=0o2,
1788 + stat_cached=mystat)
1789 + except PortageException as e:
1790 + if not os.access(myfile_path, os.R_OK):
1791 + writemsg(_("!!! Failed to adjust permissions:"
1792 + " %s\n") % str(e), noiselevel=-1)
1793 + del e
1794 + continue
1795 +
1796 + if distdir_writable and mystat is None:
1797 + # Remove broken symlinks if necessary.
1798 + try:
1799 + os.unlink(myfile_path)
1800 + except OSError:
1801 + pass
1802 +
1803 + if mystat is not None:
1804 + if stat.S_ISDIR(mystat.st_mode):
1805 + writemsg_level(
1806 + _("!!! Unable to fetch file since "
1807 + "a directory is in the way: \n"
1808 + "!!! %s\n") % myfile_path,
1809 + level=logging.ERROR, noiselevel=-1)
1810 + return 0
1811 +
1812 + if mystat.st_size == 0:
1813 + if distdir_writable:
1814 + try:
1815 + os.unlink(myfile_path)
1816 + except OSError:
1817 + pass
1818 + elif distdir_writable:
1819 + if mystat.st_size < fetch_resume_size and \
1820 + mystat.st_size < size:
1821 + # If the file already exists and the size does not
1822 + # match the existing digests, it may be that the
1823 + # user is attempting to update the digest. In this
1824 + # case, the digestgen() function will advise the
1825 + # user to use `ebuild --force foo.ebuild manifest`
1826 + # in order to force the old digests to be replaced.
1827 + # Since the user may want to keep this file, rename
1828 + # it instead of deleting it.
1829 + writemsg(_(">>> Renaming distfile with size "
1830 + "%d (smaller than " "PORTAGE_FETCH_RESU"
1831 + "ME_MIN_SIZE)\n") % mystat.st_size)
1832 + temp_filename = \
1833 + _checksum_failure_temp_file(
1834 + mysettings["DISTDIR"], myfile)
1835 + writemsg_stdout(_("Refetching... "
1836 + "File renamed to '%s'\n\n") % \
1837 + temp_filename, noiselevel=-1)
1838 + elif mystat.st_size >= size:
1839 + temp_filename = \
1840 + _checksum_failure_temp_file(
1841 + mysettings["DISTDIR"], myfile)
1842 + writemsg_stdout(_("Refetching... "
1843 + "File renamed to '%s'\n\n") % \
1844 + temp_filename, noiselevel=-1)
1845 +
1846 + if distdir_writable and ro_distdirs:
1847 + readonly_file = None
1848 + for x in ro_distdirs:
1849 + filename = os.path.join(x, myfile)
1850 + match, mystat = _check_distfile(
1851 + filename, pruned_digests, eout)
1852 + if match:
1853 + readonly_file = filename
1854 + break
1855 + if readonly_file is not None:
1856 + try:
1857 + os.unlink(myfile_path)
1858 + except OSError as e:
1859 + if e.errno not in (errno.ENOENT, errno.ESTALE):
1860 + raise
1861 + del e
1862 + os.symlink(readonly_file, myfile_path)
1863 + continue
1864 +
1865 + if fsmirrors and not os.path.exists(myfile_path) and has_space:
1866 + for mydir in fsmirrors:
1867 + mirror_file = os.path.join(mydir, myfile)
1868 + try:
1869 + shutil.copyfile(mirror_file, myfile_path)
1870 + writemsg(_("Local mirror has file: %s\n") % myfile)
1871 + break
1872 + except (IOError, OSError) as e:
1873 + if e.errno not in (errno.ENOENT, errno.ESTALE):
1874 + raise
1875 + del e
1876 +
1877 + try:
1878 + mystat = os.stat(myfile_path)
1879 + except OSError as e:
1880 + if e.errno not in (errno.ENOENT, errno.ESTALE):
1881 + raise
1882 + del e
1883 + else:
1884 + try:
1885 + apply_secpass_permissions(
1886 + myfile_path, gid=portage_gid, mode=0o664, mask=0o2,
1887 + stat_cached=mystat)
1888 + except PortageException as e:
1889 + if not os.access(myfile_path, os.R_OK):
1890 + writemsg(_("!!! Failed to adjust permissions:"
1891 + " %s\n") % str(e), noiselevel=-1)
1892 +
1893 + # If the file is empty then it's obviously invalid. Remove
1894 + # the empty file and try to download if possible.
1895 + if mystat.st_size == 0:
1896 + if distdir_writable:
1897 + try:
1898 + os.unlink(myfile_path)
1899 + except EnvironmentError:
1900 + pass
1901 + elif myfile not in mydigests:
1902 + # We don't have a digest, but the file exists. We must
1903 + # assume that it is fully downloaded.
1904 + continue
1905 + else:
1906 + if mystat.st_size < mydigests[myfile]["size"] and \
1907 + not restrict_fetch:
1908 + fetched = 1 # Try to resume this download.
1909 + elif parallel_fetchonly and \
1910 + mystat.st_size == mydigests[myfile]["size"]:
1911 + eout = EOutput()
1912 + eout.quiet = \
1913 + mysettings.get("PORTAGE_QUIET") == "1"
1914 + eout.ebegin(
1915 + "%s size ;-)" % (myfile, ))
1916 + eout.eend(0)
1917 + continue
1918 + else:
1919 + verified_ok, reason = verify_all(
1920 + myfile_path, mydigests[myfile])
1921 + if not verified_ok:
1922 + writemsg(_("!!! Previously fetched"
1923 + " file: '%s'\n") % myfile, noiselevel=-1)
1924 + writemsg(_("!!! Reason: %s\n") % reason[0],
1925 + noiselevel=-1)
1926 + writemsg(_("!!! Got: %s\n"
1927 + "!!! Expected: %s\n") % \
1928 + (reason[1], reason[2]), noiselevel=-1)
1929 + if reason[0] == _("Insufficient data for checksum verification"):
1930 + return 0
1931 + if distdir_writable:
1932 + temp_filename = \
1933 + _checksum_failure_temp_file(
1934 + mysettings["DISTDIR"], myfile)
1935 + writemsg_stdout(_("Refetching... "
1936 + "File renamed to '%s'\n\n") % \
1937 + temp_filename, noiselevel=-1)
1938 + else:
1939 + eout = EOutput()
1940 + eout.quiet = \
1941 + mysettings.get("PORTAGE_QUIET", None) == "1"
1942 + digests = mydigests.get(myfile)
1943 + if digests:
1944 + digests = list(digests)
1945 + digests.sort()
1946 + eout.ebegin(
1947 + "%s %s ;-)" % (myfile, " ".join(digests)))
1948 + eout.eend(0)
1949 + continue # fetch any remaining files
1950 +
1951 + # Create a reversed list since that is optimal for list.pop().
1952 + uri_list = filedict[myfile][:]
1953 + uri_list.reverse()
1954 + checksum_failure_count = 0
1955 + tried_locations = set()
1956 + while uri_list:
1957 + loc = uri_list.pop()
1958 + # Eliminate duplicates here in case we've switched to
1959 + # "primaryuri" mode on the fly due to a checksum failure.
1960 + if loc in tried_locations:
1961 + continue
1962 + tried_locations.add(loc)
1963 + if listonly:
1964 + writemsg_stdout(loc+" ", noiselevel=-1)
1965 + continue
1966 + # allow different fetchcommands per protocol
1967 + protocol = loc[0:loc.find("://")]
1968 +
1969 + missing_file_param = False
1970 + fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
1971 + fetchcommand = mysettings.get(fetchcommand_var)
1972 + if fetchcommand is None:
1973 + fetchcommand_var = "FETCHCOMMAND"
1974 + fetchcommand = mysettings.get(fetchcommand_var)
1975 + if fetchcommand is None:
1976 + writemsg_level(
1977 + _("!!! %s is unset. It should "
1978 + "have been defined in\n!!! %s/make.globals.\n") \
1979 + % (fetchcommand_var, GLOBAL_CONFIG_PATH),
1980 + level=logging.ERROR, noiselevel=-1)
1981 + return 0
1982 + if "${FILE}" not in fetchcommand:
1983 + writemsg_level(
1984 + _("!!! %s does not contain the required ${FILE}"
1985 + " parameter.\n") % fetchcommand_var,
1986 + level=logging.ERROR, noiselevel=-1)
1987 + missing_file_param = True
1988 +
1989 + resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
1990 + resumecommand = mysettings.get(resumecommand_var)
1991 + if resumecommand is None:
1992 + resumecommand_var = "RESUMECOMMAND"
1993 + resumecommand = mysettings.get(resumecommand_var)
1994 + if resumecommand is None:
1995 + writemsg_level(
1996 + _("!!! %s is unset. It should "
1997 + "have been defined in\n!!! %s/make.globals.\n") \
1998 + % (resumecommand_var, GLOBAL_CONFIG_PATH),
1999 + level=logging.ERROR, noiselevel=-1)
2000 + return 0
2001 + if "${FILE}" not in resumecommand:
2002 + writemsg_level(
2003 + _("!!! %s does not contain the required ${FILE}"
2004 + " parameter.\n") % resumecommand_var,
2005 + level=logging.ERROR, noiselevel=-1)
2006 + missing_file_param = True
2007 +
2008 + if missing_file_param:
2009 + writemsg_level(
2010 + _("!!! Refer to the make.conf(5) man page for "
2011 + "information about how to\n!!! correctly specify "
2012 + "FETCHCOMMAND and RESUMECOMMAND.\n"),
2013 + level=logging.ERROR, noiselevel=-1)
2014 + if myfile != os.path.basename(loc):
2015 + return 0
2016 +
2017 + if not can_fetch:
2018 + if fetched != 2:
2019 + try:
2020 + mysize = os.stat(myfile_path).st_size
2021 + except OSError as e:
2022 + if e.errno not in (errno.ENOENT, errno.ESTALE):
2023 + raise
2024 + del e
2025 + mysize = 0
2026 +
2027 + if mysize == 0:
2028 + writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
2029 + noiselevel=-1)
2030 + elif size is None or size > mysize:
2031 + writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
2032 + noiselevel=-1)
2033 + else:
2034 + writemsg(_("!!! File %s is incorrect size, "
2035 + "but unable to retry.\n") % myfile, noiselevel=-1)
2036 + return 0
2037 + else:
2038 + continue
2039 +
2040 + if fetched != 2 and has_space:
2041 + #we either need to resume or start the download
2042 + if fetched == 1:
2043 + try:
2044 + mystat = os.stat(myfile_path)
2045 + except OSError as e:
2046 + if e.errno not in (errno.ENOENT, errno.ESTALE):
2047 + raise
2048 + del e
2049 + fetched = 0
2050 + else:
2051 + if mystat.st_size < fetch_resume_size:
2052 + writemsg(_(">>> Deleting distfile with size "
2053 + "%d (smaller than " "PORTAGE_FETCH_RESU"
2054 + "ME_MIN_SIZE)\n") % mystat.st_size)
2055 + try:
2056 + os.unlink(myfile_path)
2057 + except OSError as e:
2058 + if e.errno not in \
2059 + (errno.ENOENT, errno.ESTALE):
2060 + raise
2061 + del e
2062 + fetched = 0
2063 + if fetched == 1:
2064 + #resume mode:
2065 + writemsg(_(">>> Resuming download...\n"))
2066 + locfetch=resumecommand
2067 + command_var = resumecommand_var
2068 + else:
2069 + #normal mode:
2070 + locfetch=fetchcommand
2071 + command_var = fetchcommand_var
2072 + writemsg_stdout(_(">>> Downloading '%s'\n") % \
2073 + re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
2074 + variables = {
2075 + "DISTDIR": mysettings["DISTDIR"],
2076 + "URI": loc,
2077 + "FILE": myfile
2078 + }
2079 +
2080 + myfetch = shlex_split(locfetch)
2081 + myfetch = [varexpand(x, mydict=variables) for x in myfetch]
2082 + myret = -1
2083 + try:
2084 +
2085 + myret = _spawn_fetch(mysettings, myfetch)
2086 +
2087 + finally:
2088 + try:
2089 + apply_secpass_permissions(myfile_path,
2090 + gid=portage_gid, mode=0o664, mask=0o2)
2091 + except FileNotFound:
2092 + pass
2093 + except PortageException as e:
2094 + if not os.access(myfile_path, os.R_OK):
2095 + writemsg(_("!!! Failed to adjust permissions:"
2096 + " %s\n") % str(e), noiselevel=-1)
2097 + del e
2098 +
2099 + # If the file is empty then it's obviously invalid. Don't
2100 + # trust the return value from the fetcher. Remove the
2101 + # empty file and try to download again.
2102 + try:
2103 + if os.stat(myfile_path).st_size == 0:
2104 + os.unlink(myfile_path)
2105 + fetched = 0
2106 + continue
2107 + except EnvironmentError:
2108 + pass
2109 +
2110 + if mydigests is not None and myfile in mydigests:
2111 + try:
2112 + mystat = os.stat(myfile_path)
2113 + except OSError as e:
2114 + if e.errno not in (errno.ENOENT, errno.ESTALE):
2115 + raise
2116 + del e
2117 + fetched = 0
2118 + else:
2119 +
2120 + if stat.S_ISDIR(mystat.st_mode):
2121 + # This can happen if FETCHCOMMAND erroneously
2122 + # contains wget's -P option where it should
2123 + # instead have -O.
2124 + writemsg_level(
2125 + _("!!! The command specified in the "
2126 + "%s variable appears to have\n!!! "
2127 + "created a directory instead of a "
2128 + "normal file.\n") % command_var,
2129 + level=logging.ERROR, noiselevel=-1)
2130 + writemsg_level(
2131 + _("!!! Refer to the make.conf(5) "
2132 + "man page for information about how "
2133 + "to\n!!! correctly specify "
2134 + "FETCHCOMMAND and RESUMECOMMAND.\n"),
2135 + level=logging.ERROR, noiselevel=-1)
2136 + return 0
2137 +
2138 + # no exception? file exists. let digestcheck() report
2139 +					# appropriately for size or checksum errors
2140 +
2141 + # If the fetcher reported success and the file is
2142 + # too small, it's probably because the digest is
2143 + # bad (upstream changed the distfile). In this
2144 + # case we don't want to attempt to resume. Show a
2145 +					# digest verification failure so that the user gets
2146 + # a clue about what just happened.
2147 + if myret != os.EX_OK and \
2148 + mystat.st_size < mydigests[myfile]["size"]:
2149 + # Fetch failed... Try the next one... Kill 404 files though.
2150 + if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
2151 + html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
2152 + if html404.search(codecs.open(
2153 + _unicode_encode(myfile_path,
2154 + encoding=_encodings['fs'], errors='strict'),
2155 + mode='r', encoding=_encodings['content'], errors='replace'
2156 + ).read()):
2157 + try:
2158 + os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2159 + writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
2160 + fetched = 0
2161 + continue
2162 + except (IOError, OSError):
2163 + pass
2164 + fetched = 1
2165 + continue
2166 + if True:
2167 + # File is the correct size--check the checksums for the fetched
2168 + # file NOW, for those users who don't have a stable/continuous
2169 + # net connection. This way we have a chance to try to download
2170 + # from another mirror...
2171 + verified_ok,reason = verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
2172 + if not verified_ok:
2173 + print(reason)
2174 + writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
2175 + noiselevel=-1)
2176 + writemsg(_("!!! Reason: %s\n") % reason[0],
2177 + noiselevel=-1)
2178 + writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
2179 + (reason[1], reason[2]), noiselevel=-1)
2180 + if reason[0] == _("Insufficient data for checksum verification"):
2181 + return 0
2182 + temp_filename = \
2183 + _checksum_failure_temp_file(
2184 + mysettings["DISTDIR"], myfile)
2185 + writemsg_stdout(_("Refetching... "
2186 + "File renamed to '%s'\n\n") % \
2187 + temp_filename, noiselevel=-1)
2188 + fetched=0
2189 + checksum_failure_count += 1
2190 + if checksum_failure_count == \
2191 + checksum_failure_primaryuri:
2192 + # Switch to "primaryuri" mode in order
2193 +							# to increase the probability
2194 +							# of success.
2195 + primaryuris = \
2196 + primaryuri_dict.get(myfile)
2197 + if primaryuris:
2198 + uri_list.extend(
2199 + reversed(primaryuris))
2200 + if checksum_failure_count >= \
2201 + checksum_failure_max_tries:
2202 + break
2203 + else:
2204 + eout = EOutput()
2205 + eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
2206 + digests = mydigests.get(myfile)
2207 + if digests:
2208 + eout.ebegin("%s %s ;-)" % \
2209 + (myfile, " ".join(sorted(digests))))
2210 + eout.eend(0)
2211 + fetched=2
2212 + break
2213 + else:
2214 + if not myret:
2215 + fetched=2
2216 + break
2217 + elif mydigests!=None:
2218 + writemsg(_("No digest file available and download failed.\n\n"),
2219 + noiselevel=-1)
2220 + finally:
2221 + if use_locks and file_lock:
2222 + unlockfile(file_lock)
2223 +
2224 + if listonly:
2225 + writemsg_stdout("\n", noiselevel=-1)
2226 + if fetched != 2:
2227 + if restrict_fetch and not restrict_fetch_msg:
2228 + restrict_fetch_msg = True
2229 + msg = _("\n!!! %s/%s"
2230 + " has fetch restriction turned on.\n"
2231 + "!!! This probably means that this "
2232 + "ebuild's files must be downloaded\n"
2233 + "!!! manually. See the comments in"
2234 + " the ebuild for more information.\n\n") % \
2235 + (mysettings["CATEGORY"], mysettings["PF"])
2236 + writemsg_level(msg,
2237 + level=logging.ERROR, noiselevel=-1)
2238 + have_builddir = "PORTAGE_BUILDDIR" in mysettings and \
2239 + os.path.isdir(mysettings["PORTAGE_BUILDDIR"])
2240 +
2241 + global_tmpdir = mysettings["PORTAGE_TMPDIR"]
2242 + private_tmpdir = None
2243 + if not parallel_fetchonly and not have_builddir:
2244 + # When called by digestgen(), it's normal that
2245 + # PORTAGE_BUILDDIR doesn't exist. It's helpful
2246 + # to show the pkg_nofetch output though, so go
2247 + # ahead and create a temporary PORTAGE_BUILDDIR.
2248 + # Use a temporary config instance to avoid altering
2249 + # the state of the one that's been passed in.
2250 + mysettings = config(clone=mysettings)
2251 + from tempfile import mkdtemp
2252 + try:
2253 + private_tmpdir = mkdtemp("", "._portage_fetch_.",
2254 + global_tmpdir)
2255 + except OSError as e:
2256 + if e.errno != PermissionDenied.errno:
2257 + raise
2258 + raise PermissionDenied(global_tmpdir)
2259 + mysettings["PORTAGE_TMPDIR"] = private_tmpdir
2260 + mysettings.backup_changes("PORTAGE_TMPDIR")
2261 + debug = mysettings.get("PORTAGE_DEBUG") == "1"
2262 + doebuild_environment(mysettings["EBUILD"], "fetch",
2263 + mysettings["ROOT"], mysettings, debug, 1, None)
2264 + prepare_build_dirs(mysettings["ROOT"], mysettings, 0)
2265 + have_builddir = True
2266 +
2267 + if not parallel_fetchonly and have_builddir:
2268 + # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
2269 + # ensuring sane $PWD (bug #239560) and storing elog
2270 + # messages. Therefore, calling code needs to ensure that
2271 + # PORTAGE_BUILDDIR is already clean and locked here.
2272 +
2273 + # All the pkg_nofetch goes to stderr since it's considered
2274 + # to be an error message.
2275 + fd_pipes = {
2276 + 0 : sys.stdin.fileno(),
2277 + 1 : sys.stderr.fileno(),
2278 + 2 : sys.stderr.fileno(),
2279 + }
2280 +
2281 + ebuild_phase = mysettings.get("EBUILD_PHASE")
2282 + try:
2283 + mysettings["EBUILD_PHASE"] = "nofetch"
2284 + spawn(_shell_quote(EBUILD_SH_BINARY) + \
2285 + " nofetch", mysettings, fd_pipes=fd_pipes)
2286 + finally:
2287 + if ebuild_phase is None:
2288 + mysettings.pop("EBUILD_PHASE", None)
2289 + else:
2290 + mysettings["EBUILD_PHASE"] = ebuild_phase
2291 + if private_tmpdir is not None:
2292 + shutil.rmtree(private_tmpdir)
2293 +
2294 + elif restrict_fetch:
2295 + pass
2296 + elif listonly:
2297 + pass
2298 + elif not filedict[myfile]:
2299 + writemsg(_("Warning: No mirrors available for file"
2300 + " '%s'\n") % (myfile), noiselevel=-1)
2301 + else:
2302 + writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
2303 + noiselevel=-1)
2304 +
2305 + if listonly:
2306 + continue
2307 + elif fetchonly:
2308 + failed_files.add(myfile)
2309 + continue
2310 + return 0
2311 + if failed_files:
2312 + return 0
2313 + return 1
2314
2315
2316 Property changes on: main/trunk/pym/portage/package/ebuild/fetch.py
2317 ___________________________________________________________________
2318 Added: svn:keywords
2319 + Id