1 |
Author: zmedico |
2 |
Date: 2010-02-22 02:39:48 +0000 (Mon, 22 Feb 2010) |
3 |
New Revision: 15422 |
4 |
|
5 |
Added: |
6 |
main/trunk/pym/portage/util/__init__.py |
7 |
Removed: |
8 |
main/trunk/pym/portage/util.py |
9 |
Log: |
10 |
Move the portage.util module into a directory, for splitting into smaller files. |
11 |
|
12 |
|
13 |
Copied: main/trunk/pym/portage/util/__init__.py (from rev 15421, main/trunk/pym/portage/util.py) |
14 |
=================================================================== |
15 |
--- main/trunk/pym/portage/util/__init__.py (rev 0) |
16 |
+++ main/trunk/pym/portage/util/__init__.py 2010-02-22 02:39:48 UTC (rev 15422) |
17 |
@@ -0,0 +1,1429 @@ |
18 |
+# Copyright 2004-2009 Gentoo Foundation |
19 |
+# Distributed under the terms of the GNU General Public License v2 |
20 |
+# $Id$ |
21 |
+ |
22 |
+__all__ = ['apply_permissions', 'apply_recursive_permissions', |
23 |
+ 'apply_secpass_permissions', 'apply_stat_permissions', 'atomic_ofstream', |
24 |
+ 'cmp_sort_key', 'ConfigProtect', 'dump_traceback', 'ensure_dirs', |
25 |
+ 'find_updated_config_files', 'getconfig', 'getlibpaths', 'grabdict', |
26 |
+ 'grabdict_package', 'grabfile', 'grabfile_package', 'grablines', |
27 |
+ 'initialize_logger', 'LazyItemsDict', 'map_dictlist_vals', |
28 |
+ 'new_protect_filename', 'normalize_path', 'pickle_read', 'stack_dictlist', |
29 |
+ 'stack_dicts', 'stack_lists', 'unique_array', 'varexpand', 'write_atomic', |
30 |
+ 'writedict', 'writemsg', 'writemsg_level', 'writemsg_stdout'] |
31 |
+ |
32 |
+try: |
33 |
+ from subprocess import getstatusoutput as subprocess_getstatusoutput |
34 |
+except ImportError: |
35 |
+ from commands import getstatusoutput as subprocess_getstatusoutput |
36 |
+import codecs |
37 |
+import errno |
38 |
+import logging |
39 |
+import re |
40 |
+import shlex |
41 |
+import stat |
42 |
+import string |
43 |
+import sys |
44 |
+ |
45 |
+import portage |
46 |
+from portage import StringIO |
47 |
+from portage import os |
48 |
+from portage import _encodings |
49 |
+from portage import _os_merge |
50 |
+from portage import _unicode_encode |
51 |
+from portage import _unicode_decode |
52 |
+from portage.exception import InvalidAtom, PortageException, FileNotFound, \ |
53 |
+ OperationNotPermitted, PermissionDenied, ReadOnlyFileSystem |
54 |
+from portage.dep import Atom, isvalidatom |
55 |
+from portage.localization import _ |
56 |
+from portage.proxy.objectproxy import ObjectProxy |
57 |
+from portage.cache.mappings import UserDict |
58 |
+ |
59 |
+try: |
60 |
+ import cPickle as pickle |
61 |
+except ImportError: |
62 |
+ import pickle |
63 |
+ |
64 |
+noiselimit = 0 |
65 |
+ |
66 |
def initialize_logger(level=logging.WARN):
	"""Sets up basic logging of portage activities
	Args:
		level: the level to emit messages at ('info', 'debug', 'warning' ...)
	Returns:
		None
	"""
	# Pass the caller-supplied level through to basicConfig; the previous
	# code hardcoded logging.WARN here, silently ignoring the "level"
	# argument, so callers could never enable debug/info output.
	logging.basicConfig(level=level,
		format='[%(levelname)-4s] %(message)s')
74 |
+ |
75 |
def writemsg(mystr, noiselevel=0, fd=None):
	"""Emit a warning/debug message to *fd* (stderr by default), but only
	when *noiselevel* does not exceed the module-wide noiselimit."""
	out = sys.stderr if fd is None else fd
	if noiselevel > noiselimit:
		return
	# Encode up front to avoid a potential UnicodeEncodeError in write().
	data = _unicode_encode(mystr,
		encoding=_encodings['stdio'], errors='backslashreplace')
	if sys.hexversion >= 0x3000000:
		# On Python 3 the bytes must go to the underlying binary buffer.
		out = out.buffer
	out.write(data)
	out.flush()
88 |
+ |
89 |
def writemsg_stdout(mystr, noiselevel=0):
	"""Print a message to stdout, honoring the noiselimit setting."""
	return writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
92 |
+ |
93 |
def writemsg_level(msg, level=0, noiselevel=0):
	"""
	Show a message for the given level as defined by the logging module
	(default is 0). When level >= logging.WARNING then the message is
	sent to stderr, otherwise it is sent to stdout. The noiselevel is
	passed directly to writemsg().

	@type msg: str
	@param msg: a message string, including newline if appropriate
	@type level: int
	@param level: a numeric logging level (see the logging module)
	@type noiselevel: int
	@param noiselevel: passed directly to writemsg
	"""
	target = sys.stdout if level < logging.WARNING else sys.stderr
	writemsg(msg, noiselevel=noiselevel, fd=target)
112 |
+ |
113 |
def normalize_path(mypath):
	"""
	os.path.normpath("//foo") returns "//foo" instead of "/foo"
	We dislike this behavior so we create our own normpath func
	to fix it.
	"""
	# Work with either str or bytes paths on Python 3.
	if sys.hexversion >= 0x3000000 and isinstance(mypath, bytes):
		sep = os.path.sep.encode()
	else:
		sep = os.path.sep

	if not mypath.startswith(sep):
		return os.path.normpath(mypath)
	# posixpath.normpath collapses 3 or more leading slashes to just 1,
	# so prepend two extra separators before normalizing.
	return os.path.normpath(sep + sep + mypath)
129 |
+ |
130 |
def grabfile(myfilename, compat_level=0, recursive=0):
	"""This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
	begins with a #, it is ignored, as are empty lines"""

	result = []
	for raw in grablines(myfilename, recursive):
		# Collapse runs of whitespace into single spaces and strip the ends.
		line = _unicode_decode(' ').join(raw.split())
		if not line:
			continue
		if line[0] == "#":
			# Check if we have a compat-level string. BC-integration data.
			# '##COMPAT==>N<==' 'some string attached to it'
			head, sep, payload = line.partition("<==")
			if sep:
				marker = head.split("##COMPAT==>")
				if len(marker) == 2 and compat_level >= int(marker[1]):
					# It's a compat line, and the key matches.
					result.append(payload)
			# Every other comment line is dropped.
			continue
		result.append(line)
	return result
158 |
+ |
159 |
def map_dictlist_vals(func, myDict):
	"""Performs a function on each value of each key in a dictlist.
	Returns a new dictlist.

	@param func: callable applied to each element of each value list
	@param myDict: mapping of key -> list of values
	@returns: a new dict with the same keys and mapped value lists
	"""
	# The original initialized each key to [] and then immediately
	# rebound it; a single dict comprehension expresses the mapping
	# without the dead assignment.
	return {key: [func(x) for x in values] for key, values in myDict.items()}
167 |
+ |
168 |
def stack_dictlist(original_dicts, incremental=0, incrementals=None, ignore_none=0):
	"""
	Stacks an array of dict-types into one array. Optionally merging or
	overwriting matching key/value pairs for the dict[key]->list.
	Returns a single dict. Higher index in lists is preferenced.

	Example usage:
	>>> from portage.util import stack_dictlist
	>>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
	>>> {'a':'b','x':'y'}
	>>> print stack_dictlist( [{'a':'b'},{'a':'c'}], incremental = True )
	>>> {'a':['b','c'] }
	>>> a = {'KEYWORDS':['x86','alpha']}
	>>> b = {'KEYWORDS':['-x86']}
	>>> print stack_dictlist( [a,b] )
	>>> { 'KEYWORDS':['x86','alpha','-x86']}
	>>> print stack_dictlist( [a,b], incremental=True)
	>>> { 'KEYWORDS':['alpha'] }
	>>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
	>>> { 'KEYWORDS':['alpha'] }

	@param original_dicts a list of (dictionary objects or None)
	@type list
	@param incremental True or false depending on whether new keys should overwrite
	   keys which already exist.
	@type boolean
	@param incrementals A list of items that should be incremental (-foo removes foo from
	   the returned dict).
	@type list
	@param ignore_none Appears to be ignored, but probably was used long long ago.
	@type boolean

	"""
	# Use None as the default instead of a mutable [], which would be
	# shared across calls (classic mutable-default pitfall).
	if incrementals is None:
		incrementals = []
	final_dict = {}
	for mydict in original_dicts:
		if mydict is None:
			continue
		for y in mydict:
			if not y in final_dict:
				final_dict[y] = []

			for thing in mydict[y]:
				if thing:
					if incremental or y in incrementals:
						if thing == "-*":
							# Wildcard negation clears everything seen so far.
							final_dict[y] = []
							continue
						elif thing[:1] == '-':
							# '-foo' removes a previously stacked 'foo'.
							try:
								final_dict[y].remove(thing[1:])
							except ValueError:
								pass
							continue
					if thing not in final_dict[y]:
						final_dict[y].append(thing)
			# Drop keys whose value list ended up empty.
			if y in final_dict and not final_dict[y]:
				del final_dict[y]
	return final_dict
226 |
+ |
227 |
def stack_dicts(dicts, incremental=0, incrementals=None, ignore_none=0):
	"""Stacks an array of dict-types into one array. Optionally merging or
	overwriting matching key/value pairs for the dict[key]->string.
	Returns a single dict.

	@param dicts: iterable of dicts (None/empty entries are skipped)
	@param incremental: when true, matching keys are space-joined instead
		of overwritten
	@param incrementals: keys that are always space-joined
	@param ignore_none: unused, kept for backward compatibility
	"""
	# Use None as the default instead of a mutable [], which would be
	# shared across calls (classic mutable-default pitfall).
	if incrementals is None:
		incrementals = []
	final_dict = {}
	for mydict in dicts:
		if not mydict:
			continue
		for k, v in mydict.items():
			if k in final_dict and (incremental or (k in incrementals)):
				final_dict[k] += " " + v
			else:
				final_dict[k] = v
	return final_dict
241 |
+ |
242 |
def stack_lists(lists, incremental=1):
	"""Stacks an array of list-types into one array. Optionally removing
	distinct values using '-value' notation. Higher index is preferenced.

	all elements must be hashable."""

	# A dict is used as an ordered set: keys are the surviving elements.
	seen = {}
	for sub in lists:
		for item in sub:
			# Skip empty/falsy entries, like the original filter(None, ...).
			if not item:
				continue
			if not incremental:
				seen[item] = True
			elif item == "-*":
				# Wildcard negation discards everything stacked so far.
				seen.clear()
			elif item[:1] == '-':
				seen.pop(item[1:], None)
			else:
				seen[item] = True
	return list(seen)
261 |
+ |
262 |
def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
	"""
	This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary

	@param myfilename: file to process
	@type myfilename: string (path)
	@param juststrings: only return strings
	@type juststrings: Boolean (integer)
	@param empty: Ignore certain lines
	@type empty: Boolean (integer)
	@param recursive: Recursively grab ( support for /etc/portage/package.keywords/* and friends )
	@type recursive: Boolean (integer)
	@param incremental: Append to the return list, don't overwrite
	@type incremental: Boolean (integer)
	@rtype: Dictionary
	@returns:
	1.  Returns the lines in a file in a dictionary, for example:
		'sys-apps/portage x86 amd64 ppc'
		would return
		{ "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ]
		the line syntax is key : [list of values]
	"""
	newdict = {}
	for line in grablines(myfilename, recursive):
		# Comment lines are skipped outright.
		if line[0] == "#":
			continue
		fields = line.split()
		# With empty=0 a key needs at least one value; with empty=1 a
		# bare key is kept with an empty value list.
		if len(fields) < 2 and empty == 0:
			continue
		if len(fields) < 1 and empty == 1:
			continue
		if incremental:
			newdict.setdefault(fields[0], []).extend(fields[1:])
		else:
			newdict[fields[0]] = fields[1:]
	if juststrings:
		for key, values in newdict.items():
			newdict[key] = " ".join(values)
	return newdict
303 |
+ |
304 |
def grabdict_package(myfilename, juststrings=0, recursive=0):
	""" Does the same thing as grabdict except it validates keys
	with isvalidatom()"""
	raw = grabdict(myfilename, juststrings, empty=1, recursive=recursive)
	# Rebuild the dict, keeping only entries whose key parses as an Atom;
	# invalid atoms are reported and dropped.
	valid = {}
	for key, value in raw.items():
		try:
			atom = Atom(key)
		except InvalidAtom:
			writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, key),
				noiselevel=-1)
		else:
			valid[atom] = value
	return valid
321 |
+ |
322 |
def grabfile_package(myfilename, compatlevel=0, recursive=0):
	"""Read lines via grabfile() and validate each entry as a package atom.
	Entries with a special prefix ('-' negation, or '*' in a 'packages'
	file) are returned as their original strings; plain atoms are returned
	as Atom instances. Invalid atoms are reported and dropped."""
	mybasename = os.path.basename(myfilename)
	atoms = []
	for pkg_orig in grabfile(myfilename, compatlevel, recursive=recursive):
		pkg = pkg_orig
		# for packages and package.mask files
		if pkg[:1] == "-":
			pkg = pkg[1:]
		if pkg[:1] == '*' and mybasename == 'packages':
			pkg = pkg[1:]
		try:
			pkg = Atom(pkg)
		except InvalidAtom:
			writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, pkg),
				noiselevel=-1)
		else:
			if pkg_orig == str(pkg):
				# normal atom, so return as Atom instance
				atoms.append(pkg)
			else:
				# atom has special prefix, so return as string
				atoms.append(pkg_orig)
	return atoms
346 |
+ |
347 |
def grablines(myfilename, recursive=0):
	"""Return the lines of a file as a list. With recursive=1 a directory
	argument is walked: non-hidden, non-backup entries are read in sorted
	order and their lines concatenated. Unreadable files yield [] unless
	the error is a permission problem, which is re-raised."""
	mylines = []
	if recursive and os.path.isdir(myfilename):
		# Skip version-control bookkeeping directories.
		if myfilename in ("RCS", "CVS", "SCCS"):
			return mylines
		for entry in sorted(os.listdir(myfilename)):
			if entry.startswith(".") or entry.endswith("~"):
				continue
			mylines.extend(grablines(
				os.path.join(myfilename, entry), recursive))
	else:
		try:
			myfile = codecs.open(_unicode_encode(myfilename,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='replace')
			mylines = myfile.readlines()
			myfile.close()
		except IOError as e:
			# Surface permission problems; any other IOError (e.g. a
			# missing file) silently results in an empty list.
			if e.errno == PermissionDenied.errno:
				raise PermissionDenied(myfilename)
	return mylines
370 |
+ |
371 |
def writedict(mydict, myfilename, writekey=True):
	"""Writes out a dict to a file; writekey=0 mode doesn't write out
	the key and assumes all values are strings, not lists.
	Returns 1 on success, 0 on IOError (the partial write is aborted)."""
	myfile = None
	try:
		myfile = atomic_ofstream(myfilename)
		if writekey:
			for key in mydict:
				myfile.write("%s %s\n" % (key, " ".join(mydict[key])))
		else:
			for value in mydict.values():
				myfile.write(value + "\n")
		myfile.close()
	except IOError:
		# Roll back the atomic write so the target file is untouched.
		if myfile is not None:
			myfile.abort()
		return 0
	return 1
389 |
+ |
390 |
def shlex_split(s):
	"""
	This is equivalent to shlex.split but it temporarily encodes unicode
	strings to bytes since shlex.split() doesn't handle unicode strings.
	"""
	# Only Python 2 unicode objects need the encode/decode round-trip;
	# the version check short-circuits so `unicode` is never evaluated
	# on Python 3.
	needs_encode = sys.hexversion < 0x3000000 and isinstance(s, unicode)
	if needs_encode:
		s = _unicode_encode(s)
	parts = shlex.split(s)
	if needs_encode:
		parts = [_unicode_decode(token) for token in parts]
	return parts
402 |
+ |
403 |
class _tolerant_shlex(shlex.shlex):
	# A shlex subclass whose "source" handling tolerates unreadable files:
	# instead of propagating the error, it warns and substitutes an empty
	# stream so parsing can continue.
	def sourcehook(self, newfile):
		try:
			return shlex.shlex.sourcehook(self, newfile)
		except EnvironmentError as e:
			# Warn and hand back an empty file-like object in place of
			# the unreadable sourced file.
			writemsg(_("!!! Parse error in '%s': source command failed: %s\n") % \
				(self.infile, str(e)), noiselevel=-1)
			return (newfile, StringIO())
411 |
+ |
412 |
# A shell variable name is invalid if it starts with a digit or contains
# any character outside [a-zA-Z0-9_]; used by getconfig() key validation.
_invalid_var_name_re = re.compile(r'^\d|\W')
413 |
+ |
414 |
def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
	"""
	Parse a bash-like key=value configuration file and return a dict of
	the variables it defines.

	@param mycfg: path of the config file to parse
	@param tolerant: when true, parse errors return the keys collected
		so far instead of raising
	@param allow_sourcing: honor the shell "source" statement
	@param expand: when True, expand ${VAR} references via varexpand();
		a dict may be passed instead to seed the expansion environment
	@returns: dict of key -> value, or None if the file does not exist
	@raises ParseError: on malformed input when tolerant is false
	"""
	if isinstance(expand, dict):
		# Some existing variable definitions have been
		# passed in, for use in substitutions.
		expand_map = expand
		expand = True
	else:
		expand_map = {}
	mykeys = {}
	try:
		# Workaround for avoiding a silent error in shlex that
		# is triggered by a source statement at the end of the file without a
		# trailing newline after the source statement
		# NOTE: shex doesn't seem to support unicode objects
		# (produces spurious \0 characters with python-2.6.2)
		if sys.hexversion < 0x3000000:
			content = open(_unicode_encode(mycfg,
				encoding=_encodings['fs'], errors='strict'), 'rb').read()
		else:
			content = open(_unicode_encode(mycfg,
				encoding=_encodings['fs'], errors='strict'), mode='r',
				encoding=_encodings['content'], errors='replace').read()
		if content and content[-1] != '\n':
			content += '\n'
	except IOError as e:
		if e.errno == PermissionDenied.errno:
			raise PermissionDenied(mycfg)
		if e.errno != errno.ENOENT:
			writemsg("open('%s', 'r'): %s\n" % (mycfg, e), noiselevel=-1)
			raise
		# Missing file is not an error: signal it with None.
		return None
	try:
		if tolerant:
			shlex_class = _tolerant_shlex
		else:
			shlex_class = shlex.shlex
		# The default shlex.sourcehook() implementation
		# only joins relative paths when the infile
		# attribute is properly set.
		lex = shlex_class(content, infile=mycfg, posix=True)
		lex.wordchars = string.digits + string.ascii_letters + \
			"~!@#$%*_\:;?,./-+{}"
		lex.quotes="\"'"
		if allow_sourcing:
			lex.source="source"
		# Token stream is consumed as repeated (key, '=', value) triples.
		while 1:
			key=lex.get_token()
			if key == "export":
				# "export VAR=..." - skip the export keyword.
				key = lex.get_token()
			if key is None:
				#normal end of file
				break;
			equ=lex.get_token()
			if (equ==''):
				#unexpected end of file
				#lex.error_leader(self.filename,lex.lineno)
				if not tolerant:
					writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
						noiselevel=-1)
					raise Exception(_("ParseError: Unexpected EOF: %s: on/before line %s") % (mycfg, lex.lineno))
				else:
					return mykeys
			elif (equ!='='):
				#invalid token
				#lex.error_leader(self.filename,lex.lineno)
				if not tolerant:
					raise Exception(_("ParseError: Invalid token "
						"'%s' (not '='): %s: line %s") % \
						(equ, mycfg, lex.lineno))
				else:
					return mykeys
			val=lex.get_token()
			if val is None:
				#unexpected end of file
				#lex.error_leader(self.filename,lex.lineno)
				if not tolerant:
					writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
						noiselevel=-1)
					raise portage.exception.CorruptionError(_("ParseError: Unexpected EOF: %s: line %s") % (mycfg, lex.lineno))
				else:
					return mykeys
			key = _unicode_decode(key)
			val = _unicode_decode(val)

			if _invalid_var_name_re.search(key) is not None:
				# Reject keys that are not valid shell variable names.
				if not tolerant:
					raise Exception(_(
						"ParseError: Invalid variable name '%s': line %s") % \
						(key, lex.lineno - 1))
				writemsg(_("!!! Invalid variable name '%s': line %s in %s\n") \
					% (key, lex.lineno - 1, mycfg), noiselevel=-1)
				continue

			if expand:
				# Expand ${VAR} references and make this key available to
				# later expansions.
				mykeys[key] = varexpand(val, expand_map)
				expand_map[key] = mykeys[key]
			else:
				mykeys[key] = val
	except SystemExit as e:
		raise
	except Exception as e:
		# Any parse-time failure is re-raised as a ParseError that names
		# the offending file.
		raise portage.exception.ParseError(str(e)+" in "+mycfg)
	return mykeys
517 |
+ |
518 |
#cache expansions of constant strings
cexpand={}
def varexpand(mystring, mydict={}):
	"""
	New variable expansion code.  Preserves quotes, handles \n, etc.
	This code is used by the configfile code, as well as others (parser)
	This would be a good bunch of code to port to C.

	@param mystring: string to expand
	@param mydict: mapping of variable names to replacement values
		(read-only here, so the mutable default is harmless in practice)
	@returns: the expanded string, or "" on malformed ${...} syntax
	"""
	# Fast path: constant strings (no variables) are cached in cexpand.
	newstring = cexpand.get(" "+mystring, None)
	if newstring is not None:
		return newstring
	numvars=0
	# Prepend a space so mystring[pos-1] is always a valid lookbehind.
	mystring=" "+mystring
	#in single, double quotes
	insing=0
	indoub=0
	pos=1
	newstring=" "
	while (pos<len(mystring)):
		if (mystring[pos]=="'") and (mystring[pos-1]!="\\"):
			if (indoub):
				newstring=newstring+"'"
			else:
				newstring += "'" # Quote removal is handled by shlex.
			insing=not insing
			pos=pos+1
			continue
		elif (mystring[pos]=='"') and (mystring[pos-1]!="\\"):
			if (insing):
				newstring=newstring+'"'
			else:
				newstring += '"' # Quote removal is handled by shlex.
			indoub=not indoub
			pos=pos+1
			continue
		if (not insing):
			#expansion time
			if (mystring[pos]=="\n"):
				#convert newlines to spaces
				newstring=newstring+" "
				pos=pos+1
			elif (mystring[pos]=="\\"):
				#backslash expansion time
				if (pos+1>=len(mystring)):
					# Trailing backslash: keep it literally.
					newstring=newstring+mystring[pos]
					break
				else:
					a=mystring[pos+1]
					pos=pos+2
					# Translate the usual C-style escapes to their
					# control characters (octal literals).
					if a=='a':
						newstring=newstring+chr(0o07)
					elif a=='b':
						newstring=newstring+chr(0o10)
					elif a=='e':
						newstring=newstring+chr(0o33)
					elif (a=='f') or (a=='n'):
						newstring=newstring+chr(0o12)
					elif a=='r':
						newstring=newstring+chr(0o15)
					elif a=='t':
						newstring=newstring+chr(0o11)
					elif a=='v':
						newstring=newstring+chr(0o13)
					elif a!='\n':
						#remove backslash only, as bash does: this takes care of \\ and \' and \" as well
						newstring=newstring+mystring[pos-1:pos]
					continue
			elif (mystring[pos]=="$") and (mystring[pos-1]!="\\"):
				# Variable reference: $NAME or ${NAME}.
				pos=pos+1
				if mystring[pos]=="{":
					pos=pos+1
					braced=True
				else:
					braced=False
				myvstart=pos
				validchars=string.ascii_letters+string.digits+"_"
				# Scan the variable name.
				while mystring[pos] in validchars:
					if (pos+1)>=len(mystring):
						if braced:
							# Unterminated ${...}: whole expansion fails.
							cexpand[mystring]=""
							return ""
						else:
							pos=pos+1
							break
					pos=pos+1
				myvarname=mystring[myvstart:pos]
				if braced:
					if mystring[pos]!="}":
						cexpand[mystring]=""
						return ""
					else:
						pos=pos+1
				if len(myvarname)==0:
					# Bare "$" with no name: treated as an error.
					cexpand[mystring]=""
					return ""
				numvars=numvars+1
				if myvarname in mydict:
					newstring=newstring+mydict[myvarname]
				else:
					# Unknown variable expands to the following char
					# (historical behavior; NOTE(review): looks like an
					# off-by-one quirk, preserved as-is).
					newstring=newstring+mystring[pos]
				pos=pos+1
			else:
				newstring=newstring+mystring[pos]
				pos=pos+1
		else:
			# Inside single quotes: copy verbatim.
			newstring=newstring+mystring[pos]
			pos=pos+1
	if numvars==0:
		# Only variable-free strings are safe to cache.
		cexpand[mystring]=newstring[1:]
	return newstring[1:]
626 |
+ |
627 |
+# broken and removed, but can still be imported |
628 |
+pickle_write = None |
629 |
+ |
630 |
def pickle_read(filename, default=None, debug=0):
	"""Load and return a pickled object from *filename*, or *default* on
	any failure. NOTE(review): unpickling executes arbitrary code; only
	use this on trusted files."""
	import os
	if not os.access(filename, os.R_OK):
		writemsg(_("pickle_read(): File not readable. '")+filename+"'\n",1)
		return default
	try:
		stream = open(_unicode_encode(filename,
			encoding=_encodings['fs'], errors='strict'), 'rb')
		loader = pickle.Unpickler(stream)
		data = loader.load()
		stream.close()
		del loader, stream
		writemsg(_("pickle_read(): Loaded pickle. '")+filename+"'\n",1)
		return data
	except SystemExit as e:
		raise
	except Exception as e:
		# Any unpickling problem degrades to the caller-supplied default.
		writemsg(_("!!! Failed to load pickle: ")+str(e)+"\n",1)
		return default
650 |
+ |
651 |
def dump_traceback(msg, noiselevel=1):
	"""Write *msg* followed by a formatted stack trace to the message
	stream. If an exception is currently being handled its traceback is
	used; otherwise the current call stack is dumped."""
	import sys, traceback
	info = sys.exc_info()
	if info[2]:
		stack = traceback.extract_tb(info[2])
		error = str(info[1])
	else:
		# No active exception: show the caller's stack (minus this frame).
		stack = traceback.extract_stack()[:-1]
		error = None
	writemsg("\n====================================\n", noiselevel=noiselevel)
	writemsg("%s\n\n" % msg, noiselevel=noiselevel)
	for line in traceback.format_list(stack):
		writemsg(line, noiselevel=noiselevel)
	if error:
		writemsg(error+"\n", noiselevel=noiselevel)
	writemsg("====================================\n\n", noiselevel=noiselevel)
667 |
+ |
668 |
class cmp_sort_key(object):
	"""
	In python-3.0 the list.sort() method no longer has a "cmp" keyword
	argument. This class acts as an adapter which converts a cmp function
	into one that's suitable for use as the "key" keyword argument to
	list.sort(), making it easier to port code for python-3.0 compatibility.
	It works by generating key objects which use the given cmp function to
	implement their __lt__ method.
	"""
	__slots__ = ("_cmp_func",)

	class _cmp_key(object):
		# One of these wraps each element being sorted; ordering is
		# delegated to the stored cmp function.
		__slots__ = ("_cmp_func", "_obj")

		def __init__(self, cmp_func, obj):
			self._cmp_func = cmp_func
			self._obj = obj

		def __lt__(self, other):
			# Refuse to compare against anything but another key of the
			# same kind.
			if self.__class__ is not other.__class__:
				raise TypeError("Expected type %s, got %s" % \
					(self.__class__, other.__class__))
			return self._cmp_func(self._obj, other._obj) < 0

	def __init__(self, cmp_func):
		"""
		@type cmp_func: callable which takes 2 positional arguments
		@param cmp_func: A cmp function.
		"""
		self._cmp_func = cmp_func

	def __call__(self, lhs):
		return self._cmp_key(self._cmp_func, lhs)
701 |
+ |
702 |
def unique_array(s):
	"""lifted from python cookbook, credit: Tim Peters
	Return a list of the elements in s in arbitrary order, sans duplicates"""
	n = len(s)
	# Fast path: hashable elements allow a linear-time set round-trip.
	try:
		return list(set(s))
	except TypeError:
		pass

	# Next best: sortable elements let us drop adjacent duplicates.
	try:
		t = sorted(s)
	except TypeError:
		pass
	else:
		assert n > 0
		deduped = t[:1]
		for item in t[1:]:
			if item != deduped[-1]:
				deduped.append(item)
		return deduped

	# Last resort: quadratic membership scan, preserving input order.
	result = []
	for item in s:
		if item not in result:
			result.append(item)
	return result
735 |
+ |
736 |
def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
	stat_cached=None, follow_links=True):
	"""Apply user, group, and mode bits to a file if the existing bits do not
	already match. The default behavior is to force an exact match of mode
	bits. When mask=0 is specified, mode bits on the target file are allowed
	to be a superset of the mode argument (via logical OR). When mask>0, the
	mode bits that the target file is allowed to have are restricted via
	logical XOR.
	Returns True if the permissions were modified and False otherwise.

	@param filename: path to operate on
	@param uid: numeric owner to set, or -1 to leave unchanged
	@param gid: numeric group to set, or -1 to leave unchanged
	@param mode: permission bits to apply, or -1 to leave unspecified
	@param mask: see above; -1 means "exact match of mode"
	@param stat_cached: optional pre-fetched stat result for filename
	@param follow_links: when False, operate on the symlink itself
	@raises OperationNotPermitted/PermissionDenied/FileNotFound/
		ReadOnlyFileSystem: translated from the underlying OSError errno
	"""

	modified = False

	# Stat only if the caller did not hand us a cached result.
	if stat_cached is None:
		try:
			if follow_links:
				stat_cached = os.stat(filename)
			else:
				stat_cached = os.lstat(filename)
		except OSError as oe:
			# Translate errno into portage's exception hierarchy.
			func_call = "stat('%s')" % filename
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			else:
				raise

	# Ownership: chown only when the requested uid/gid differ.
	if (uid != -1 and uid != stat_cached.st_uid) or \
		(gid != -1 and gid != stat_cached.st_gid):
		try:
			if follow_links:
				os.chown(filename, uid, gid)
			else:
				import portage.data
				portage.data.lchown(filename, uid, gid)
			modified = True
		except OSError as oe:
			func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			else:
				raise

	# Compute the mode to chmod to (-1 means "no chmod needed").
	new_mode = -1
	st_mode = stat_cached.st_mode & 0o7777 # protect from unwanted bits
	if mask >= 0:
		if mode == -1:
			mode = 0 # Don't add any mode bits when mode is unspecified.
		else:
			mode = mode & 0o7777
		# Chmod when required bits are missing or masked-out bits are set.
		if (mode & st_mode != mode) or \
			((mask ^ st_mode) & st_mode != st_mode):
			new_mode = mode | st_mode
			new_mode = (mask ^ new_mode) & new_mode
	elif mode != -1:
		mode = mode & 0o7777 # protect from unwanted bits
		if mode != st_mode:
			new_mode = mode

	# The chown system call may clear S_ISUID and S_ISGID
	# bits, so those bits are restored if necessary.
	if modified and new_mode == -1 and \
		(st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
		if mode == -1:
			new_mode = st_mode
		else:
			mode = mode & 0o7777
			if mask >= 0:
				new_mode = mode | st_mode
				new_mode = (mask ^ new_mode) & new_mode
			else:
				new_mode = mode
			# Nothing to restore if the result carries neither bit.
			if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
				new_mode = -1

	if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
		# Mode doesn't matter for symlinks.
		new_mode = -1

	if new_mode != -1:
		try:
			os.chmod(filename, new_mode)
			modified = True
		except OSError as oe:
			func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			raise
	return modified
839 |
+ |
840 |
def apply_stat_permissions(filename, newstat, **kwargs):
	"""A wrapper around apply_secpass_permissions that gets
	uid, gid, and mode from a stat object"""
	return apply_secpass_permissions(filename, uid=newstat.st_uid,
		gid=newstat.st_gid, mode=newstat.st_mode, **kwargs)
845 |
+ |
846 |
def apply_recursive_permissions(top, uid=-1, gid=-1,
	dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
	"""A wrapper around apply_secpass_permissions that applies permissions
	recursively. If optional argument onerror is specified, it should be a
	function; it will be called with one argument, a PortageException instance.
	Returns True if all permissions are applied and False if some are left
	unapplied."""

	if onerror is None:
		# Default behavior is to dump errors to stderr so they won't
		# go unnoticed. Callers can pass in a quiet instance.
		def onerror(e):
			if isinstance(e, OperationNotPermitted):
				writemsg(_("Operation Not Permitted: %s\n") % str(e),
					noiselevel=-1)
			elif isinstance(e, FileNotFound):
				writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
			else:
				raise

	all_applied = True
	for dirpath, dirnames, filenames in os.walk(top):
		# Directory itself first, using the dir mode/mask.
		try:
			if not apply_secpass_permissions(dirpath,
				uid=uid, gid=gid, mode=dirmode, mask=dirmask):
				all_applied = False
		except PortageException as e:
			all_applied = False
			onerror(e)

		# Then every regular file in it, using the file mode/mask.
		for name in filenames:
			try:
				if not apply_secpass_permissions(
					os.path.join(dirpath, name),
					uid=uid, gid=gid, mode=filemode, mask=filemask):
					all_applied = False
			except PortageException as e:
				# Ignore InvalidLocation exceptions such as FileNotFound
				# and DirectoryNotFound since sometimes things disappear,
				# like when adjusting permissions on DISTCC_DIR.
				if not isinstance(e, portage.exception.InvalidLocation):
					all_applied = False
					onerror(e)
	return all_applied
891 |
+ |
892 |
def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
	stat_cached=None, follow_links=True):
	"""A wrapper around apply_permissions that uses secpass and simple
	logic to apply as much of the permissions as possible without
	generating an obviously avoidable permission exception. Despite
	attempts to avoid an exception, it's possible that one will be raised
	anyway, so be prepared.
	Returns True if all permissions are applied and False if some are left
	unapplied."""

	# Stat once (or reuse the caller-provided result) so the ownership
	# checks below don't hit the filesystem repeatedly.
	if stat_cached is None:
		try:
			if follow_links:
				stat_cached = os.stat(filename)
			else:
				stat_cached = os.lstat(filename)
		except OSError as oe:
			# Translate common OS errors into portage's exception types.
			func_call = "stat('%s')" % filename
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			else:
				raise

	all_applied = True

	import portage.data # not imported globally because of circular dep
	if portage.data.secpass < 2:
		# Insufficient privileges for arbitrary chown: drop a uid change
		# unless it already matches the current owner, and drop a gid
		# change unless it matches the current group or one of our
		# supplementary groups. Partial application is reported through
		# the return value rather than by raising.

		if uid != -1 and \
			uid != stat_cached.st_uid:
			all_applied = False
			uid = -1

		if gid != -1 and \
			gid != stat_cached.st_gid and \
			gid not in os.getgroups():
			all_applied = False
			gid = -1

	apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
		stat_cached=stat_cached, follow_links=follow_links)
	return all_applied
938 |
+ |
939 |
class atomic_ofstream(ObjectProxy):
	"""Write a file atomically via os.rename(). Atomic replacement prevents
	interprocess interference and prevents corruption of the target
	file when the write is interrupted (for example, when an 'out of space'
	error occurs).

	All writes go to a temporary "<name>.<pid>" file; close() renames it
	over the real target. Because ObjectProxy forwards attribute access to
	the wrapped file object, the instance's own attributes must be set and
	read through object.__setattr__/object.__getattribute__ throughout.
	"""

	def __init__(self, filename, mode='w', follow_links=True, **kargs):
		"""Opens a temporary filename.pid in the same directory as filename."""
		ObjectProxy.__init__(self)
		object.__setattr__(self, '_aborted', False)
		if 'b' in mode:
			open_func = open
		else:
			# Text mode: use codecs.open with portage's content encoding.
			open_func = codecs.open
			kargs.setdefault('encoding', _encodings['content'])
			kargs.setdefault('errors', 'backslashreplace')

		if follow_links:
			# Prefer writing next to the symlink target so the rename
			# stays on the same filesystem as the real file.
			canonical_path = os.path.realpath(filename)
			object.__setattr__(self, '_real_name', canonical_path)
			tmp_name = "%s.%i" % (canonical_path, os.getpid())
			try:
				object.__setattr__(self, '_file',
					open_func(_unicode_encode(tmp_name,
						encoding=_encodings['fs'], errors='strict'),
						mode=mode, **kargs))
				return
			except IOError as e:
				if canonical_path == filename:
					raise
				# Fall back to the non-canonical location below.
				writemsg(_("!!! Failed to open file: '%s'\n") % tmp_name,
					noiselevel=-1)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)

		object.__setattr__(self, '_real_name', filename)
		tmp_name = "%s.%i" % (filename, os.getpid())
		object.__setattr__(self, '_file',
			open_func(_unicode_encode(tmp_name,
				encoding=_encodings['fs'], errors='strict'),
				mode=mode, **kargs))

	def _get_target(self):
		# Hook used by ObjectProxy to locate the wrapped file object.
		return object.__getattribute__(self, '_file')

	def __getattribute__(self, attr):
		# Only close/abort/__del__ belong to this proxy; everything else
		# (write, name, closed, ...) is delegated to the temp file.
		if attr in ('close', 'abort', '__del__'):
			return object.__getattribute__(self, attr)
		return getattr(object.__getattribute__(self, '_file'), attr)

	def close(self):
		"""Closes the temporary file, copies permissions (if possible),
		and performs the atomic replacement via os.rename(). If the abort()
		method has been called, then the temp file is closed and removed."""
		f = object.__getattribute__(self, '_file')
		real_name = object.__getattribute__(self, '_real_name')
		if not f.closed:
			try:
				f.close()
				if not object.__getattribute__(self, '_aborted'):
					# Best-effort: carry over the existing target's
					# ownership/mode before replacing it.
					try:
						apply_stat_permissions(f.name, os.stat(real_name))
					except OperationNotPermitted:
						pass
					except FileNotFound:
						pass
					except OSError as oe: # from the above os.stat call
						if oe.errno in (errno.ENOENT, errno.EPERM):
							pass
						else:
							raise
					os.rename(f.name, real_name)
			finally:
				# Make sure we cleanup the temp file
				# even if an exception is raised.
				try:
					os.unlink(f.name)
				except OSError as oe:
					pass

	def abort(self):
		"""If an error occurs while writing the file, the user should
		call this method in order to leave the target file unchanged.
		This will call close() automatically."""
		if not object.__getattribute__(self, '_aborted'):
			object.__setattr__(self, '_aborted', True)
			self.close()

	def __del__(self):
		"""If the user does not explicitly call close(), it is
		assumed that an error has occurred, so we abort()."""
		try:
			f = object.__getattribute__(self, '_file')
		except AttributeError:
			# __init__ failed before _file was set; nothing to clean up.
			pass
		else:
			if not f.closed:
				self.abort()
		# ensure destructor from the base class is called
		base_destructor = getattr(ObjectProxy, '__del__', None)
		if base_destructor is not None:
			base_destructor(self)
1040 |
+ |
1041 |
def write_atomic(file_path, content, **kwargs):
	"""Atomically replace *file_path* with *content* via atomic_ofstream.

	On failure the temporary file is discarded and the underlying OS
	error is translated into the matching portage exception type.
	"""
	stream = None
	try:
		stream = atomic_ofstream(file_path, **kwargs)
		stream.write(content)
		stream.close()
	except (IOError, OSError) as err:
		if stream:
			stream.abort()
		call_repr = "write_atomic('%s')" % file_path
		if err.errno == errno.EPERM:
			raise OperationNotPermitted(call_repr)
		if err.errno == errno.EACCES:
			raise PermissionDenied(call_repr)
		if err.errno == errno.EROFS:
			raise ReadOnlyFileSystem(call_repr)
		if err.errno == errno.ENOENT:
			raise FileNotFound(file_path)
		raise
1061 |
+ |
1062 |
def ensure_dirs(dir_path, *args, **kwargs):
	"""Create dir_path (including parents) if necessary, then delegate
	the remaining arguments to apply_permissions.

	Returns True if the directory was created or its permissions needed
	to be modified, and False otherwise.
	"""
	try:
		os.makedirs(dir_path)
		created_dir = True
	except OSError as oe:
		created_dir = False
		func_call = "makedirs('%s')" % dir_path
		if oe.errno in (errno.EEXIST, errno.EISDIR):
			# Already present; that's fine.
			pass
		elif oe.errno == errno.EPERM:
			raise OperationNotPermitted(func_call)
		elif oe.errno == errno.EACCES:
			raise PermissionDenied(func_call)
		elif oe.errno == errno.EROFS:
			raise ReadOnlyFileSystem(func_call)
		else:
			raise
	# apply_permissions always runs, even for a pre-existing directory.
	perms_changed = apply_permissions(dir_path, *args, **kwargs)
	return created_dir or perms_changed
1086 |
+ |
1087 |
class LazyItemsDict(UserDict):
	"""A mapping object that behaves like a standard dict except that it allows
	for lazy initialization of values via callable objects. Lazy items can be
	overwritten and deleted just as normal items."""

	__slots__ = ('lazy_items',)

	def __init__(self, *args, **kwargs):

		# lazy_items must exist before UserDict.__init__ runs, since the
		# base constructor may call our overridden __setitem__.
		self.lazy_items = {}
		UserDict.__init__(self, *args, **kwargs)

	def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
		"""Add a lazy item for the given key. When the item is requested,
		value_callable will be called with *pargs and **kwargs arguments."""
		self.lazy_items[item_key] = \
			self._LazyItem(value_callable, pargs, kwargs, False)
		# make it show up in self.keys(), etc...
		UserDict.__setitem__(self, item_key, None)

	def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
		"""This is like addLazyItem except value_callable will only be called
		a maximum of 1 time and the result will be cached for future requests."""
		self.lazy_items[item_key] = \
			self._LazyItem(value_callable, pargs, kwargs, True)
		# make it show up in self.keys(), etc...
		UserDict.__setitem__(self, item_key, None)

	def update(self, *args, **kwargs):
		# dict.update signature: at most one positional mapping argument.
		if len(args) > 1:
			raise TypeError(
				"expected at most 1 positional argument, got " + \
				repr(len(args)))
		if args:
			map_obj = args[0]
		else:
			map_obj = None
		if map_obj is None:
			pass
		elif isinstance(map_obj, LazyItemsDict):
			# Preserve the other dict's lazy items instead of forcing
			# their evaluation.
			for k in map_obj:
				if k in map_obj.lazy_items:
					UserDict.__setitem__(self, k, None)
				else:
					UserDict.__setitem__(self, k, map_obj[k])
			self.lazy_items.update(map_obj.lazy_items)
		else:
			UserDict.update(self, map_obj)
		if kwargs:
			UserDict.update(self, kwargs)

	def __getitem__(self, item_key):
		if item_key in self.lazy_items:
			lazy_item = self.lazy_items[item_key]
			pargs = lazy_item.pargs
			if pargs is None:
				pargs = ()
			kwargs = lazy_item.kwargs
			if kwargs is None:
				kwargs = {}
			result = lazy_item.func(*pargs, **kwargs)
			if lazy_item.singleton:
				# Cache the value; __setitem__ also removes the lazy
				# entry so func is never called again for this key.
				self[item_key] = result
			return result

		else:
			return UserDict.__getitem__(self, item_key)

	def __setitem__(self, item_key, value):
		# A concrete assignment overrides any pending lazy item.
		if item_key in self.lazy_items:
			del self.lazy_items[item_key]
		UserDict.__setitem__(self, item_key, value)

	def __delitem__(self, item_key):
		if item_key in self.lazy_items:
			del self.lazy_items[item_key]
		UserDict.__delitem__(self, item_key)

	def clear(self):
		self.lazy_items.clear()
		UserDict.clear(self)

	def copy(self):
		return self.__copy__()

	def __copy__(self):
		return self.__class__(self)

	def __deepcopy__(self, memo=None):
		"""
		WARNING: If any of the lazy items contains a bound method then it's
		typical for deepcopy() to raise an exception like this:

		File "/usr/lib/python2.5/copy.py", line 189, in deepcopy
		y = _reconstruct(x, rv, 1, memo)
		File "/usr/lib/python2.5/copy.py", line 322, in _reconstruct
		y = callable(*args)
		File "/usr/lib/python2.5/copy_reg.py", line 92, in __newobj__
		return cls.__new__(cls, *args)
		TypeError: instancemethod expected at least 2 arguments, got 0

		If deepcopy() needs to work, this problem can be avoided by
		implementing lazy items with normal (non-bound) functions.

		If deepcopy() raises a TypeError for a lazy item that has been added
		via a call to addLazySingleton(), the singleton will be automatically
		evaluated and deepcopy() will instead be called on the result.
		"""
		if memo is None:
			memo = {}
		from copy import deepcopy
		result = self.__class__()
		memo[id(self)] = result
		for k in self:
			k_copy = deepcopy(k, memo)
			if k in self.lazy_items:
				lazy_item = self.lazy_items[k]
				try:
					result.lazy_items[k_copy] = deepcopy(lazy_item, memo)
				except TypeError:
					# See docstring: evaluate a singleton and copy the
					# resulting value instead of the lazy wrapper.
					if not lazy_item.singleton:
						raise
					UserDict.__setitem__(result,
						k_copy, deepcopy(self[k], memo))
				else:
					UserDict.__setitem__(result, k_copy, None)
			else:
				UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
		return result

	class _LazyItem(object):
		# Lightweight record describing one pending lazy value.

		__slots__ = ('func', 'pargs', 'kwargs', 'singleton')

		def __init__(self, func, pargs, kwargs, singleton):

			# Normalize empty argument containers to None to save space.
			if not pargs:
				pargs = None
			if not kwargs:
				kwargs = None

			self.func = func
			self.pargs = pargs
			self.kwargs = kwargs
			self.singleton = singleton

		def __copy__(self):
			return self.__class__(self.func, self.pargs,
				self.kwargs, self.singleton)

		def __deepcopy__(self, memo=None):
			"""
			Override this since the default implementation can fail silently,
			leaving some attributes unset.
			"""
			if memo is None:
				memo = {}
			from copy import deepcopy
			result = self.__copy__()
			memo[id(self)] = result
			result.func = deepcopy(self.func, memo)
			result.pargs = deepcopy(self.pargs, memo)
			result.kwargs = deepcopy(self.kwargs, memo)
			result.singleton = deepcopy(self.singleton, memo)
			return result
1252 |
+ |
1253 |
class ConfigProtect(object):
	"""Tracks the configured protect and mask path lists (CONFIG_PROTECT /
	CONFIG_PROTECT_MASK style) and answers whether a given path is
	currently protected via isprotected()."""

	def __init__(self, myroot, protect_list, mask_list):
		# myroot: filesystem root the relative entries are joined to.
		# protect_list / mask_list: iterables of path strings.
		self.myroot = myroot
		self.protect_list = protect_list
		self.mask_list = mask_list
		self.updateprotect()

	def updateprotect(self):
		"""Update internal state for isprotected() calls. Nonexistent paths
		are ignored."""

		os = _os_merge

		self.protect = []
		self._dirs = set()
		for x in self.protect_list:
			ppath = normalize_path(
				os.path.join(self.myroot, x.lstrip(os.path.sep)))
			# NOTE(review): mystat is assigned but never used here
			# (leftover); kept as-is since this is a doc-only pass.
			mystat = None
			try:
				if stat.S_ISDIR(os.stat(ppath).st_mode):
					self._dirs.add(ppath)
				self.protect.append(ppath)
			except OSError:
				# If it doesn't exist, there's no need to protect it.
				pass

		self.protectmask = []
		for x in self.mask_list:
			ppath = normalize_path(
				os.path.join(self.myroot, x.lstrip(os.path.sep)))
			# NOTE(review): mystat unused here as well.
			mystat = None
			try:
				"""Use lstat so that anything, even a broken symlink can be
				protected."""
				if stat.S_ISDIR(os.lstat(ppath).st_mode):
					self._dirs.add(ppath)
				self.protectmask.append(ppath)
				"""Now use stat in case this is a symlink to a directory."""
				if stat.S_ISDIR(os.stat(ppath).st_mode):
					self._dirs.add(ppath)
			except OSError:
				# If it doesn't exist, there's no need to mask it.
				pass

	def isprotected(self, obj):
		"""Returns True if obj is protected, False otherwise. The caller must
		ensure that obj is normalized with a single leading slash. A trailing
		slash is optional for directories."""
		# The longest matching protect entry wins over any shorter mask
		# entry, and vice versa; lengths are compared at the end.
		masked = 0
		protected = 0
		sep = os.path.sep
		for ppath in self.protect:
			if len(ppath) > masked and obj.startswith(ppath):
				if ppath in self._dirs:
					if obj != ppath and not obj.startswith(ppath + sep):
						# /etc/foo does not match /etc/foobaz
						continue
				elif obj != ppath:
					# force exact match when CONFIG_PROTECT lists a
					# non-directory
					continue
				protected = len(ppath)
				#config file management
				for pmpath in self.protectmask:
					if len(pmpath) >= protected and obj.startswith(pmpath):
						if pmpath in self._dirs:
							if obj != pmpath and \
								not obj.startswith(pmpath + sep):
								# /etc/foo does not match /etc/foobaz
								continue
						elif obj != pmpath:
							# force exact match when CONFIG_PROTECT_MASK lists
							# a non-directory
							continue
						#skip, it's in the mask
						masked = len(pmpath)
		return protected > masked
1331 |
+ |
1332 |
def new_protect_filename(mydest, newmd5=None):
	"""Resolves a config-protect filename for merging, optionally
	using the last filename if the md5 matches.
	(dest,md5) ==> 'string' --- path_to_target_filename
	(dest) ==> ('next', 'highest') --- next_target and most-recent_target
	"""

	# config protection filename format:
	# ._cfg0000_foo
	# 0123456789012
	# i.e. chars 0:5 are "._cfg", 5:9 the zero-padded counter, char 9 is
	# "_", and 10: is the original file name.

	os = _os_merge

	prot_num = -1
	last_pfile = ""

	# No existing target file: nothing to protect, merge straight to mydest.
	if not os.path.exists(mydest):
		return mydest

	real_filename = os.path.basename(mydest)
	real_dirname = os.path.dirname(mydest)
	# Scan for the highest-numbered existing ._cfgNNNN_<name> entry.
	for pfile in os.listdir(real_dirname):
		if pfile[0:5] != "._cfg":
			continue
		if pfile[10:] != real_filename:
			continue
		try:
			new_prot_num = int(pfile[5:9])
			if new_prot_num > prot_num:
				prot_num = new_prot_num
				last_pfile = pfile
		except ValueError:
			# Counter field is not numeric: not one of our files.
			continue
	prot_num = prot_num + 1

	new_pfile = normalize_path(os.path.join(real_dirname,
		"._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
	old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
	if last_pfile and newmd5:
		# Reuse the most recent protect file when its content already
		# matches, instead of creating yet another identical copy.
		import portage.checksum
		try:
			last_pfile_md5 = portage.checksum._perform_md5_merge(old_pfile)
		except FileNotFound:
			# The file suddenly disappeared or it's a broken symlink.
			pass
		else:
			if last_pfile_md5 == newmd5:
				return old_pfile
	return new_pfile
1381 |
+ |
1382 |
def find_updated_config_files(target_root, config_protect):
	"""
	Generator yielding the configuration files that need to be updated.
	Each yielded item is a 2-tuple:
	  (protected_dir, file_list)  when the protected path is a directory
	  (protected_file, None)      when the protected path is a single file
	Nothing is yielded when no configuration files need updating.
	"""

	os = _os_merge

	if config_protect:
		# directories with some protect files in them
		for x in config_protect:
			files = []

			x = os.path.join(target_root, x.lstrip(os.path.sep))
			# Skip entries we could not update anyway.
			if not os.access(x, os.W_OK):
				continue
			try:
				mymode = os.lstat(x).st_mode
			except OSError:
				continue

			if stat.S_ISLNK(mymode):
				# We want to treat it like a directory if it
				# is a symlink to an existing directory.
				try:
					real_mode = os.stat(x).st_mode
					if stat.S_ISDIR(real_mode):
						mymode = real_mode
				except OSError:
					pass

			# NOTE(review): the path is interpolated into a shell command
			# with only single quotes; a path containing "'" would break
			# the command. Left as-is in this doc-only pass.
			if stat.S_ISDIR(mymode):
				mycommand = \
					"find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
			else:
				mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
					os.path.split(x.rstrip(os.path.sep))
			mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
			a = subprocess_getstatusoutput(mycommand)

			if a[0] == 0:
				files = a[1].split('\0')
				# split always produces an empty string as the last element
				if files and not files[-1]:
					del files[-1]
				if files:
					if stat.S_ISDIR(mymode):
						yield (x, files)
					else:
						yield (x, None)
1436 |
+ |
1437 |
def getlibpaths(root):
	""" Return a list of paths that are used for library lookups """

	# the following is based on the information from ld.so(8)
	paths = os.environ.get("LD_LIBRARY_PATH", "").split(":")
	paths.extend(grabfile(os.path.join(root, "etc", "ld.so.conf")))
	paths.extend(["/usr/lib", "/lib"])

	return [normalize_path(p) for p in paths if p]
1447 |
|
1448 |
Deleted: main/trunk/pym/portage/util.py |
1449 |
=================================================================== |
1450 |
--- main/trunk/pym/portage/util.py 2010-02-22 02:39:16 UTC (rev 15421) |
1451 |
+++ main/trunk/pym/portage/util.py 2010-02-22 02:39:48 UTC (rev 15422) |
1452 |
@@ -1,1429 +0,0 @@ |
1453 |
-# Copyright 2004-2009 Gentoo Foundation |
1454 |
-# Distributed under the terms of the GNU General Public License v2 |
1455 |
-# $Id$ |
1456 |
- |
1457 |
-__all__ = ['apply_permissions', 'apply_recursive_permissions', |
1458 |
- 'apply_secpass_permissions', 'apply_stat_permissions', 'atomic_ofstream', |
1459 |
- 'cmp_sort_key', 'ConfigProtect', 'dump_traceback', 'ensure_dirs', |
1460 |
- 'find_updated_config_files', 'getconfig', 'getlibpaths', 'grabdict', |
1461 |
- 'grabdict_package', 'grabfile', 'grabfile_package', 'grablines', |
1462 |
- 'initialize_logger', 'LazyItemsDict', 'map_dictlist_vals', |
1463 |
- 'new_protect_filename', 'normalize_path', 'pickle_read', 'stack_dictlist', |
1464 |
- 'stack_dicts', 'stack_lists', 'unique_array', 'varexpand', 'write_atomic', |
1465 |
- 'writedict', 'writemsg', 'writemsg_level', 'writemsg_stdout'] |
1466 |
- |
1467 |
-try: |
1468 |
- from subprocess import getstatusoutput as subprocess_getstatusoutput |
1469 |
-except ImportError: |
1470 |
- from commands import getstatusoutput as subprocess_getstatusoutput |
1471 |
-import codecs |
1472 |
-import errno |
1473 |
-import logging |
1474 |
-import re |
1475 |
-import shlex |
1476 |
-import stat |
1477 |
-import string |
1478 |
-import sys |
1479 |
- |
1480 |
-import portage |
1481 |
-from portage import StringIO |
1482 |
-from portage import os |
1483 |
-from portage import _encodings |
1484 |
-from portage import _os_merge |
1485 |
-from portage import _unicode_encode |
1486 |
-from portage import _unicode_decode |
1487 |
-from portage.exception import InvalidAtom, PortageException, FileNotFound, \ |
1488 |
- OperationNotPermitted, PermissionDenied, ReadOnlyFileSystem |
1489 |
-from portage.dep import Atom, isvalidatom |
1490 |
-from portage.localization import _ |
1491 |
-from portage.proxy.objectproxy import ObjectProxy |
1492 |
-from portage.cache.mappings import UserDict |
1493 |
- |
1494 |
-try: |
1495 |
- import cPickle as pickle |
1496 |
-except ImportError: |
1497 |
- import pickle |
1498 |
- |
1499 |
# Verbosity threshold used by writemsg(): a message is emitted only when
# its noiselevel is <= noiselimit.
noiselimit = 0
1500 |
- |
1501 |
def initialize_logger(level=logging.WARN):
	"""Sets up basic logging of portage activities
	Args:
		level: the minimum level to emit messages at
			(e.g. logging.INFO, logging.DEBUG); defaults to logging.WARN
	Returns:
		None
	"""
	# Bug fix: the level argument was previously ignored and
	# logging.WARN was always passed to basicConfig.
	logging.basicConfig(level=level,
		format='[%(levelname)-4s] %(message)s')
1509 |
- |
1510 |
def writemsg(mystr,noiselevel=0,fd=None):
	"""Prints out warning and debug messages based on the noiselimit setting

	@param mystr: message text, including any trailing newline
	@param noiselevel: verbosity of this message; it is printed only when
		noiselevel <= the module-global noiselimit
	@param fd: file object to write to (defaults to sys.stderr)
	"""
	global noiselimit
	if fd is None:
		fd = sys.stderr
	if noiselevel <= noiselimit:
		# avoid potential UnicodeEncodeError by encoding to bytes here
		# instead of relying on the stream's own encoding step
		mystr = _unicode_encode(mystr,
			encoding=_encodings['stdio'], errors='backslashreplace')
		if sys.hexversion >= 0x3000000:
			# Python 3 text streams reject bytes; use the binary buffer.
			fd = fd.buffer
		fd.write(mystr)
		fd.flush()
1523 |
- |
1524 |
def writemsg_stdout(mystr, noiselevel=0):
	"""Like writemsg(), but directed at stdout instead of stderr."""
	writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
1527 |
- |
1528 |
def writemsg_level(msg, level=0, noiselevel=0):
	"""
	Route a message by its logging level: messages at or above
	logging.WARNING go to stderr, everything else goes to stdout.
	The noiselevel is passed directly to writemsg().

	@type msg: str
	@param msg: a message string, including newline if appropriate
	@type level: int
	@param level: a numeric logging level (see the logging module)
	@type noiselevel: int
	@param noiselevel: passed directly to writemsg
	"""
	target = sys.stderr if level >= logging.WARNING else sys.stdout
	writemsg(msg, noiselevel=noiselevel, fd=target)
1547 |
- |
1548 |
def normalize_path(mypath):
	"""
	Collapse redundant separators like os.path.normpath(), but also
	reduce a double leading slash ("//foo") to a single one ("/foo").

	POSIX gives a leading "//" special meaning, so posixpath.normpath()
	preserves exactly two leading slashes while collapsing three or
	more; prefixing two extra separators forces the collapse in every
	case.
	"""
	sep = os.path.sep
	if sys.hexversion >= 0x3000000 and isinstance(mypath, bytes):
		sep = os.path.sep.encode()
	if not mypath.startswith(sep):
		return os.path.normpath(mypath)
	return os.path.normpath(sep + sep + mypath)
1564 |
- |
1565 |
def grabfile(myfilename, compat_level=0, recursive=0):
	"""This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
	begins with a #, it is ignored, as are empty lines

	@param myfilename: path of the file to read (delegated to grablines)
	@param compat_level: lines of the form '##COMPAT==>N<== text' are
		kept (as 'text') when compat_level >= N
	@param recursive: passed through to grablines() to read a directory
		of files
	"""

	mylines=grablines(myfilename, recursive)
	newlines=[]
	for x in mylines:
		#the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
		#into single spaces.
		myline = _unicode_decode(' ').join(x.split())
		if not len(myline):
			continue
		if myline[0]=="#":
			# Check if we have a compat-level string. BC-integration data.
			# '##COMPAT==>N<==' 'some string attached to it'
			mylinetest = myline.split("<==",1)
			if len(mylinetest) == 2:
				myline_potential = mylinetest[1]
				mylinetest = mylinetest[0].split("##COMPAT==>")
				if len(mylinetest) == 2:
					if compat_level >= int(mylinetest[1]):
						# It's a compat line, and the key matches.
						newlines.append(myline_potential)
				continue
			else:
				# Plain comment line: skip it.
				continue
		newlines.append(myline)
	return newlines
1593 |
- |
1594 |
def map_dictlist_vals(func,myDict):
	"""Performs a function on each value of each key in a dictlist.
	Returns a new dictlist.

	@param func: callable applied to every element of every value list
	@param myDict: dict mapping each key to a list of values
	@returns: a new dict with the same keys and transformed value lists
	"""
	# The original assigned new_dl[key] = [] and then immediately rebound
	# the same key on the next line; the dead statement is dropped in
	# favor of a single dict comprehension.
	return {key: [func(x) for x in myDict[key]] for key in myDict}
1602 |
- |
1603 |
def stack_dictlist(original_dicts, incremental=0, incrementals=(), ignore_none=0):
	"""
	Stacks an array of dict-types into one array. Optionally merging or
	overwriting matching key/value pairs for the dict[key]->list.
	Returns a single dict. Higher index in lists is preferenced.

	Example usage:
	>>> from portage.util import stack_dictlist
	>>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
	>>> {'a':'b','x':'y'}
	>>> print stack_dictlist( [{'a':'b'},{'a':'c'}], incremental = True )
	>>> {'a':['b','c'] }
	>>> a = {'KEYWORDS':['x86','alpha']}
	>>> b = {'KEYWORDS':['-x86']}
	>>> print stack_dictlist( [a,b] )
	>>> { 'KEYWORDS':['x86','alpha','-x86']}
	>>> print stack_dictlist( [a,b], incremental=True)
	>>> { 'KEYWORDS':['alpha'] }
	>>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
	>>> { 'KEYWORDS':['alpha'] }

	@param original_dicts a list of (dictionary objects or None)
	@type list
	@param incremental True or false depending on whether new keys should overwrite
	   keys which already exist.
	@type boolean
	@param incrementals A list of items that should be incremental (-foo removes foo from
	   the returned dict).
	@type list
	@param ignore_none unused; accepted for backward compatibility.
	@type boolean
	"""
	# Fix: the default for incrementals was a shared mutable list ([]);
	# it is only tested for membership, so an empty tuple is equivalent
	# and avoids the mutable-default pitfall.
	final_dict = {}
	for mydict in original_dicts:
		if mydict is None:
			continue
		for y in mydict:
			if y not in final_dict:
				final_dict[y] = []

			for thing in mydict[y]:
				if thing:
					if incremental or y in incrementals:
						if thing == "-*":
							# Reset everything accumulated for this key.
							final_dict[y] = []
							continue
						elif thing[:1] == '-':
							# "-foo" removes a previously stacked "foo".
							try:
								final_dict[y].remove(thing[1:])
							except ValueError:
								pass
							continue
					if thing not in final_dict[y]:
						final_dict[y].append(thing)
			# Drop keys whose value list ended up empty.
			if y in final_dict and not final_dict[y]:
				del final_dict[y]
	return final_dict
1661 |
- |
1662 |
def stack_dicts(dicts, incremental=0, incrementals=(), ignore_none=0):
	"""Stacks an array of dict-types into one array. Optionally merging or
	overwriting matching key/value pairs for the dict[key]->string.
	Returns a single dict.

	@param dicts: iterable of dicts (None/empty entries are skipped)
	@param incremental: when true, values for duplicate keys are joined
		with a space instead of overwritten
	@param incrementals: keys that are always joined incrementally
	@param ignore_none: unused; accepted for backward compatibility
	"""
	# Fix: the default for incrementals was a shared mutable list ([]);
	# it is only tested for membership, so an empty tuple is equivalent
	# and avoids the mutable-default pitfall.
	final_dict = {}
	for mydict in dicts:
		if not mydict:
			continue
		for k, v in mydict.items():
			if k in final_dict and (incremental or (k in incrementals)):
				final_dict[k] += " " + v
			else:
				# Later dicts take precedence for non-incremental keys.
				final_dict[k] = v
	return final_dict
1676 |
- |
1677 |
def stack_lists(lists, incremental=1):
    """Stacks an array of list-types into one array. Optionally removing
    distinct values using '-value' notation. Higher index is preferenced.

    all elements must be hashable."""

    # Emulate an ordered set with dict keys; the values are irrelevant.
    seen = {}
    for current in lists:
        for token in current:
            if not token:
                # Skip empty/falsy entries, as filter(None, ...) would.
                continue
            if not incremental:
                seen[token] = True
            elif token == "-*":
                # "-*" wipes out everything stacked so far.
                seen.clear()
            elif token[:1] == '-':
                # "-foo" removes a previously stacked "foo", if present.
                seen.pop(token[1:], None)
            else:
                seen[token] = True
    return list(seen)
1696 |
- |
1697 |
def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
    """
    Grab the lines of a file (via grablines) and parse each into a
    dictionary entry: the first whitespace-delimited token is the key,
    the remaining tokens are the value list.

    @param myfilename: file to process
    @type myfilename: string (path)
    @param juststrings: return values as space-joined strings, not lists
    @type juststrings: Boolean (integer)
    @param empty: allow lines consisting of a lone key
    @type empty: Boolean (integer)
    @param recursive: recursively grab (support for
        /etc/portage/package.keywords/* and friends)
    @type recursive: Boolean (integer)
    @param incremental: append to existing entries instead of overwriting
    @type incremental: Boolean (integer)
    @rtype: Dictionary
    @returns: the parsed lines; e.g. 'sys-apps/portage x86 amd64 ppc'
        becomes { "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ] }
    """
    result = {}
    for line in grablines(myfilename, recursive):
        # Full-line comments start with '#' in the first column.
        if line[0] == "#":
            continue
        # split() both strips the line and collapses internal whitespace.
        tokens = line.split()
        if empty == 0 and len(tokens) < 2:
            continue
        if empty == 1 and len(tokens) < 1:
            continue
        if incremental:
            result.setdefault(tokens[0], []).extend(tokens[1:])
        else:
            result[tokens[0]] = tokens[1:]
    if juststrings:
        for key, tokens in result.items():
            result[key] = " ".join(tokens)
    return result
1738 |
- |
1739 |
def grabdict_package(myfilename, juststrings=0, recursive=0):
    """Like grabdict, except every key is validated as a package atom
    via Atom(); entries whose key is not a valid atom are reported on
    stderr and dropped from the result."""
    raw = grabdict(myfilename, juststrings, empty=1, recursive=recursive)
    valid = {}
    for key, value in raw.items():
        try:
            atom_key = Atom(key)
        except InvalidAtom:
            writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, key),
                noiselevel=-1)
        else:
            valid[atom_key] = value
    return valid
1756 |
- |
1757 |
def grabfile_package(myfilename, compatlevel=0, recursive=0):
    """Grab a package-list file and validate each entry as an atom.
    Plain atoms are returned as Atom instances; entries carrying a
    special '-' prefix (or '*' in profile 'packages' files) keep their
    original string form. Invalid atoms are reported and dropped."""
    entries = grabfile(myfilename, compatlevel, recursive=recursive)
    basename = os.path.basename(myfilename)
    atoms = []
    for entry in entries:
        atom_text = entry
        # Strip the removal prefix used by packages and package.mask files.
        if atom_text[:1] == "-":
            atom_text = atom_text[1:]
        # Strip the system-set marker used in profile "packages" files.
        if atom_text[:1] == '*' and basename == 'packages':
            atom_text = atom_text[1:]
        try:
            parsed = Atom(atom_text)
        except InvalidAtom:
            writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, atom_text),
                noiselevel=-1)
        else:
            if entry == str(parsed):
                # normal atom, so return as Atom instance
                atoms.append(parsed)
            else:
                # atom has special prefix, so return as string
                atoms.append(entry)
    return atoms
1781 |
- |
1782 |
def grablines(myfilename, recursive=0):
    """Return the lines of a file as a list. With recursive enabled, a
    directory argument is processed by recursing into its sorted entries,
    skipping hidden files and backup (~) files. Unreadable or missing
    files yield an empty list, except that EACCES raises PermissionDenied.
    """
    mylines = []
    if recursive and os.path.isdir(myfilename):
        # NOTE(review): this compares the full path against VCS names,
        # so it only matches when the argument itself is "RCS" etc.
        if myfilename in ["RCS", "CVS", "SCCS"]:
            return mylines
        for entry in sorted(os.listdir(myfilename)):
            if entry.startswith(".") or entry.endswith("~"):
                continue
            mylines.extend(grablines(
                os.path.join(myfilename, entry), recursive))
    else:
        try:
            myfile = codecs.open(_unicode_encode(myfilename,
                encoding=_encodings['fs'], errors='strict'),
                mode='r', encoding=_encodings['content'], errors='replace')
            mylines = myfile.readlines()
            myfile.close()
        except IOError as e:
            if e.errno == PermissionDenied.errno:
                raise PermissionDenied(myfilename)
            # Any other I/O error is a deliberate best-effort no-op.
    return mylines
1805 |
- |
1806 |
def writedict(mydict, myfilename, writekey=True):
    """Write a dict out to a file atomically. With writekey=False only
    the values (assumed to be strings) are written, one per line.
    Returns 1 on success and 0 on I/O failure."""
    output = None
    try:
        output = atomic_ofstream(myfilename)
        if writekey:
            for key in mydict:
                output.write("%s %s\n" % (key, " ".join(mydict[key])))
        else:
            for value in mydict.values():
                output.write(value + "\n")
        output.close()
    except IOError:
        # Leave the target untouched on failure.
        if output is not None:
            output.abort()
        return 0
    return 1
1824 |
- |
1825 |
def shlex_split(s):
    """
    Equivalent to shlex.split(), except that unicode strings are
    temporarily encoded to bytes, since Python 2's shlex.split() cannot
    handle unicode strings.
    """
    # Only Python 2 has a distinct unicode type requiring the workaround;
    # on Python 3 the short-circuit avoids referencing the missing name.
    needs_recode = sys.hexversion < 0x3000000 and isinstance(s, unicode)
    if needs_recode:
        s = _unicode_encode(s)
    result = shlex.split(s)
    if needs_recode:
        result = [_unicode_decode(token) for token in result]
    return result
1837 |
- |
1838 |
class _tolerant_shlex(shlex.shlex):
    """A shlex subclass whose 'source' handling tolerates unreadable
    include files: the failure is reported and an empty stream is
    substituted so parsing can continue."""
    def sourcehook(self, newfile):
        try:
            return shlex.shlex.sourcehook(self, newfile)
        except EnvironmentError as e:
            # Report the failed 'source' command and pretend the
            # sourced file was empty.
            writemsg(_("!!! Parse error in '%s': source command failed: %s\n") % \
                (self.infile, str(e)), noiselevel=-1)
            return (newfile, StringIO())
1846 |
- |
1847 |
# Matches names that are not valid shell variable identifiers: a leading
# digit, or any non-word character anywhere in the name.
_invalid_var_name_re = re.compile(r'^\d|\W')
1848 |
- |
1849 |
def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
    """Parse a bash-like key=value configuration file and return its
    assignments as a dict, or None if the file does not exist.

    @param mycfg: path of the config file
    @param tolerant: if true, recover from parse errors by returning what
        was parsed so far instead of raising
    @param allow_sourcing: if true, honor the shell 'source' keyword
    @param expand: if True, expand ${VAR} references; a dict may also be
        passed to seed the expansion with pre-existing variables
    @raises PermissionDenied: if the file is unreadable due to EACCES
    @raises portage.exception.ParseError: on parse errors when not tolerant
    """
    if isinstance(expand, dict):
        # Some existing variable definitions have been
        # passed in, for use in substitutions.
        expand_map = expand
        expand = True
    else:
        expand_map = {}
    mykeys = {}
    try:
        # Workaround for avoiding a silent error in shlex that
        # is triggered by a source statement at the end of the file without a
        # trailing newline after the source statement
        # NOTE: shlex doesn't seem to support unicode objects
        # (produces spurious \0 characters with python-2.6.2)
        if sys.hexversion < 0x3000000:
            content = open(_unicode_encode(mycfg,
                encoding=_encodings['fs'], errors='strict'), 'rb').read()
        else:
            content = open(_unicode_encode(mycfg,
                encoding=_encodings['fs'], errors='strict'), mode='r',
                encoding=_encodings['content'], errors='replace').read()
        if content and content[-1] != '\n':
            content += '\n'
    except IOError as e:
        if e.errno == PermissionDenied.errno:
            raise PermissionDenied(mycfg)
        if e.errno != errno.ENOENT:
            writemsg("open('%s', 'r'): %s\n" % (mycfg, e), noiselevel=-1)
            raise
        # A missing file is not an error; signal it with None.
        return None
    try:
        if tolerant:
            shlex_class = _tolerant_shlex
        else:
            shlex_class = shlex.shlex
        # The default shlex.sourcehook() implementation
        # only joins relative paths when the infile
        # attribute is properly set.
        lex = shlex_class(content, infile=mycfg, posix=True)
        lex.wordchars = string.digits + string.ascii_letters + \
            "~!@#$%*_\:;?,./-+{}"
        lex.quotes="\"'"
        if allow_sourcing:
            lex.source="source"
        while 1:
            key=lex.get_token()
            if key == "export":
                # Accept and ignore a leading 'export' keyword.
                key = lex.get_token()
            if key is None:
                #normal end of file
                break;
            equ=lex.get_token()
            if (equ==''):
                #unexpected end of file
                #lex.error_leader(self.filename,lex.lineno)
                if not tolerant:
                    writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
                        noiselevel=-1)
                    raise Exception(_("ParseError: Unexpected EOF: %s: on/before line %s") % (mycfg, lex.lineno))
                else:
                    return mykeys
            elif (equ!='='):
                #invalid token
                #lex.error_leader(self.filename,lex.lineno)
                if not tolerant:
                    raise Exception(_("ParseError: Invalid token "
                        "'%s' (not '='): %s: line %s") % \
                        (equ, mycfg, lex.lineno))
                else:
                    return mykeys
            val=lex.get_token()
            if val is None:
                #unexpected end of file
                #lex.error_leader(self.filename,lex.lineno)
                if not tolerant:
                    writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
                        noiselevel=-1)
                    raise portage.exception.CorruptionError(_("ParseError: Unexpected EOF: %s: line %s") % (mycfg, lex.lineno))
                else:
                    return mykeys
            key = _unicode_decode(key)
            val = _unicode_decode(val)

            if _invalid_var_name_re.search(key) is not None:
                # Reject (or, when tolerant, skip) assignments to names
                # that are not valid shell variable identifiers.
                if not tolerant:
                    raise Exception(_(
                        "ParseError: Invalid variable name '%s': line %s") % \
                        (key, lex.lineno - 1))
                writemsg(_("!!! Invalid variable name '%s': line %s in %s\n") \
                    % (key, lex.lineno - 1, mycfg), noiselevel=-1)
                continue

            if expand:
                # Expand ${VAR} references using values seen so far, and
                # make this value visible to later expansions.
                mykeys[key] = varexpand(val, expand_map)
                expand_map[key] = mykeys[key]
            else:
                mykeys[key] = val
    except SystemExit as e:
        raise
    except Exception as e:
        # Wrap any parser failure in a ParseError naming the file.
        raise portage.exception.ParseError(str(e)+" in "+mycfg)
    return mykeys
1952 |
- |
1953 |
#cache expansions of constant strings (strings with no variable
#references); keys carry the same leading-space sentinel used below
cexpand={}
def varexpand(mystring, mydict={}):
    """Expand bash-style ${VAR} and $VAR references in mystring using
    values from mydict, process backslash escape sequences, and convert
    unquoted newlines to spaces. Quote characters themselves are kept
    (their removal is handled by shlex downstream). Variable-free results
    are memoized in the module-level cexpand cache.

    NOTE: mydict defaults to a shared mutable dict, but it is only read.
    """
    newstring = cexpand.get(" "+mystring, None)
    if newstring is not None:
        return newstring

    """
    new variable expansion code. Preserves quotes, handles \n, etc.
    This code is used by the configfile code, as well as others (parser)
    This would be a good bunch of code to port to C.
    """
    numvars=0
    # A leading sentinel space makes the pos-1 lookbehind safe at pos 1.
    mystring=" "+mystring
    #in single, double quotes
    insing=0
    indoub=0
    pos=1
    newstring=" "
    while (pos<len(mystring)):
        if (mystring[pos]=="'") and (mystring[pos-1]!="\\"):
            if (indoub):
                # A single quote inside double quotes is literal.
                newstring=newstring+"'"
            else:
                newstring += "'" # Quote removal is handled by shlex.
                insing=not insing
            pos=pos+1
            continue
        elif (mystring[pos]=='"') and (mystring[pos-1]!="\\"):
            if (insing):
                # A double quote inside single quotes is literal.
                newstring=newstring+'"'
            else:
                newstring += '"' # Quote removal is handled by shlex.
                indoub=not indoub
            pos=pos+1
            continue
        if (not insing):
            #expansion time
            if (mystring[pos]=="\n"):
                #convert newlines to spaces
                newstring=newstring+" "
                pos=pos+1
            elif (mystring[pos]=="\\"):
                #backslash expansion time
                if (pos+1>=len(mystring)):
                    # Trailing backslash: keep it literally.
                    newstring=newstring+mystring[pos]
                    break
                else:
                    a=mystring[pos+1]
                    pos=pos+2
                    if a=='a':
                        newstring=newstring+chr(0o07)
                    elif a=='b':
                        newstring=newstring+chr(0o10)
                    elif a=='e':
                        newstring=newstring+chr(0o33)
                    elif (a=='f') or (a=='n'):
                        newstring=newstring+chr(0o12)
                    elif a=='r':
                        newstring=newstring+chr(0o15)
                    elif a=='t':
                        newstring=newstring+chr(0o11)
                    elif a=='v':
                        newstring=newstring+chr(0o13)
                    elif a!='\n':
                        #remove backslash only, as bash does: this takes care of \\ and \' and \" as well
                        newstring=newstring+mystring[pos-1:pos]
                    continue
            elif (mystring[pos]=="$") and (mystring[pos-1]!="\\"):
                pos=pos+1
                if mystring[pos]=="{":
                    pos=pos+1
                    braced=True
                else:
                    braced=False
                myvstart=pos
                validchars=string.ascii_letters+string.digits+"_"
                # Scan the variable name.
                while mystring[pos] in validchars:
                    if (pos+1)>=len(mystring):
                        if braced:
                            # Unterminated ${...}: whole expansion fails.
                            cexpand[mystring]=""
                            return ""
                        else:
                            pos=pos+1
                            break
                    pos=pos+1
                myvarname=mystring[myvstart:pos]
                if braced:
                    if mystring[pos]!="}":
                        cexpand[mystring]=""
                        return ""
                    else:
                        pos=pos+1
                if len(myvarname)==0:
                    # Bare "$" or "${}": whole expansion fails.
                    cexpand[mystring]=""
                    return ""
                numvars=numvars+1
                # Unknown variables expand to nothing, as in the shell.
                if myvarname in mydict:
                    newstring=newstring+mydict[myvarname]
            else:
                newstring=newstring+mystring[pos]
                pos=pos+1
        else:
            # Inside single quotes: copy characters verbatim.
            newstring=newstring+mystring[pos]
            pos=pos+1
    if numvars==0:
        # Only variable-free strings are safe to cache; others depend on
        # the contents of mydict.
        cexpand[mystring]=newstring[1:]
    return newstring[1:]
2061 |
- |
2062 |
# pickle_write was broken and removed, but the name is kept so that
# existing "from portage.util import pickle_write" statements still work.
pickle_write = None
2064 |
- |
2065 |
def pickle_read(filename, default=None, debug=0):
    """Load and return a pickled object from filename, or default when
    the file is unreadable or unpicklable.

    @param filename: path of the pickle file
    @param default: value returned on any failure
    @param debug: unused; kept for backward compatibility
    """
    # Local import deliberately shadows the portage os wrapper with the
    # real os module, as the original code did.
    import os
    if not os.access(filename, os.R_OK):
        writemsg(_("pickle_read(): File not readable. '")+filename+"'\n", 1)
        return default
    data = None
    try:
        myf = open(_unicode_encode(filename,
            encoding=_encodings['fs'], errors='strict'), 'rb')
        # Fix: close the file even when unpickling raises, instead of
        # leaking the handle.
        try:
            data = pickle.load(myf)
        finally:
            myf.close()
        writemsg(_("pickle_read(): Loaded pickle. '")+filename+"'\n", 1)
    except SystemExit:
        raise
    except Exception as e:
        # Deliberate best-effort: report and fall back to the default.
        writemsg(_("!!! Failed to load pickle: ")+str(e)+"\n", 1)
        data = default
    return data
2085 |
- |
2086 |
def dump_traceback(msg, noiselevel=1):
    """Write the active exception's traceback (or, outside an exception
    handler, the current call stack) to the message stream, framed by
    separator lines and headed by msg."""
    import sys, traceback
    exc_type, exc_value, exc_tb = sys.exc_info()
    if exc_tb:
        # Inside an exception handler: show the exception's traceback.
        stack = traceback.extract_tb(exc_tb)
        error = str(exc_value)
    else:
        # No active exception: show the call stack, minus this frame.
        stack = traceback.extract_stack()[:-1]
        error = None
    writemsg("\n====================================\n", noiselevel=noiselevel)
    writemsg("%s\n\n" % msg, noiselevel=noiselevel)
    for formatted in traceback.format_list(stack):
        writemsg(formatted, noiselevel=noiselevel)
    if error:
        writemsg(error+"\n", noiselevel=noiselevel)
    writemsg("====================================\n\n", noiselevel=noiselevel)
2102 |
- |
2103 |
class cmp_sort_key(object):
    """
    Adapter that turns an old-style cmp function into objects suitable
    for the "key" keyword of list.sort()/sorted(), which in python-3.0
    no longer accepts a "cmp" argument. Calling an instance wraps the
    given value in a key object whose __lt__ consults the cmp function,
    which eases porting code to python-3.0.
    """
    __slots__ = ("_cmp_func",)

    def __init__(self, cmp_func):
        """
        @type cmp_func: callable which takes 2 positional arguments
        @param cmp_func: A cmp function.
        """
        self._cmp_func = cmp_func

    def __call__(self, obj):
        # Produce the per-element key object that sort() will compare.
        return self._cmp_key(self._cmp_func, obj)

    class _cmp_key(object):
        __slots__ = ("_cmp_func", "_obj")

        def __init__(self, cmp_func, obj):
            self._cmp_func = cmp_func
            self._obj = obj

        def __lt__(self, other):
            # Key objects are only comparable among themselves.
            if self.__class__ is not other.__class__:
                raise TypeError("Expected type %s, got %s" % \
                    (self.__class__, other.__class__))
            return self._cmp_func(self._obj, other._obj) < 0
2136 |
- |
2137 |
def unique_array(s):
    """Return a list of the elements of s, in arbitrary order, without
    duplicates (lifted from the Python Cookbook; credit: Tim Peters)."""
    count = len(s)
    # Fast path: hashable elements allow a linear-time set conversion.
    try:
        return list(set(s))
    except TypeError:
        pass

    # Unhashable but sortable: sort, then sweep out adjacent duplicates.
    try:
        ordered = list(s)
        ordered.sort()
    except TypeError:
        pass
    else:
        # An empty s always takes the set() fast path above.
        assert count > 0
        last = ordered[0]
        write = idx = 1
        while idx < count:
            if ordered[idx] != last:
                last = ordered[idx]
                ordered[write] = last
                write += 1
            idx += 1
        return ordered[:write]

    # Last resort: quadratic membership scan.
    uniques = []
    for item in s:
        if item not in uniques:
            uniques.append(item)
    return uniques
2170 |
- |
2171 |
def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
    stat_cached=None, follow_links=True):
    """Apply user, group, and mode bits to a file if the existing bits do not
    already match. The default behavior is to force an exact match of mode
    bits. When mask=0 is specified, mode bits on the target file are allowed
    to be a superset of the mode argument (via logical OR). When mask>0, the
    mode bits that the target file is allowed to have are restricted via
    logical XOR.
    Returns True if the permissions were modified and False otherwise."""

    modified = False

    if stat_cached is None:
        try:
            if follow_links:
                stat_cached = os.stat(filename)
            else:
                stat_cached = os.lstat(filename)
        except OSError as oe:
            # Translate OS errors into the corresponding portage
            # exceptions, naming the failed call.
            func_call = "stat('%s')" % filename
            if oe.errno == errno.EPERM:
                raise OperationNotPermitted(func_call)
            elif oe.errno == errno.EACCES:
                raise PermissionDenied(func_call)
            elif oe.errno == errno.ENOENT:
                raise FileNotFound(filename)
            else:
                raise

    # Only chown when the requested owner/group actually differs.
    if (uid != -1 and uid != stat_cached.st_uid) or \
        (gid != -1 and gid != stat_cached.st_gid):
        try:
            if follow_links:
                os.chown(filename, uid, gid)
            else:
                import portage.data
                portage.data.lchown(filename, uid, gid)
            modified = True
        except OSError as oe:
            func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
            if oe.errno == errno.EPERM:
                raise OperationNotPermitted(func_call)
            elif oe.errno == errno.EACCES:
                raise PermissionDenied(func_call)
            elif oe.errno == errno.EROFS:
                raise ReadOnlyFileSystem(func_call)
            elif oe.errno == errno.ENOENT:
                raise FileNotFound(filename)
            else:
                raise

    new_mode = -1
    st_mode = stat_cached.st_mode & 0o7777 # protect from unwanted bits
    if mask >= 0:
        if mode == -1:
            mode = 0 # Don't add any mode bits when mode is unspecified.
        else:
            mode = mode & 0o7777
        # Chmod when the file lacks a requested bit or carries a bit
        # that the mask forbids.
        if (mode & st_mode != mode) or \
            ((mask ^ st_mode) & st_mode != st_mode):
            new_mode = mode | st_mode
            new_mode = (mask ^ new_mode) & new_mode
    elif mode != -1:
        mode = mode & 0o7777 # protect from unwanted bits
        if mode != st_mode:
            new_mode = mode

    # The chown system call may clear S_ISUID and S_ISGID
    # bits, so those bits are restored if necessary.
    if modified and new_mode == -1 and \
        (st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
        if mode == -1:
            new_mode = st_mode
        else:
            mode = mode & 0o7777
            if mask >= 0:
                new_mode = mode | st_mode
                new_mode = (mask ^ new_mode) & new_mode
            else:
                new_mode = mode
            if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
                # No setuid/setgid bit would survive anyway; skip chmod.
                new_mode = -1

    if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
        # Mode doesn't matter for symlinks.
        new_mode = -1

    if new_mode != -1:
        try:
            os.chmod(filename, new_mode)
            modified = True
        except OSError as oe:
            func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
            if oe.errno == errno.EPERM:
                raise OperationNotPermitted(func_call)
            elif oe.errno == errno.EACCES:
                raise PermissionDenied(func_call)
            elif oe.errno == errno.EROFS:
                raise ReadOnlyFileSystem(func_call)
            elif oe.errno == errno.ENOENT:
                raise FileNotFound(filename)
            raise
    return modified
2274 |
- |
2275 |
def apply_stat_permissions(filename, newstat, **kwargs):
    """Apply the ownership and mode recorded in a stat result to
    filename, via apply_secpass_permissions."""
    return apply_secpass_permissions(filename, uid=newstat.st_uid,
        gid=newstat.st_gid, mode=newstat.st_mode, **kwargs)
2280 |
- |
2281 |
def apply_recursive_permissions(top, uid=-1, gid=-1,
    dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
    """A wrapper around apply_secpass_permissions that applies permissions
    recursively to a directory tree. If the optional onerror argument is
    given, it must be a function accepting one PortageException argument.
    Returns True if all permissions are applied and False if some are left
    unapplied."""

    if onerror is None:
        # Default behavior is to dump errors to stderr so they won't
        # go unnoticed. Callers can pass in a quiet instance.
        def onerror(e):
            if isinstance(e, OperationNotPermitted):
                writemsg(_("Operation Not Permitted: %s\n") % str(e),
                    noiselevel=-1)
            elif isinstance(e, FileNotFound):
                writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
            else:
                raise

    all_applied = True
    for dirpath, dirnames, filenames in os.walk(top):
        try:
            if not apply_secpass_permissions(dirpath,
                uid=uid, gid=gid, mode=dirmode, mask=dirmask):
                all_applied = False
        except PortageException as e:
            all_applied = False
            onerror(e)

        for name in filenames:
            try:
                if not apply_secpass_permissions(
                    os.path.join(dirpath, name),
                    uid=uid, gid=gid, mode=filemode, mask=filemask):
                    all_applied = False
            except PortageException as e:
                # Files can legitimately vanish while we walk (e.g. in
                # DISTCC_DIR), so InvalidLocation errors (FileNotFound,
                # DirectoryNotFound) don't count as failures.
                if not isinstance(e, portage.exception.InvalidLocation):
                    all_applied = False
                    onerror(e)
    return all_applied
2326 |
- |
2327 |
def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
    stat_cached=None, follow_links=True):
    """A wrapper around apply_permissions that uses secpass and simple
    logic to apply as much of the permissions as possible without
    generating an obviously avoidable permission exception. Despite
    attempts to avoid an exception, it's possible that one will be raised
    anyway, so be prepared.
    Returns True if all permissions are applied and False if some are left
    unapplied."""

    if stat_cached is None:
        try:
            if follow_links:
                stat_cached = os.stat(filename)
            else:
                stat_cached = os.lstat(filename)
        except OSError as oe:
            # Translate OS errors into the corresponding portage
            # exceptions, naming the failed call.
            func_call = "stat('%s')" % filename
            if oe.errno == errno.EPERM:
                raise OperationNotPermitted(func_call)
            elif oe.errno == errno.EACCES:
                raise PermissionDenied(func_call)
            elif oe.errno == errno.ENOENT:
                raise FileNotFound(filename)
            else:
                raise

    all_applied = True

    import portage.data # not imported globally because of circular dep
    if portage.data.secpass < 2:
        # Not running with full privileges: drop the parts of the
        # request that would predictably fail, and report partial
        # application via the return value.

        if uid != -1 and \
            uid != stat_cached.st_uid:
            all_applied = False
            uid = -1

        if gid != -1 and \
            gid != stat_cached.st_gid and \
            gid not in os.getgroups():
            all_applied = False
            gid = -1

    apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
        stat_cached=stat_cached, follow_links=follow_links)
    return all_applied
2373 |
- |
2374 |
class atomic_ofstream(ObjectProxy):
    """Write a file atomically via os.rename(). Atomic replacement prevents
    interprocess interference and prevents corruption of the target
    file when the write is interrupted (for example, when an 'out of space'
    error occurs)."""

    def __init__(self, filename, mode='w', follow_links=True, **kargs):
        """Opens a temporary filename.pid in the same directory as filename."""
        ObjectProxy.__init__(self)
        object.__setattr__(self, '_aborted', False)
        if 'b' in mode:
            open_func = open
        else:
            # Text mode goes through codecs.open with the content
            # encoding unless the caller overrides it.
            open_func = codecs.open
            kargs.setdefault('encoding', _encodings['content'])
            kargs.setdefault('errors', 'backslashreplace')

        if follow_links:
            # Prefer writing next to the symlink target so the rename
            # replaces the real file.
            canonical_path = os.path.realpath(filename)
            object.__setattr__(self, '_real_name', canonical_path)
            tmp_name = "%s.%i" % (canonical_path, os.getpid())
            try:
                object.__setattr__(self, '_file',
                    open_func(_unicode_encode(tmp_name,
                    encoding=_encodings['fs'], errors='strict'),
                    mode=mode, **kargs))
                return
            except IOError as e:
                if canonical_path == filename:
                    raise
                # Fall back to the non-canonical path below.
                writemsg(_("!!! Failed to open file: '%s'\n") % tmp_name,
                    noiselevel=-1)
                writemsg("!!! %s\n" % str(e), noiselevel=-1)

        object.__setattr__(self, '_real_name', filename)
        tmp_name = "%s.%i" % (filename, os.getpid())
        object.__setattr__(self, '_file',
            open_func(_unicode_encode(tmp_name,
            encoding=_encodings['fs'], errors='strict'),
            mode=mode, **kargs))

    def _get_target(self):
        # The proxied object is the underlying temp-file handle.
        return object.__getattribute__(self, '_file')

    def __getattribute__(self, attr):
        # Intercept only the lifecycle methods; all other attribute
        # access is delegated to the underlying file object.
        if attr in ('close', 'abort', '__del__'):
            return object.__getattribute__(self, attr)
        return getattr(object.__getattribute__(self, '_file'), attr)

    def close(self):
        """Closes the temporary file, copies permissions (if possible),
        and performs the atomic replacement via os.rename(). If the abort()
        method has been called, then the temp file is closed and removed."""
        f = object.__getattribute__(self, '_file')
        real_name = object.__getattribute__(self, '_real_name')
        if not f.closed:
            try:
                f.close()
                if not object.__getattribute__(self, '_aborted'):
                    try:
                        # Carry over the target's existing permissions.
                        apply_stat_permissions(f.name, os.stat(real_name))
                    except OperationNotPermitted:
                        pass
                    except FileNotFound:
                        pass
                    except OSError as oe: # from the above os.stat call
                        if oe.errno in (errno.ENOENT, errno.EPERM):
                            pass
                        else:
                            raise
                    os.rename(f.name, real_name)
            finally:
                # Make sure we cleanup the temp file
                # even if an exception is raised.
                try:
                    os.unlink(f.name)
                except OSError as oe:
                    pass

    def abort(self):
        """If an error occurs while writing the file, the user should
        call this method in order to leave the target file unchanged.
        This will call close() automatically."""
        if not object.__getattribute__(self, '_aborted'):
            object.__setattr__(self, '_aborted', True)
            self.close()

    def __del__(self):
        """If the user does not explicitly call close(), it is
        assumed that an error has occurred, so we abort()."""
        try:
            f = object.__getattribute__(self, '_file')
        except AttributeError:
            pass
        else:
            if not f.closed:
                self.abort()
        # ensure destructor from the base class is called
        base_destructor = getattr(ObjectProxy, '__del__', None)
        if base_destructor is not None:
            base_destructor(self)
2475 |
- |
2476 |
def write_atomic(file_path, content, **kwargs):
    """Atomically replace file_path with the given content, translating
    common OS errors into the corresponding portage exceptions."""
    stream = None
    try:
        stream = atomic_ofstream(file_path, **kwargs)
        stream.write(content)
        stream.close()
    except (IOError, OSError) as e:
        # Leave the target untouched, then map the errno.
        if stream:
            stream.abort()
        func_call = "write_atomic('%s')" % file_path
        error_map = {
            errno.EPERM: OperationNotPermitted,
            errno.EACCES: PermissionDenied,
            errno.EROFS: ReadOnlyFileSystem,
        }
        if e.errno in error_map:
            raise error_map[e.errno](func_call)
        elif e.errno == errno.ENOENT:
            raise FileNotFound(file_path)
        else:
            raise
2496 |
- |
2497 |
def ensure_dirs(dir_path, *args, **kwargs):
    """Create a directory (and any missing parents) and call
    apply_permissions. Returns True if a directory was created or the
    permissions needed to be modified, and False otherwise."""

    created_dir = False
    try:
        os.makedirs(dir_path)
        created_dir = True
    except OSError as oe:
        error_map = {
            errno.EPERM: OperationNotPermitted,
            errno.EACCES: PermissionDenied,
            errno.EROFS: ReadOnlyFileSystem,
        }
        if oe.errno in (errno.EEXIST, errno.EISDIR):
            # Already present; nothing to create.
            pass
        elif oe.errno in error_map:
            raise error_map[oe.errno]("makedirs('%s')" % dir_path)
        else:
            raise
    perms_modified = apply_permissions(dir_path, *args, **kwargs)
    return created_dir or perms_modified
2521 |
- |
2522 |
class LazyItemsDict(UserDict):
	"""A mapping object that behaves like a standard dict except that it allows
	for lazy initialization of values via callable objects. Lazy items can be
	overwritten and deleted just as normal items."""

	# Maps key -> _LazyItem for keys whose values are computed on demand.
	__slots__ = ('lazy_items',)

	def __init__(self, *args, **kwargs):

		self.lazy_items = {}
		UserDict.__init__(self, *args, **kwargs)

	def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
		"""Add a lazy item for the given key. When the item is requested,
		value_callable will be called with *pargs and **kwargs arguments."""
		self.lazy_items[item_key] = \
			self._LazyItem(value_callable, pargs, kwargs, False)
		# make it show up in self.keys(), etc...
		UserDict.__setitem__(self, item_key, None)

	def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
		"""This is like addLazyItem except value_callable will only be called
		a maximum of 1 time and the result will be cached for future requests."""
		self.lazy_items[item_key] = \
			self._LazyItem(value_callable, pargs, kwargs, True)
		# make it show up in self.keys(), etc...
		UserDict.__setitem__(self, item_key, None)

	def update(self, *args, **kwargs):
		"""Like dict.update, but preserves laziness: when the source is
		another LazyItemsDict, its lazy items are copied without being
		evaluated."""
		if len(args) > 1:
			raise TypeError(
				"expected at most 1 positional argument, got " + \
				repr(len(args)))
		if args:
			map_obj = args[0]
		else:
			map_obj = None
		if map_obj is None:
			pass
		elif isinstance(map_obj, LazyItemsDict):
			for k in map_obj:
				if k in map_obj.lazy_items:
					# Placeholder only; the _LazyItem is copied below.
					UserDict.__setitem__(self, k, None)
				else:
					UserDict.__setitem__(self, k, map_obj[k])
			self.lazy_items.update(map_obj.lazy_items)
		else:
			UserDict.update(self, map_obj)
		if kwargs:
			UserDict.update(self, kwargs)

	def __getitem__(self, item_key):
		if item_key in self.lazy_items:
			lazy_item = self.lazy_items[item_key]
			# pargs/kwargs are stored as None when empty (see _LazyItem).
			pargs = lazy_item.pargs
			if pargs is None:
				pargs = ()
			kwargs = lazy_item.kwargs
			if kwargs is None:
				kwargs = {}
			result = lazy_item.func(*pargs, **kwargs)
			if lazy_item.singleton:
				# Cache the computed value; __setitem__ also discards
				# the lazy item so func is never called again.
				self[item_key] = result
			return result

		else:
			return UserDict.__getitem__(self, item_key)

	def __setitem__(self, item_key, value):
		# An explicit assignment replaces any pending lazy item.
		if item_key in self.lazy_items:
			del self.lazy_items[item_key]
		UserDict.__setitem__(self, item_key, value)

	def __delitem__(self, item_key):
		if item_key in self.lazy_items:
			del self.lazy_items[item_key]
		UserDict.__delitem__(self, item_key)

	def clear(self):
		self.lazy_items.clear()
		UserDict.clear(self)

	def copy(self):
		return self.__copy__()

	def __copy__(self):
		return self.__class__(self)

	def __deepcopy__(self, memo=None):
		"""
		WARNING: If any of the lazy items contains a bound method then it's
		typical for deepcopy() to raise an exception like this:

		File "/usr/lib/python2.5/copy.py", line 189, in deepcopy
			y = _reconstruct(x, rv, 1, memo)
		File "/usr/lib/python2.5/copy.py", line 322, in _reconstruct
			y = callable(*args)
		File "/usr/lib/python2.5/copy_reg.py", line 92, in __newobj__
			return cls.__new__(cls, *args)
		TypeError: instancemethod expected at least 2 arguments, got 0

		If deepcopy() needs to work, this problem can be avoided by
		implementing lazy items with normal (non-bound) functions.

		If deepcopy() raises a TypeError for a lazy item that has been added
		via a call to addLazySingleton(), the singleton will be automatically
		evaluated and deepcopy() will instead be called on the result.
		"""
		if memo is None:
			memo = {}
		from copy import deepcopy
		result = self.__class__()
		memo[id(self)] = result
		for k in self:
			k_copy = deepcopy(k, memo)
			if k in self.lazy_items:
				lazy_item = self.lazy_items[k]
				try:
					result.lazy_items[k_copy] = deepcopy(lazy_item, memo)
				except TypeError:
					if not lazy_item.singleton:
						raise
					# Fall back: evaluate the singleton (self[k]) and
					# deep-copy the resulting value instead.
					UserDict.__setitem__(result,
						k_copy, deepcopy(self[k], memo))
				else:
					# Lazy item copied intact; store the usual placeholder.
					UserDict.__setitem__(result, k_copy, None)
			else:
				UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
		return result

	class _LazyItem(object):
		"""Record describing a deferred value: the callable, its stored
		arguments, and whether the result should be cached (singleton)."""

		__slots__ = ('func', 'pargs', 'kwargs', 'singleton')

		def __init__(self, func, pargs, kwargs, singleton):

			# Empty argument containers are collapsed to None; consumers
			# (LazyItemsDict.__getitem__) translate None back to ()/{} .
			if not pargs:
				pargs = None
			if not kwargs:
				kwargs = None

			self.func = func
			self.pargs = pargs
			self.kwargs = kwargs
			self.singleton = singleton

		def __copy__(self):
			return self.__class__(self.func, self.pargs,
				self.kwargs, self.singleton)

		def __deepcopy__(self, memo=None):
			"""
			Override this since the default implementation can fail silently,
			leaving some attributes unset.
			"""
			if memo is None:
				memo = {}
			from copy import deepcopy
			result = self.__copy__()
			memo[id(self)] = result
			result.func = deepcopy(self.func, memo)
			result.pargs = deepcopy(self.pargs, memo)
			result.kwargs = deepcopy(self.kwargs, memo)
			result.singleton = deepcopy(self.singleton, memo)
			return result
2687 |
- |
2688 |
class ConfigProtect(object):
	"""Tracks CONFIG_PROTECT and CONFIG_PROTECT_MASK path lists and
	answers whether a given file path is protected from being
	overwritten during a merge."""

	def __init__(self, myroot, protect_list, mask_list):
		"""
		@param myroot: root directory that the (relative) entries of
			protect_list and mask_list are joined against
		@param protect_list: paths to protect (CONFIG_PROTECT)
		@param mask_list: paths exempted from protection
			(CONFIG_PROTECT_MASK)
		"""
		self.myroot = myroot
		self.protect_list = protect_list
		self.mask_list = mask_list
		self.updateprotect()

	def updateprotect(self):
		"""Update internal state for isprotected() calls. Nonexistent paths
		are ignored."""

		os = _os_merge

		self.protect = []
		self._dirs = set()
		for x in self.protect_list:
			ppath = normalize_path(
				os.path.join(self.myroot, x.lstrip(os.path.sep)))
			try:
				if stat.S_ISDIR(os.stat(ppath).st_mode):
					self._dirs.add(ppath)
				self.protect.append(ppath)
			except OSError:
				# If it doesn't exist, there's no need to protect it.
				pass

		self.protectmask = []
		for x in self.mask_list:
			ppath = normalize_path(
				os.path.join(self.myroot, x.lstrip(os.path.sep)))
			try:
				# Use lstat so that anything, even a broken symlink can be
				# protected.
				if stat.S_ISDIR(os.lstat(ppath).st_mode):
					self._dirs.add(ppath)
				self.protectmask.append(ppath)
				# Now use stat in case this is a symlink to a directory.
				if stat.S_ISDIR(os.stat(ppath).st_mode):
					self._dirs.add(ppath)
			except OSError:
				# If it doesn't exist, there's no need to mask it.
				pass

	def isprotected(self, obj):
		"""Returns True if obj is protected, False otherwise. The caller must
		ensure that obj is normalized with a single leading slash. A trailing
		slash is optional for directories."""
		# The longest matching protect/mask prefix wins, so track the
		# length of the best match found on each side.
		masked = 0
		protected = 0
		sep = os.path.sep
		for ppath in self.protect:
			if len(ppath) > masked and obj.startswith(ppath):
				if ppath in self._dirs:
					if obj != ppath and not obj.startswith(ppath + sep):
						# /etc/foo does not match /etc/foobaz
						continue
				elif obj != ppath:
					# force exact match when CONFIG_PROTECT lists a
					# non-directory
					continue
				protected = len(ppath)
				#config file management
				for pmpath in self.protectmask:
					if len(pmpath) >= protected and obj.startswith(pmpath):
						if pmpath in self._dirs:
							if obj != pmpath and \
								not obj.startswith(pmpath + sep):
								# /etc/foo does not match /etc/foobaz
								continue
						elif obj != pmpath:
							# force exact match when CONFIG_PROTECT_MASK lists
							# a non-directory
							continue
						#skip, it's in the mask
						masked = len(pmpath)
		return protected > masked
2766 |
- |
2767 |
def new_protect_filename(mydest, newmd5=None):
	"""Resolves a config-protect filename for merging, optionally
	using the last filename if the md5 matches.
	(dest,md5) ==> 'string' --- path_to_target_filename
	(dest) ==> ('next', 'highest') --- next_target and most-recent_target
	"""

	# config protection filename format:
	# ._cfg0000_foo
	# 0123456789012

	os = _os_merge

	# Nothing to protect if the destination doesn't exist yet.
	if not os.path.exists(mydest):
		return mydest

	real_filename = os.path.basename(mydest)
	real_dirname = os.path.dirname(mydest)

	# Scan the directory for existing ._cfgNNNN_<name> entries and
	# remember the highest protect number seen.
	prot_num = -1
	last_pfile = ""
	for candidate in os.listdir(real_dirname):
		if not candidate.startswith("._cfg"):
			continue
		if candidate[10:] != real_filename:
			continue
		try:
			candidate_num = int(candidate[5:9])
		except ValueError:
			continue
		if candidate_num > prot_num:
			prot_num = candidate_num
			last_pfile = candidate
	prot_num += 1

	new_pfile = normalize_path(os.path.join(real_dirname,
		"._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
	old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
	if last_pfile and newmd5:
		import portage.checksum
		try:
			last_pfile_md5 = portage.checksum._perform_md5_merge(old_pfile)
		except FileNotFound:
			# The file suddenly disappeared or it's a broken symlink.
			pass
		else:
			if last_pfile_md5 == newmd5:
				# Content unchanged since the last protect file, so
				# reuse it instead of creating another one.
				return old_pfile
	return new_pfile
2816 |
- |
2817 |
def find_updated_config_files(target_root, config_protect):
	"""
	Generator that yields one tuple per protected path that has pending
	config updates (._cfg???? files):

		(protected_dir, file_list)  when the protected path is a directory
		(protected_file, None)      when it is a single protected file

	Yields nothing when no configuration files need to be updated.

	@param target_root: root directory the config_protect entries are
		joined against
	@param config_protect: iterable of protected paths (relative to
		target_root)
	"""

	os = _os_merge

	if config_protect:
		# directories with some protect files in them
		for x in config_protect:
			files = []

			x = os.path.join(target_root, x.lstrip(os.path.sep))
			# Skip paths we could not update anyway.
			if not os.access(x, os.W_OK):
				continue
			try:
				mymode = os.lstat(x).st_mode
			except OSError:
				continue

			if stat.S_ISLNK(mymode):
				# We want to treat it like a directory if it
				# is a symlink to an existing directory.
				try:
					real_mode = os.stat(x).st_mode
					if stat.S_ISDIR(real_mode):
						mymode = real_mode
				except OSError:
					pass

			# NOTE(review): the find command is assembled by string
			# interpolation and run through a shell; a path containing a
			# single quote would break the quoting. Consider a list-based
			# subprocess invocation.
			if stat.S_ISDIR(mymode):
				mycommand = \
					"find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
			else:
				mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
					os.path.split(x.rstrip(os.path.sep))
			mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
			a = subprocess_getstatusoutput(mycommand)

			if a[0] == 0:
				# NUL-separated output from -print0.
				files = a[1].split('\0')
				# split always produces an empty string as the last element
				if files and not files[-1]:
					del files[-1]
				if files:
					if stat.S_ISDIR(mymode):
						yield (x, files)
					else:
						yield (x, None)
2871 |
- |
2872 |
def getlibpaths(root):
	"""Return a list of paths that are used for library lookups.

	@param root: root directory used to locate etc/ld.so.conf
	@rtype: list
	"""
	# Search order based on the information from ld.so(8):
	# LD_LIBRARY_PATH, then ld.so.conf entries, then the defaults.
	paths = os.environ.get("LD_LIBRARY_PATH", "").split(":")
	paths += grabfile(os.path.join(root, "etc", "ld.so.conf"))
	paths += ["/usr/lib", "/lib"]

	# Normalize and drop empty entries.
	return [normalize_path(p) for p in paths if p]