Gentoo Archives: gentoo-commits

From: "Fabian Groffen (grobian)" <grobian@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] portage r15434 - in main/branches/prefix: . man pym/_emerge pym/portage pym/portage/dbapi pym/portage/package/ebuild pym/portage/util
Date: Mon, 22 Feb 2010 13:26:36
Message-Id: E1NjYIn-00038C-0I@stork.gentoo.org
1 Author: grobian
2 Date: 2010-02-22 13:26:20 +0000 (Mon, 22 Feb 2010)
3 New Revision: 15434
4
5 Added:
6 main/branches/prefix/pym/portage/package/
7 main/branches/prefix/pym/portage/util/__init__.py
8 main/branches/prefix/pym/portage/util/digraph.py
9 Removed:
10 main/branches/prefix/pym/portage/util.py
11 Modified:
12 main/branches/prefix/RELEASE-NOTES
13 main/branches/prefix/man/emerge.1
14 main/branches/prefix/pym/_emerge/BinpkgVerifier.py
15 main/branches/prefix/pym/_emerge/create_depgraph_params.py
16 main/branches/prefix/pym/_emerge/help.py
17 main/branches/prefix/pym/portage/__init__.py
18 main/branches/prefix/pym/portage/dbapi/bintree.py
19 main/branches/prefix/pym/portage/dbapi/porttree.py
20 main/branches/prefix/pym/portage/package/ebuild/config.py
21 Log:
22 Merged from trunk -r15421:15429
23
24 | 15422 | Move the portage.util module into a directory, for |
25 | zmedico | splitting into smaller files. |
26
27 | 15423 | Move portage.digraph class to portage.util.digraph.digraph. |
28 | zmedico | |
29
30 | 15424 | Move portage.config class to |
31 | zmedico | portage.package.ebuild.config.config. |
32
33 | 15425 | Move portage.fetch() to |
34 | zmedico | portage.package.ebuild.fetch.fetch(). |
35
36 | 15426 | Use lazy import for portage.Manifest. |
37 | zmedico | |
38
39 | 15427 | Enable --rebuilt-binaries automatically only when in |
40 | zmedico | --usepkgonly or --getbinpkgonly mode, since --usepkgonly |
41 | | behaves better in cases when the portage tree is not |
42 | | exactly the same revision that was used to build the |
43 | | packages. |
44
45 | 15428 | Fix "NameError: global name 'basestring' is not defined" |
46 | arfrever | with Python 3. |
47
48 | 15429 | Fix "AttributeError: 'module' object has no attribute |
49 | arfrever | 'mappings'". Also fix some typos. |
50
51
52 Modified: main/branches/prefix/RELEASE-NOTES
53 ===================================================================
54 --- main/branches/prefix/RELEASE-NOTES 2010-02-22 13:02:22 UTC (rev 15433)
55 +++ main/branches/prefix/RELEASE-NOTES 2010-02-22 13:26:20 UTC (rev 15434)
56 @@ -24,7 +24,8 @@
57 * The new --rebuilt-binaries option will replace installed packages with binary
58 packages that have been rebuilt. Rebuilds are detected by comparison of
59 BUILD_TIME package metadata. This option is enabled automatically when using
60 - binary packages (--usepkg or --getbinpkg) together with --update and --deep.
61 + binary packages (--usepkgonly or --getbinpkgonly) together with --update and
62 + --deep.
63
64 portage-2.1.7
65 ==================================
66
67 Modified: main/branches/prefix/man/emerge.1
68 ===================================================================
69 --- main/branches/prefix/man/emerge.1 2010-02-22 13:02:22 UTC (rev 15433)
70 +++ main/branches/prefix/man/emerge.1 2010-02-22 13:26:20 UTC (rev 15434)
71 @@ -482,7 +482,7 @@
72 been rebuilt. Rebuilds are detected by comparison of
73 BUILD_TIME package metadata. This option is enabled
74 automatically when using binary packages
75 -(\fB\-\-usepkg\fR or \fB\-\-getbinpkg\fR) together with
76 +(\fB\-\-usepkgonly\fR or \fB\-\-getbinpkgonly\fR) together with
77 \fB\-\-update\fR and \fB\-\-deep\fR.
78 .TP
79 .BR "\-\-reinstall changed\-use"
80
81 Modified: main/branches/prefix/pym/_emerge/BinpkgVerifier.py
82 ===================================================================
83 --- main/branches/prefix/pym/_emerge/BinpkgVerifier.py 2010-02-22 13:02:22 UTC (rev 15433)
84 +++ main/branches/prefix/pym/_emerge/BinpkgVerifier.py 2010-02-22 13:26:20 UTC (rev 15434)
85 @@ -9,6 +9,7 @@
86 from portage import os
87 from portage import _encodings
88 from portage import _unicode_encode
89 +from portage.package.ebuild.fetch import _checksum_failure_temp_file
90 import codecs
91
92 class BinpkgVerifier(AsynchronousTask):
93 @@ -77,7 +78,7 @@
94 else:
95 pkg_path = bintree.getname(pkg.cpv)
96 head, tail = os.path.split(pkg_path)
97 - temp_filename = portage._checksum_failure_temp_file(head, tail)
98 + temp_filename = _checksum_failure_temp_file(head, tail)
99 writemsg("File renamed to '%s'\n" % (temp_filename,),
100 noiselevel=-1)
101 finally:
102
103 Modified: main/branches/prefix/pym/_emerge/create_depgraph_params.py
104 ===================================================================
105 --- main/branches/prefix/pym/_emerge/create_depgraph_params.py 2010-02-22 13:02:22 UTC (rev 15433)
106 +++ main/branches/prefix/pym/_emerge/create_depgraph_params.py 2010-02-22 13:26:20 UTC (rev 15434)
107 @@ -42,7 +42,7 @@
108 rebuilt_binaries = myopts.get('--rebuilt-binaries')
109 if rebuilt_binaries is True or \
110 rebuilt_binaries != 'n' and \
111 - '--usepkg' in myopts and \
112 + '--usepkgonly' in myopts and \
113 myopts.get('--deep') is True and \
114 '--update' in myopts:
115 myparams['rebuilt_binaries'] = True
116
117 Modified: main/branches/prefix/pym/_emerge/help.py
118 ===================================================================
119 --- main/branches/prefix/pym/_emerge/help.py 2010-02-22 13:02:22 UTC (rev 15433)
120 +++ main/branches/prefix/pym/_emerge/help.py 2010-02-22 13:26:20 UTC (rev 15434)
121 @@ -511,7 +511,7 @@
122 "been rebuilt. Rebuilds are detected by comparison of " + \
123 "BUILD_TIME package metadata. This option is enabled " + \
124 "automatically when using binary packages " + \
125 - "(--usepkg or --getbinpkg) together with " + \
126 + "(--usepkgonly or --getbinpkgonly) together with " + \
127 "--update and --deep."
128 for line in wrap(desc, desc_width):
129 print(desc_indent + line)
130
131 Modified: main/branches/prefix/pym/portage/__init__.py
132 ===================================================================
133 --- main/branches/prefix/pym/portage/__init__.py 2010-02-22 13:02:22 UTC (rev 15433)
134 +++ main/branches/prefix/pym/portage/__init__.py 2010-02-22 13:26:20 UTC (rev 15434)
135 @@ -43,7 +43,6 @@
136 from StringIO import StringIO
137
138 from time import sleep
139 - from random import shuffle
140 from itertools import chain
141 import platform
142 import warnings
143 @@ -100,8 +99,12 @@
144 'portage.locks',
145 'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
146 'portage.mail',
147 + 'portage.manifest:Manifest',
148 'portage.output',
149 'portage.output:bold,colorize',
150 + 'portage.package.ebuild.config:autouse,best_from_dict,' + \
151 + 'check_config_instance,config',
152 + 'portage.package.ebuild.fetch:fetch',
153 'portage.process',
154 'portage.process:atexit_register,run_exitfuncs',
155 'portage.update:dep_transform,fixdbentries,grab_updates,' + \
156 @@ -115,6 +118,7 @@
157 'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
158 'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
159 'writemsg_stdout,write_atomic',
160 + 'portage.util.digraph:digraph',
161 'portage.versions',
162 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,' + \
163 'cpv_getkey@getCPFromCPV,endversion_keys,' + \
164 @@ -310,8 +314,6 @@
165 selinux = None
166 _selinux_merge = None
167
168 -from portage.manifest import Manifest
169 -
170 # ===========================================================================
171 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
172 # ===========================================================================
173 @@ -512,18 +514,6 @@
174 mod = getattr(mod, comp)
175 return mod
176
177 -def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
178 - for x in key_order:
179 - if x in top_dict and key in top_dict[x]:
180 - if FullCopy:
181 - return copy.deepcopy(top_dict[x][key])
182 - else:
183 - return top_dict[x][key]
184 - if EmptyOnError:
185 - return ""
186 - else:
187 - raise KeyError("Key not found in list; '%s'" % key)
188 -
189 def getcwd():
190 "this fixes situations where the current directory doesn't exist"
191 try:
192 @@ -680,282 +670,6 @@
193
194 return rlist
195
196 -#beautiful directed graph object
197 -
198 -class digraph(object):
199 - def __init__(self):
200 - """Create an empty digraph"""
201 -
202 - # { node : ( { child : priority } , { parent : priority } ) }
203 - self.nodes = {}
204 - self.order = []
205 -
206 - def add(self, node, parent, priority=0):
207 - """Adds the specified node with the specified parent.
208 -
209 - If the dep is a soft-dep and the node already has a hard
210 - relationship to the parent, the relationship is left as hard."""
211 -
212 - if node not in self.nodes:
213 - self.nodes[node] = ({}, {}, node)
214 - self.order.append(node)
215 -
216 - if not parent:
217 - return
218 -
219 - if parent not in self.nodes:
220 - self.nodes[parent] = ({}, {}, parent)
221 - self.order.append(parent)
222 -
223 - priorities = self.nodes[node][1].get(parent)
224 - if priorities is None:
225 - priorities = []
226 - self.nodes[node][1][parent] = priorities
227 - self.nodes[parent][0][node] = priorities
228 - priorities.append(priority)
229 - priorities.sort()
230 -
231 - def remove(self, node):
232 - """Removes the specified node from the digraph, also removing
233 - and ties to other nodes in the digraph. Raises KeyError if the
234 - node doesn't exist."""
235 -
236 - if node not in self.nodes:
237 - raise KeyError(node)
238 -
239 - for parent in self.nodes[node][1]:
240 - del self.nodes[parent][0][node]
241 - for child in self.nodes[node][0]:
242 - del self.nodes[child][1][node]
243 -
244 - del self.nodes[node]
245 - self.order.remove(node)
246 -
247 - def difference_update(self, t):
248 - """
249 - Remove all given nodes from node_set. This is more efficient
250 - than multiple calls to the remove() method.
251 - """
252 - if isinstance(t, (list, tuple)) or \
253 - not hasattr(t, "__contains__"):
254 - t = frozenset(t)
255 - order = []
256 - for node in self.order:
257 - if node not in t:
258 - order.append(node)
259 - continue
260 - for parent in self.nodes[node][1]:
261 - del self.nodes[parent][0][node]
262 - for child in self.nodes[node][0]:
263 - del self.nodes[child][1][node]
264 - del self.nodes[node]
265 - self.order = order
266 -
267 - def remove_edge(self, child, parent):
268 - """
269 - Remove edge in the direction from child to parent. Note that it is
270 - possible for a remaining edge to exist in the opposite direction.
271 - Any endpoint vertices that become isolated will remain in the graph.
272 - """
273 -
274 - # Nothing should be modified when a KeyError is raised.
275 - for k in parent, child:
276 - if k not in self.nodes:
277 - raise KeyError(k)
278 -
279 - # Make sure the edge exists.
280 - if child not in self.nodes[parent][0]:
281 - raise KeyError(child)
282 - if parent not in self.nodes[child][1]:
283 - raise KeyError(parent)
284 -
285 - # Remove the edge.
286 - del self.nodes[child][1][parent]
287 - del self.nodes[parent][0][child]
288 -
289 - def __iter__(self):
290 - return iter(self.order)
291 -
292 - def contains(self, node):
293 - """Checks if the digraph contains mynode"""
294 - return node in self.nodes
295 -
296 - def get(self, key, default=None):
297 - node_data = self.nodes.get(key, self)
298 - if node_data is self:
299 - return default
300 - return node_data[2]
301 -
302 - def all_nodes(self):
303 - """Return a list of all nodes in the graph"""
304 - return self.order[:]
305 -
306 - def child_nodes(self, node, ignore_priority=None):
307 - """Return all children of the specified node"""
308 - if ignore_priority is None:
309 - return list(self.nodes[node][0])
310 - children = []
311 - if hasattr(ignore_priority, '__call__'):
312 - for child, priorities in self.nodes[node][0].items():
313 - for priority in priorities:
314 - if not ignore_priority(priority):
315 - children.append(child)
316 - break
317 - else:
318 - for child, priorities in self.nodes[node][0].items():
319 - if ignore_priority < priorities[-1]:
320 - children.append(child)
321 - return children
322 -
323 - def parent_nodes(self, node, ignore_priority=None):
324 - """Return all parents of the specified node"""
325 - if ignore_priority is None:
326 - return list(self.nodes[node][1])
327 - parents = []
328 - if hasattr(ignore_priority, '__call__'):
329 - for parent, priorities in self.nodes[node][1].items():
330 - for priority in priorities:
331 - if not ignore_priority(priority):
332 - parents.append(parent)
333 - break
334 - else:
335 - for parent, priorities in self.nodes[node][1].items():
336 - if ignore_priority < priorities[-1]:
337 - parents.append(parent)
338 - return parents
339 -
340 - def leaf_nodes(self, ignore_priority=None):
341 - """Return all nodes that have no children
342 -
343 - If ignore_soft_deps is True, soft deps are not counted as
344 - children in calculations."""
345 -
346 - leaf_nodes = []
347 - if ignore_priority is None:
348 - for node in self.order:
349 - if not self.nodes[node][0]:
350 - leaf_nodes.append(node)
351 - elif hasattr(ignore_priority, '__call__'):
352 - for node in self.order:
353 - is_leaf_node = True
354 - for child, priorities in self.nodes[node][0].items():
355 - for priority in priorities:
356 - if not ignore_priority(priority):
357 - is_leaf_node = False
358 - break
359 - if not is_leaf_node:
360 - break
361 - if is_leaf_node:
362 - leaf_nodes.append(node)
363 - else:
364 - for node in self.order:
365 - is_leaf_node = True
366 - for child, priorities in self.nodes[node][0].items():
367 - if ignore_priority < priorities[-1]:
368 - is_leaf_node = False
369 - break
370 - if is_leaf_node:
371 - leaf_nodes.append(node)
372 - return leaf_nodes
373 -
374 - def root_nodes(self, ignore_priority=None):
375 - """Return all nodes that have no parents.
376 -
377 - If ignore_soft_deps is True, soft deps are not counted as
378 - parents in calculations."""
379 -
380 - root_nodes = []
381 - if ignore_priority is None:
382 - for node in self.order:
383 - if not self.nodes[node][1]:
384 - root_nodes.append(node)
385 - elif hasattr(ignore_priority, '__call__'):
386 - for node in self.order:
387 - is_root_node = True
388 - for parent, priorities in self.nodes[node][1].items():
389 - for priority in priorities:
390 - if not ignore_priority(priority):
391 - is_root_node = False
392 - break
393 - if not is_root_node:
394 - break
395 - if is_root_node:
396 - root_nodes.append(node)
397 - else:
398 - for node in self.order:
399 - is_root_node = True
400 - for parent, priorities in self.nodes[node][1].items():
401 - if ignore_priority < priorities[-1]:
402 - is_root_node = False
403 - break
404 - if is_root_node:
405 - root_nodes.append(node)
406 - return root_nodes
407 -
408 - def is_empty(self):
409 - """Checks if the digraph is empty"""
410 - return len(self.nodes) == 0
411 -
412 - def clone(self):
413 - clone = digraph()
414 - clone.nodes = {}
415 - memo = {}
416 - for children, parents, node in self.nodes.values():
417 - children_clone = {}
418 - for child, priorities in children.items():
419 - priorities_clone = memo.get(id(priorities))
420 - if priorities_clone is None:
421 - priorities_clone = priorities[:]
422 - memo[id(priorities)] = priorities_clone
423 - children_clone[child] = priorities_clone
424 - parents_clone = {}
425 - for parent, priorities in parents.items():
426 - priorities_clone = memo.get(id(priorities))
427 - if priorities_clone is None:
428 - priorities_clone = priorities[:]
429 - memo[id(priorities)] = priorities_clone
430 - parents_clone[parent] = priorities_clone
431 - clone.nodes[node] = (children_clone, parents_clone, node)
432 - clone.order = self.order[:]
433 - return clone
434 -
435 - # Backward compatibility
436 - addnode = add
437 - allnodes = all_nodes
438 - allzeros = leaf_nodes
439 - hasnode = contains
440 - __contains__ = contains
441 - empty = is_empty
442 - copy = clone
443 -
444 - def delnode(self, node):
445 - try:
446 - self.remove(node)
447 - except KeyError:
448 - pass
449 -
450 - def firstzero(self):
451 - leaf_nodes = self.leaf_nodes()
452 - if leaf_nodes:
453 - return leaf_nodes[0]
454 - return None
455 -
456 - def hasallzeros(self, ignore_priority=None):
457 - return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
458 - len(self.order)
459 -
460 - def debug_print(self):
461 - def output(s):
462 - writemsg(s, noiselevel=-1)
463 - for node in self.nodes:
464 - output("%s " % (node,))
465 - if self.nodes[node][0]:
466 - output("depends on\n")
467 - else:
468 - output("(no children)\n")
469 - for child, priorities in self.nodes[node][0].items():
470 - output(" %s (%s)\n" % (child, priorities[-1],))
471 -
472 #parse /etc/env.d and generate /etc/profile.env
473
474 def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
475 @@ -1300,2559 +1014,9 @@
476
477 return (version,None)
478
479 -def autouse(myvartree, use_cache=1, mysettings=None):
480 - """
481 - autuse returns a list of USE variables auto-enabled to packages being installed
482
483 - @param myvartree: Instance of the vartree class (from /var/db/pkg...)
484 - @type myvartree: vartree
485 - @param use_cache: read values from cache
486 - @type use_cache: Boolean
487 - @param mysettings: Instance of config
488 - @type mysettings: config
489 - @rtype: string
490 - @returns: A string containing a list of USE variables that are enabled via use.defaults
491 - """
492 - if mysettings is None:
493 - global settings
494 - mysettings = settings
495 - if mysettings.profile_path is None:
496 - return ""
497 - myusevars=""
498 - usedefaults = mysettings.use_defs
499 - for myuse in usedefaults:
500 - dep_met = True
501 - for mydep in usedefaults[myuse]:
502 - if not myvartree.dep_match(mydep,use_cache=True):
503 - dep_met = False
504 - break
505 - if dep_met:
506 - myusevars += " "+myuse
507 - return myusevars
508
509 -def check_config_instance(test):
510 - if not isinstance(test, config):
511 - raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
512
513 -def _lazy_iuse_regex(iuse_implicit):
514 - """
515 - The PORTAGE_IUSE value is lazily evaluated since re.escape() is slow
516 - and the value is only used when an ebuild phase needs to be executed
517 - (it's used only to generate QA notices).
518 - """
519 - # Escape anything except ".*" which is supposed to pass through from
520 - # _get_implicit_iuse().
521 - regex = sorted(re.escape(x) for x in iuse_implicit)
522 - regex = "^(%s)$" % "|".join(regex)
523 - regex = regex.replace("\\.\\*", ".*")
524 - return regex
525 -
526 -class _local_repo_config(object):
527 - __slots__ = ('aliases', 'eclass_overrides', 'masters', 'name',)
528 - def __init__(self, name, repo_opts):
529 - self.name = name
530 -
531 - aliases = repo_opts.get('aliases')
532 - if aliases is not None:
533 - aliases = tuple(aliases.split())
534 - self.aliases = aliases
535 -
536 - eclass_overrides = repo_opts.get('eclass-overrides')
537 - if eclass_overrides is not None:
538 - eclass_overrides = tuple(eclass_overrides.split())
539 - self.eclass_overrides = eclass_overrides
540 -
541 - masters = repo_opts.get('masters')
542 - if masters is not None:
543 - masters = tuple(masters.split())
544 - self.masters = masters
545 -
546 -class config(object):
547 - """
548 - This class encompasses the main portage configuration. Data is pulled from
549 - ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
550 - parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
551 - overrides.
552 -
553 - Generally if you need data like USE flags, FEATURES, environment variables,
554 - virtuals ...etc you look in here.
555 - """
556 -
557 - _setcpv_aux_keys = ('DEFINED_PHASES', 'DEPEND', 'EAPI',
558 - 'INHERITED', 'IUSE', 'KEYWORDS', 'LICENSE', 'PDEPEND',
559 - 'PROPERTIES', 'PROVIDE', 'RDEPEND', 'SLOT',
560 - 'repository', 'RESTRICT', 'LICENSE',)
561 -
562 - _env_blacklist = [
563 - "A", "AA", "CATEGORY", "DEPEND", "DESCRIPTION", "EAPI",
564 - "EBUILD_PHASE", "EMERGE_FROM", "EPREFIX", "EROOT",
565 - "HOMEPAGE", "INHERITED", "IUSE",
566 - "KEYWORDS", "LICENSE", "PDEPEND", "PF", "PKGUSE",
567 - "PORTAGE_CONFIGROOT", "PORTAGE_IUSE",
568 - "PORTAGE_NONFATAL", "PORTAGE_REPO_NAME",
569 - "PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "RESTRICT",
570 - "ROOT", "SLOT", "SRC_URI"
571 - ]
572 -
573 - _environ_whitelist = []
574 -
575 - # Whitelisted variables are always allowed to enter the ebuild
576 - # environment. Generally, this only includes special portage
577 - # variables. Ebuilds can unset variables that are not whitelisted
578 - # and rely on them remaining unset for future phases, without them
579 - # leaking back in from various locations (bug #189417). It's very
580 - # important to set our special BASH_ENV variable in the ebuild
581 - # environment in order to prevent sandbox from sourcing /etc/profile
582 - # in it's bashrc (causing major leakage).
583 - _environ_whitelist += [
584 - "ACCEPT_LICENSE", "BASH_ENV", "BUILD_PREFIX", "D",
585 - "DISTDIR", "DOC_SYMLINKS_DIR", "EAPI", "EBUILD",
586 - "EBUILD_EXIT_STATUS_FILE", "EBUILD_FORCE_TEST",
587 - "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "ED",
588 - "EMERGE_FROM", "EPREFIX", "EROOT",
589 - "FEATURES", "FILESDIR", "HOME", "NOCOLOR", "PATH",
590 - "PKGDIR",
591 - "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
592 - "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST",
593 - "PORTAGE_BASHRC",
594 - "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
595 - "PORTAGE_BINPKG_TMPFILE",
596 - "PORTAGE_BIN_PATH",
597 - "PORTAGE_BUILDDIR", "PORTAGE_COLORMAP",
598 - "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
599 - "PORTAGE_GID", "PORTAGE_INST_GID", "PORTAGE_INST_UID",
600 - "PORTAGE_IUSE",
601 - "PORTAGE_LOG_FILE", "PORTAGE_MASTER_PID",
602 - "PORTAGE_PYM_PATH", "PORTAGE_QUIET",
603 - "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
604 - "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV",
605 - "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
606 - "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
607 - "ROOT", "ROOTPATH", "T", "TMP", "TMPDIR",
608 - "USE_EXPAND", "USE_ORDER", "WORKDIR",
609 - "XARGS",
610 - "BPREFIX", "DEFAULT_PATH", "EXTRA_PATH",
611 - "PORTAGE_GROUP", "PORTAGE_USER",
612 - ]
613 -
614 - # user config variables
615 - _environ_whitelist += [
616 - "DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"
617 - ]
618 -
619 - _environ_whitelist += [
620 - "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
621 - ]
622 -
623 - # misc variables inherited from the calling environment
624 - _environ_whitelist += [
625 - "COLORTERM", "DISPLAY", "EDITOR", "LESS",
626 - "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
627 - "TERM", "TERMCAP", "USER",
628 - ]
629 -
630 - # tempdir settings
631 - _environ_whitelist += [
632 - "TMPDIR", "TEMP", "TMP",
633 - ]
634 -
635 - # localization settings
636 - _environ_whitelist += [
637 - "LANG", "LC_COLLATE", "LC_CTYPE", "LC_MESSAGES",
638 - "LC_MONETARY", "LC_NUMERIC", "LC_TIME", "LC_PAPER",
639 - "LC_ALL",
640 - ]
641 -
642 - # other variables inherited from the calling environment
643 - # UNIXMODE is necessary for MiNT
644 - _environ_whitelist += [
645 - "CVS_RSH", "ECHANGELOG_USER",
646 - "GPG_AGENT_INFO",
647 - "SSH_AGENT_PID", "SSH_AUTH_SOCK",
648 - "STY", "WINDOW", "XAUTHORITY", "UNIXMODE",
649 - ]
650 -
651 - _environ_whitelist = frozenset(_environ_whitelist)
652 -
653 - _environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
654 -
655 - # Filter selected variables in the config.environ() method so that
656 - # they don't needlessly propagate down into the ebuild environment.
657 - _environ_filter = []
658 -
659 - # Exclude anything that could be extremely long here (like SRC_URI)
660 - # since that could cause execve() calls to fail with E2BIG errors. For
661 - # example, see bug #262647.
662 - _environ_filter += [
663 - 'DEPEND', 'RDEPEND', 'PDEPEND', 'SRC_URI',
664 - ]
665 -
666 - # misc variables inherited from the calling environment
667 - _environ_filter += [
668 - "INFOPATH", "MANPATH", "USER",
669 - "HOST", "GROUP", "LOGNAME", "MAIL", "REMOTEHOST",
670 - "SECURITYSESSIONID",
671 - "TERMINFO", "TERM_PROGRAM", "TERM_PROGRAM_VERSION",
672 - "VENDOR", "__CF_USER_TEXT_ENCODING",
673 - ]
674 -
675 - # variables that break bash
676 - _environ_filter += [
677 - "HISTFILE", "POSIXLY_CORRECT",
678 - ]
679 -
680 - # portage config variables and variables set directly by portage
681 - _environ_filter += [
682 - "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES", "AUTOCLEAN",
683 - "CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT",
684 - "CONFIG_PROTECT_MASK", "EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS",
685 - "EMERGE_LOG_DIR",
686 - "EMERGE_WARNING_DELAY", "FETCHCOMMAND", "FETCHCOMMAND_FTP",
687 - "FETCHCOMMAND_HTTP", "FETCHCOMMAND_SFTP",
688 - "GENTOO_MIRRORS", "NOCONFMEM", "O",
689 - "PORTAGE_BACKGROUND",
690 - "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_CALLER",
691 - "PORTAGE_ELOG_CLASSES",
692 - "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
693 - "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
694 - "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
695 - "PORTAGE_GPG_DIR",
696 - "PORTAGE_GPG_KEY", "PORTAGE_IONICE_COMMAND",
697 - "PORTAGE_PACKAGE_EMPTY_ABORT",
698 - "PORTAGE_REPO_DUPLICATE_WARN",
699 - "PORTAGE_RO_DISTDIRS",
700 - "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
701 - "PORTAGE_RSYNC_RETRIES", "PORTAGE_USE", "PORT_LOGDIR",
702 - "QUICKPKG_DEFAULT_OPTS",
703 - "RESUMECOMMAND", "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTP",
704 - "RESUMECOMMAND_SFTP", "SYNC", "USE_EXPAND_HIDDEN", "USE_ORDER",
705 - ]
706 -
707 - _environ_filter = frozenset(_environ_filter)
708 -
709 - _undef_lic_groups = set()
710 - _default_globals = (
711 - ('ACCEPT_LICENSE', '* -@EULA'),
712 - ('ACCEPT_PROPERTIES', '*'),
713 - )
714 -
715 - # To enhance usability, make some vars case insensitive
716 - # by forcing them to lower case.
717 - _case_insensitive_vars = ('AUTOCLEAN', 'NOCOLOR',)
718 -
719 - def __init__(self, clone=None, mycpv=None, config_profile_path=None,
720 - config_incrementals=None, config_root=None, target_root=None,
721 - local_config=True, env=None):
722 - """
723 - @param clone: If provided, init will use deepcopy to copy by value the instance.
724 - @type clone: Instance of config class.
725 - @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
726 - and then calling instance.setcpv(mycpv).
727 - @type mycpv: String
728 - @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
729 - @type config_profile_path: String
730 - @param config_incrementals: List of incremental variables
731 - (defaults to portage.const.INCREMENTALS)
732 - @type config_incrementals: List
733 - @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
734 - @type config_root: String
735 - @param target_root: __init__ override of $ROOT env variable.
736 - @type target_root: String
737 - @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
738 - ignore local config (keywording and unmasking)
739 - @type local_config: Boolean
740 - @param env: The calling environment which is used to override settings.
741 - Defaults to os.environ if unspecified.
742 - @type env: dict
743 - """
744 -
745 - # When initializing the global portage.settings instance, avoid
746 - # raising exceptions whenever possible since exceptions thrown
747 - # from 'import portage' or 'import portage.exceptions' statements
748 - # can practically render the api unusable for api consumers.
749 - tolerant = "_initializing_globals" in globals()
750 -
751 - self.already_in_regenerate = 0
752 -
753 - self.locked = 0
754 - self.mycpv = None
755 - self._setcpv_args_hash = None
756 - self.puse = []
757 - self.modifiedkeys = []
758 - self.uvlist = []
759 - self._accept_chost_re = None
760 - self._accept_license = None
761 - self._accept_license_str = None
762 - self._license_groups = {}
763 - self._accept_properties = None
764 -
765 - self.virtuals = {}
766 - self.virts_p = {}
767 - self.dirVirtuals = None
768 - self.v_count = 0
769 -
770 - # Virtuals obtained from the vartree
771 - self.treeVirtuals = {}
772 - # Virtuals by user specification. Includes negatives.
773 - self.userVirtuals = {}
774 - # Virtual negatives from user specifications.
775 - self.negVirtuals = {}
776 - # Virtuals added by the depgraph via self.setinst().
777 - self._depgraphVirtuals = {}
778 -
779 - self.user_profile_dir = None
780 - self.local_config = local_config
781 - self._local_repo_configs = None
782 - self._local_repo_conf_path = None
783 -
784 - if clone:
785 - # For immutable attributes, use shallow copy for
786 - # speed and memory conservation.
787 - self.categories = clone.categories
788 - self.depcachedir = clone.depcachedir
789 - self.incrementals = clone.incrementals
790 - self.module_priority = clone.module_priority
791 - self.profile_path = clone.profile_path
792 - self.profiles = clone.profiles
793 - self.packages = clone.packages
794 - self.useforce_list = clone.useforce_list
795 - self.usemask_list = clone.usemask_list
796 -
797 - self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
798 - self.local_config = copy.deepcopy(clone.local_config)
799 - self._local_repo_configs = \
800 - copy.deepcopy(clone._local_repo_configs)
801 - self._local_repo_conf_path = \
802 - copy.deepcopy(clone._local_repo_conf_path)
803 - self.modules = copy.deepcopy(clone.modules)
804 - self.virtuals = copy.deepcopy(clone.virtuals)
805 - self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
806 - self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
807 - self.userVirtuals = copy.deepcopy(clone.userVirtuals)
808 - self.negVirtuals = copy.deepcopy(clone.negVirtuals)
809 - self._depgraphVirtuals = copy.deepcopy(clone._depgraphVirtuals)
810 -
811 - self.use_defs = copy.deepcopy(clone.use_defs)
812 - self.usemask = copy.deepcopy(clone.usemask)
813 - self.pusemask_list = copy.deepcopy(clone.pusemask_list)
814 - self.useforce = copy.deepcopy(clone.useforce)
815 - self.puseforce_list = copy.deepcopy(clone.puseforce_list)
816 - self.puse = copy.deepcopy(clone.puse)
817 - self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
818 - self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
819 - self.mycpv = copy.deepcopy(clone.mycpv)
820 - self._setcpv_args_hash = copy.deepcopy(clone._setcpv_args_hash)
821 -
822 - self.configdict = copy.deepcopy(clone.configdict)
823 - self.configlist = [
824 - self.configdict['env.d'],
825 - self.configdict['pkginternal'],
826 - self.configdict['globals'],
827 - self.configdict['defaults'],
828 - self.configdict['conf'],
829 - self.configdict['pkg'],
830 - self.configdict['auto'],
831 - self.configdict['env'],
832 - ]
833 - self.lookuplist = self.configlist[:]
834 - self.lookuplist.reverse()
835 - self._use_expand_dict = copy.deepcopy(clone._use_expand_dict)
836 - self.backupenv = self.configdict["backupenv"]
837 - self.pusedict = copy.deepcopy(clone.pusedict)
838 - self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
839 - self._pkeywords_list = copy.deepcopy(clone._pkeywords_list)
840 - self.pmaskdict = copy.deepcopy(clone.pmaskdict)
841 - self.punmaskdict = copy.deepcopy(clone.punmaskdict)
842 - self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
843 - self.pprovideddict = copy.deepcopy(clone.pprovideddict)
844 - self.features = copy.deepcopy(clone.features)
845 -
846 - self._accept_license = copy.deepcopy(clone._accept_license)
847 - self._plicensedict = copy.deepcopy(clone._plicensedict)
848 - self._license_groups = copy.deepcopy(clone._license_groups)
849 - self._accept_properties = copy.deepcopy(clone._accept_properties)
850 - self._ppropertiesdict = copy.deepcopy(clone._ppropertiesdict)
851 - else:
852 -
853 - def check_var_directory(varname, var):
854 - if not os.path.isdir(var):
855 - writemsg(_("!!! Error: %s='%s' is not a directory. "
856 - "Please correct this.\n") % (varname, var),
857 - noiselevel=-1)
858 - raise portage.exception.DirectoryNotFound(var)
859 -
860 - if config_root is None:
861 - config_root = EPREFIX + os.path.sep
862 -
863 - config_root = normalize_path(os.path.abspath(
864 - config_root)).rstrip(os.path.sep) + os.path.sep
865 -
866 - check_var_directory("PORTAGE_CONFIGROOT", config_root)
867 -
868 - self.depcachedir = DEPCACHE_PATH
869 -
870 - if not config_profile_path:
871 - config_profile_path = \
872 - os.path.join(config_root, PROFILE_PATH)
873 - if os.path.isdir(config_profile_path):
874 - self.profile_path = config_profile_path
875 - else:
876 - self.profile_path = None
877 - else:
878 - self.profile_path = config_profile_path
879 -
880 - if config_incrementals is None:
881 - self.incrementals = portage.const.INCREMENTALS
882 - else:
883 - self.incrementals = config_incrementals
884 - if not isinstance(self.incrementals, tuple):
885 - self.incrementals = tuple(self.incrementals)
886 -
887 - self.module_priority = ("user", "default")
888 - self.modules = {}
889 - modules_loader = portage.env.loaders.KeyValuePairFileLoader(
890 - os.path.join(config_root, MODULES_FILE_PATH), None, None)
891 - modules_dict, modules_errors = modules_loader.load()
892 - self.modules["user"] = modules_dict
893 - if self.modules["user"] is None:
894 - self.modules["user"] = {}
895 - self.modules["default"] = {
896 - "portdbapi.metadbmodule": "portage.cache.metadata.database",
897 - "portdbapi.auxdbmodule": "portage.cache.flat_hash.database",
898 - }
899 -
900 - self.usemask=[]
901 - self.configlist=[]
902 -
903 - # back up our incremental variables:
904 - self.configdict={}
905 - self._use_expand_dict = {}
906 - # configlist will contain: [ env.d, globals, defaults, conf, pkg, auto, backupenv, env ]
907 - self.configlist.append({})
908 - self.configdict["env.d"] = self.configlist[-1]
909 -
910 - self.configlist.append({})
911 - self.configdict["pkginternal"] = self.configlist[-1]
912 -
913 - # The symlink might not exist or might not be a symlink.
914 - if self.profile_path is None:
915 - self.profiles = []
916 - else:
917 - self.profiles = []
918 - def addProfile(currentPath):
919 - parentsFile = os.path.join(currentPath, "parent")
920 - eapi_file = os.path.join(currentPath, "eapi")
921 - try:
922 - eapi = codecs.open(_unicode_encode(eapi_file,
923 - encoding=_encodings['fs'], errors='strict'),
924 - mode='r', encoding=_encodings['content'], errors='replace'
925 - ).readline().strip()
926 - except IOError:
927 - pass
928 - else:
929 - if not eapi_is_supported(eapi):
930 - raise portage.exception.ParseError(_(
931 - "Profile contains unsupported "
932 - "EAPI '%s': '%s'") % \
933 - (eapi, os.path.realpath(eapi_file),))
934 - if os.path.exists(parentsFile):
935 - parents = grabfile(parentsFile)
936 - if not parents:
937 - raise portage.exception.ParseError(
938 - _("Empty parent file: '%s'") % parentsFile)
939 - for parentPath in parents:
940 - parentPath = normalize_path(os.path.join(
941 - currentPath, parentPath))
942 - if os.path.exists(parentPath):
943 - addProfile(parentPath)
944 - else:
945 - raise portage.exception.ParseError(
946 - _("Parent '%s' not found: '%s'") % \
947 - (parentPath, parentsFile))
948 - self.profiles.append(currentPath)
949 - try:
950 - addProfile(os.path.realpath(self.profile_path))
951 - except portage.exception.ParseError as e:
952 - writemsg(_("!!! Unable to parse profile: '%s'\n") % \
953 - self.profile_path, noiselevel=-1)
954 - writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
955 - del e
956 - self.profiles = []
957 - if local_config and self.profiles:
958 - custom_prof = os.path.join(
959 - config_root, CUSTOM_PROFILE_PATH)
960 - if os.path.exists(custom_prof):
961 - self.user_profile_dir = custom_prof
962 - self.profiles.append(custom_prof)
963 - del custom_prof
964 -
965 - self.profiles = tuple(self.profiles)
966 - self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
967 - self.packages = tuple(stack_lists(self.packages_list, incremental=1))
968 - del self.packages_list
969 - #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
970 -
971 - # revmaskdict
972 - self.prevmaskdict={}
973 - for x in self.packages:
974 - # Negative atoms are filtered by the above stack_lists() call.
975 - if not isinstance(x, dep.Atom):
976 - x = dep.Atom(x.lstrip('*'))
977 - self.prevmaskdict.setdefault(x.cp, []).append(x)
978 -
979 - self._pkeywords_list = []
980 - rawpkeywords = [grabdict_package(
981 - os.path.join(x, "package.keywords"), recursive=1) \
982 - for x in self.profiles]
983 - for pkeyworddict in rawpkeywords:
984 - cpdict = {}
985 - for k, v in pkeyworddict.items():
986 - cpdict.setdefault(k.cp, {})[k] = v
987 - self._pkeywords_list.append(cpdict)
988 -
989 - # get profile-masked use flags -- INCREMENTAL Child over parent
990 - self.usemask_list = tuple(
991 - tuple(grabfile(os.path.join(x, "use.mask"), recursive=1))
992 - for x in self.profiles)
993 - self.usemask = set(stack_lists(
994 - self.usemask_list, incremental=True))
995 - use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
996 - self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
997 - del use_defs_lists
998 -
999 - self.pusemask_list = []
1000 - rawpusemask = [grabdict_package(os.path.join(x, "package.use.mask"),
1001 - recursive=1) for x in self.profiles]
1002 - for pusemaskdict in rawpusemask:
1003 - cpdict = {}
1004 - for k, v in pusemaskdict.items():
1005 - cpdict.setdefault(k.cp, {})[k] = v
1006 - self.pusemask_list.append(cpdict)
1007 - del rawpusemask
1008 -
1009 - self.pkgprofileuse = []
1010 - rawprofileuse = [grabdict_package(os.path.join(x, "package.use"),
1011 - juststrings=True, recursive=1) for x in self.profiles]
1012 - for rawpusedict in rawprofileuse:
1013 - cpdict = {}
1014 - for k, v in rawpusedict.items():
1015 - cpdict.setdefault(k.cp, {})[k] = v
1016 - self.pkgprofileuse.append(cpdict)
1017 - del rawprofileuse
1018 -
1019 - self.useforce_list = tuple(
1020 - tuple(grabfile(os.path.join(x, "use.force"), recursive=1))
1021 - for x in self.profiles)
1022 - self.useforce = set(stack_lists(
1023 - self.useforce_list, incremental=True))
1024 -
1025 - self.puseforce_list = []
1026 - rawpuseforce = [grabdict_package(
1027 - os.path.join(x, "package.use.force"), recursive=1) \
1028 - for x in self.profiles]
1029 - for rawpusefdict in rawpuseforce:
1030 - cpdict = {}
1031 - for k, v in rawpusefdict.items():
1032 - cpdict.setdefault(k.cp, {})[k] = v
1033 - self.puseforce_list.append(cpdict)
1034 - del rawpuseforce
1035 -
1036 - make_conf = getconfig(
1037 - os.path.join(config_root, MAKE_CONF_FILE),
1038 - tolerant=tolerant, allow_sourcing=True)
1039 - if make_conf is None:
1040 - make_conf = {}
1041 -
1042 - # Allow ROOT setting to come from make.conf if it's not overridden
1043 - # by the constructor argument (from the calling environment).
1044 - if target_root is None and "ROOT" in make_conf:
1045 - target_root = make_conf["ROOT"]
1046 - if not target_root.strip():
1047 - target_root = None
1048 - if target_root is None:
1049 - target_root = "/"
1050 -
1051 - target_root = normalize_path(os.path.abspath(
1052 - target_root)).rstrip(os.path.sep) + os.path.sep
1053 -
1054 - portage.util.ensure_dirs(target_root + EPREFIX_LSTRIP)
1055 - check_var_directory("EROOT", target_root + EPREFIX_LSTRIP)
1056 -
1057 - # The expand_map is used for variable substitution
1058 - # in getconfig() calls, and the getconfig() calls
1059 - # update expand_map with the value of each variable
1060 - # assignment that occurs. Variable substitution occurs
1061 - # in the following order, which corresponds to the
1062 - # order of appearance in self.lookuplist:
1063 - #
1064 - # * env.d
1065 - # * make.globals
1066 - # * make.defaults
1067 - # * make.conf
1068 - #
1069 - # Notably absent is "env", since we want to avoid any
1070 - # interaction with the calling environment that might
1071 - # lead to unexpected results.
1072 - expand_map = {}
1073 -
1074 - env_d = getconfig(os.path.join(target_root, EPREFIX_LSTRIP, "etc", "profile.env"),
1075 - expand=expand_map)
1076 - # env_d will be None if profile.env doesn't exist.
1077 - if env_d:
1078 - self.configdict["env.d"].update(env_d)
1079 - expand_map.update(env_d)
1080 -
1081 - # backupenv is used for calculating incremental variables.
1082 - if env is None:
1083 - env = os.environ
1084 -
1085 - # Avoid potential UnicodeDecodeError exceptions later.
1086 - env_unicode = dict((_unicode_decode(k), _unicode_decode(v))
1087 - for k, v in env.items())
1088 -
1089 - self.backupenv = env_unicode
1090 -
1091 - if env_d:
1092 - # Remove duplicate values so they don't override updated
1093 - # profile.env values later (profile.env is reloaded in each
1094 - # call to self.regenerate).
1095 - for k, v in env_d.items():
1096 - try:
1097 - if self.backupenv[k] == v:
1098 - del self.backupenv[k]
1099 - except KeyError:
1100 - pass
1101 - del k, v
1102 -
1103 - self.configdict["env"] = util.LazyItemsDict(self.backupenv)
1104 -
1105 - # make.globals should not be relative to config_root
1106 - # because it only contains constants.
1107 - for x in (portage.const.GLOBAL_CONFIG_PATH, BPREFIX+"/etc"):
1108 - self.mygcfg = getconfig(os.path.join(x, "make.globals"),
1109 - expand=expand_map)
1110 - if self.mygcfg:
1111 - break
1112 -
1113 - if self.mygcfg is None:
1114 - self.mygcfg = {}
1115 -
1116 - for k, v in self._default_globals:
1117 - self.mygcfg.setdefault(k, v)
1118 -
1119 - self.configlist.append(self.mygcfg)
1120 - self.configdict["globals"]=self.configlist[-1]
1121 -
1122 - self.make_defaults_use = []
1123 - self.mygcfg = {}
1124 - if self.profiles:
1125 - mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
1126 - expand=expand_map) for x in self.profiles]
1127 -
1128 - for cfg in mygcfg_dlists:
1129 - if cfg:
1130 - self.make_defaults_use.append(cfg.get("USE", ""))
1131 - else:
1132 - self.make_defaults_use.append("")
1133 - self.mygcfg = stack_dicts(mygcfg_dlists,
1134 - incrementals=portage.const.INCREMENTALS)
1135 - if self.mygcfg is None:
1136 - self.mygcfg = {}
1137 - self.configlist.append(self.mygcfg)
1138 - self.configdict["defaults"]=self.configlist[-1]
1139 -
1140 - self.mygcfg = getconfig(
1141 - os.path.join(config_root, MAKE_CONF_FILE),
1142 - tolerant=tolerant, allow_sourcing=True, expand=expand_map)
1143 - if self.mygcfg is None:
1144 - self.mygcfg = {}
1145 -
1146 - # Don't allow the user to override certain variables in make.conf
1147 - profile_only_variables = self.configdict["defaults"].get(
1148 - "PROFILE_ONLY_VARIABLES", "").split()
1149 - for k in profile_only_variables:
1150 - self.mygcfg.pop(k, None)
1151 -
1152 - self.configlist.append(self.mygcfg)
1153 - self.configdict["conf"]=self.configlist[-1]
1154 -
1155 - self.configlist.append(util.LazyItemsDict())
1156 - self.configdict["pkg"]=self.configlist[-1]
1157 -
1158 - #auto-use:
1159 - self.configlist.append({})
1160 - self.configdict["auto"]=self.configlist[-1]
1161 -
1162 - self.configdict["backupenv"] = self.backupenv
1163 -
1164 - # Don't allow the user to override certain variables in the env
1165 - for k in profile_only_variables:
1166 - self.backupenv.pop(k, None)
1167 -
1168 - self.configlist.append(self.configdict["env"])
1169 -
1170 - # make lookuplist for loading package.*
1171 - self.lookuplist=self.configlist[:]
1172 - self.lookuplist.reverse()
1173 -
1174 - # Blacklist vars that could interfere with portage internals.
1175 - for blacklisted in self._env_blacklist:
1176 - for cfg in self.lookuplist:
1177 - cfg.pop(blacklisted, None)
1178 - self.backupenv.pop(blacklisted, None)
1179 - del blacklisted, cfg
1180 -
1181 - self["PORTAGE_CONFIGROOT"] = config_root
1182 - self.backup_changes("PORTAGE_CONFIGROOT")
1183 - self["ROOT"] = target_root
1184 - self.backup_changes("ROOT")
1185 - self["EPREFIX"] = EPREFIX
1186 - self.backup_changes("EPREFIX")
1187 - self["EROOT"] = target_root + EPREFIX_LSTRIP + os.path.sep
1188 - self.backup_changes("EROOT")
1189 -
1190 - self.pusedict = {}
1191 - self.pkeywordsdict = {}
1192 - self._plicensedict = {}
1193 - self._ppropertiesdict = {}
1194 - self.punmaskdict = {}
1195 - abs_user_config = os.path.join(config_root, USER_CONFIG_PATH)
1196 -
1197 - # locations for "categories" and "arch.list" files
1198 - locations = [os.path.join(self["PORTDIR"], "profiles")]
1199 - pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
1200 - pmask_locations.extend(self.profiles)
1201 -
1202 - """ repoman controls PORTDIR_OVERLAY via the environment, so no
1203 - special cases are needed here."""
1204 - overlay_profiles = []
1205 - for ov in self["PORTDIR_OVERLAY"].split():
1206 - ov = normalize_path(ov)
1207 - profiles_dir = os.path.join(ov, "profiles")
1208 - if os.path.isdir(profiles_dir):
1209 - overlay_profiles.append(profiles_dir)
1210 - locations += overlay_profiles
1211 -
1212 - pmask_locations.extend(overlay_profiles)
1213 -
1214 - if local_config:
1215 - locations.append(abs_user_config)
1216 - pmask_locations.append(abs_user_config)
1217 - pusedict = grabdict_package(
1218 - os.path.join(abs_user_config, "package.use"), recursive=1)
1219 - for k, v in pusedict.items():
1220 - self.pusedict.setdefault(k.cp, {})[k] = v
1221 -
1222 - #package.keywords
1223 - pkgdict = grabdict_package(
1224 - os.path.join(abs_user_config, "package.keywords"),
1225 - recursive=1)
1226 - for k, v in pkgdict.items():
1227 - # default to ~arch if no specific keyword is given
1228 - if not v:
1229 - mykeywordlist = []
1230 - if self.configdict["defaults"] and \
1231 - "ACCEPT_KEYWORDS" in self.configdict["defaults"]:
1232 - groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1233 - else:
1234 - groups = []
1235 - for keyword in groups:
1236 - if not keyword[0] in "~-":
1237 - mykeywordlist.append("~"+keyword)
1238 - v = mykeywordlist
1239 - self.pkeywordsdict.setdefault(k.cp, {})[k] = v
1240 -
1241 - #package.license
1242 - licdict = grabdict_package(os.path.join(
1243 - abs_user_config, "package.license"), recursive=1)
1244 - for k, v in licdict.items():
1245 - cp = k.cp
1246 - cp_dict = self._plicensedict.get(cp)
1247 - if not cp_dict:
1248 - cp_dict = {}
1249 - self._plicensedict[cp] = cp_dict
1250 - cp_dict[k] = self.expandLicenseTokens(v)
1251 -
1252 - #package.properties
1253 - propdict = grabdict_package(os.path.join(
1254 - abs_user_config, "package.properties"), recursive=1)
1255 - for k, v in propdict.items():
1256 - cp = k.cp
1257 - cp_dict = self._ppropertiesdict.get(cp)
1258 - if not cp_dict:
1259 - cp_dict = {}
1260 - self._ppropertiesdict[cp] = cp_dict
1261 - cp_dict[k] = v
1262 -
1263 - self._local_repo_configs = {}
1264 - self._local_repo_conf_path = \
1265 - os.path.join(abs_user_config, 'repos.conf')
1266 - try:
1267 - from configparser import SafeConfigParser, ParsingError
1268 - except ImportError:
1269 - from ConfigParser import SafeConfigParser, ParsingError
1270 - repo_conf_parser = SafeConfigParser()
1271 - try:
1272 - repo_conf_parser.readfp(
1273 - codecs.open(
1274 - _unicode_encode(self._local_repo_conf_path,
1275 - encoding=_encodings['fs'], errors='strict'),
1276 - mode='r', encoding=_encodings['content'], errors='replace')
1277 - )
1278 - except EnvironmentError as e:
1279 - if e.errno != errno.ENOENT:
1280 - raise
1281 - del e
1282 - except ParsingError as e:
1283 - portage.util.writemsg_level(
1284 - _("!!! Error parsing '%s': %s\n") % \
1285 - (self._local_repo_conf_path, e),
1286 - level=logging.ERROR, noiselevel=-1)
1287 - del e
1288 - else:
1289 - repo_defaults = repo_conf_parser.defaults()
1290 - if repo_defaults:
1291 - self._local_repo_configs['DEFAULT'] = \
1292 - _local_repo_config('DEFAULT', repo_defaults)
1293 - for repo_name in repo_conf_parser.sections():
1294 - repo_opts = repo_defaults.copy()
1295 - for opt_name in repo_conf_parser.options(repo_name):
1296 - repo_opts[opt_name] = \
1297 - repo_conf_parser.get(repo_name, opt_name)
1298 - self._local_repo_configs[repo_name] = \
1299 - _local_repo_config(repo_name, repo_opts)
1300 -
1301 - #getting categories from an external file now
1302 - categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1303 - category_re = dbapi.dbapi._category_re
1304 - self.categories = tuple(sorted(
1305 - x for x in stack_lists(categories, incremental=1)
1306 - if category_re.match(x) is not None))
1307 - del categories
1308 -
1309 - archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1310 - archlist = stack_lists(archlist, incremental=1)
1311 - self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1312 -
1313 - # package.mask and package.unmask
1314 - pkgmasklines = []
1315 - pkgunmasklines = []
1316 - for x in pmask_locations:
1317 - pkgmasklines.append(grabfile_package(
1318 - os.path.join(x, "package.mask"), recursive=1))
1319 - pkgunmasklines.append(grabfile_package(
1320 - os.path.join(x, "package.unmask"), recursive=1))
1321 - pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1322 - pkgunmasklines = stack_lists(pkgunmasklines, incremental=1)
1323 -
1324 - self.pmaskdict = {}
1325 - for x in pkgmasklines:
1326 - self.pmaskdict.setdefault(x.cp, []).append(x)
1327 -
1328 - for x in pkgunmasklines:
1329 - self.punmaskdict.setdefault(x.cp, []).append(x)
1330 -
1331 - pkgprovidedlines = [grabfile(os.path.join(x, "package.provided"), recursive=1) for x in self.profiles]
1332 - pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1333 - has_invalid_data = False
1334 - for x in range(len(pkgprovidedlines)-1, -1, -1):
1335 - myline = pkgprovidedlines[x]
1336 - if not isvalidatom("=" + myline):
1337 - writemsg(_("Invalid package name in package.provided: %s\n") % \
1338 - myline, noiselevel=-1)
1339 - has_invalid_data = True
1340 - del pkgprovidedlines[x]
1341 - continue
1342 - cpvr = catpkgsplit(pkgprovidedlines[x])
1343 - if not cpvr or cpvr[0] == "null":
1344 - writemsg(_("Invalid package name in package.provided: ")+pkgprovidedlines[x]+"\n",
1345 - noiselevel=-1)
1346 - has_invalid_data = True
1347 - del pkgprovidedlines[x]
1348 - continue
1349 - if cpvr[0] == "virtual":
1350 - writemsg(_("Virtual package in package.provided: %s\n") % \
1351 - myline, noiselevel=-1)
1352 - has_invalid_data = True
1353 - del pkgprovidedlines[x]
1354 - continue
1355 - if has_invalid_data:
1356 - writemsg(_("See portage(5) for correct package.provided usage.\n"),
1357 - noiselevel=-1)
1358 - self.pprovideddict = {}
1359 - for x in pkgprovidedlines:
1360 - cpv=catpkgsplit(x)
1361 - if not x:
1362 - continue
1363 - mycatpkg = cpv_getkey(x)
1364 - if mycatpkg in self.pprovideddict:
1365 - self.pprovideddict[mycatpkg].append(x)
1366 - else:
1367 - self.pprovideddict[mycatpkg]=[x]
1368 -
1369 - # parse licensegroups
1370 - license_groups = self._license_groups
1371 - for x in locations:
1372 - for k, v in grabdict(
1373 - os.path.join(x, "license_groups")).items():
1374 - license_groups.setdefault(k, []).extend(v)
1375 -
1376 - # reasonable defaults; this is important as without USE_ORDER,
1377 - # USE will always be "" (nothing set)!
1378 - if "USE_ORDER" not in self:
1379 - self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:env.d"
1380 -
1381 - self["PORTAGE_GID"] = str(portage_gid)
1382 - self.backup_changes("PORTAGE_GID")
1383 -
1384 - if self.get("PORTAGE_DEPCACHEDIR", None):
1385 - self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1386 - self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
1387 - self.backup_changes("PORTAGE_DEPCACHEDIR")
1388 -
1389 - overlays = self.get("PORTDIR_OVERLAY","").split()
1390 - if overlays:
1391 - new_ov = []
1392 - for ov in overlays:
1393 - ov = normalize_path(ov)
1394 - if os.path.isdir(ov):
1395 - new_ov.append(ov)
1396 - else:
1397 - writemsg(_("!!! Invalid PORTDIR_OVERLAY"
1398 - " (not a dir): '%s'\n") % ov, noiselevel=-1)
1399 - self["PORTDIR_OVERLAY"] = " ".join(new_ov)
1400 - self.backup_changes("PORTDIR_OVERLAY")
1401 -
1402 - if "CBUILD" not in self and "CHOST" in self:
1403 - self["CBUILD"] = self["CHOST"]
1404 - self.backup_changes("CBUILD")
1405 -
1406 - self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
1407 - self.backup_changes("PORTAGE_BIN_PATH")
1408 - self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
1409 - self.backup_changes("PORTAGE_PYM_PATH")
1410 -
1411 - for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
1412 - try:
1413 - self[var] = str(int(self.get(var, "0")))
1414 - except ValueError:
1415 - writemsg(_("!!! %s='%s' is not a valid integer. "
1416 - "Falling back to '0'.\n") % (var, self[var]),
1417 - noiselevel=-1)
1418 - self[var] = "0"
1419 - self.backup_changes(var)
1420 -
1421 - # initialize self.features
1422 - self.regenerate()
1423 -
1424 - if bsd_chflags:
1425 - self.features.add('chflags')
1426 -
1427 - self["FEATURES"] = " ".join(sorted(self.features))
1428 - self.backup_changes("FEATURES")
1429 - global _glep_55_enabled, _validate_cache_for_unsupported_eapis
1430 - if 'parse-eapi-ebuild-head' in self.features:
1431 - _validate_cache_for_unsupported_eapis = False
1432 - if 'parse-eapi-glep-55' in self.features:
1433 - _validate_cache_for_unsupported_eapis = False
1434 - _glep_55_enabled = True
1435 -
1436 - for k in self._case_insensitive_vars:
1437 - if k in self:
1438 - self[k] = self[k].lower()
1439 - self.backup_changes(k)
1440 -
1441 - if mycpv:
1442 - self.setcpv(mycpv)
1443 -
1444 - def _init_dirs(self):
1445 - """
1446 - Create a few directories that are critical to portage operation
1447 - """
1448 - if not os.access(self["ROOT"] + EPREFIX_LSTRIP, os.W_OK):
1449 - return
1450 -
1451 - # gid, mode, mask, preserve_perms
1452 - dir_mode_map = {
1453 - EPREFIX_LSTRIP+"/tmp" : ( -1, 0o1777, 0, True),
1454 - EPREFIX_LSTRIP+"/var/tmp" : ( -1, 0o1777, 0, True),
1455 - PRIVATE_PATH : (portage_gid, 0o2750, 0o2, False),
1456 - CACHE_PATH : (portage_gid, 0o755, 0o2, False)
1457 - }
1458 -
1459 - for mypath, (gid, mode, modemask, preserve_perms) \
1460 - in dir_mode_map.items():
1461 - mydir = os.path.join(self["ROOT"], mypath)
1462 - if preserve_perms and os.path.isdir(mydir):
1463 - # Only adjust permissions on some directories if
1464 - # they don't exist yet. This gives freedom to the
1465 - # user to adjust permissions to suit their taste.
1466 - continue
1467 - try:
1468 - portage.util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
1469 - except portage.exception.PortageException as e:
1470 - writemsg(_("!!! Directory initialization failed: '%s'\n") % mydir,
1471 - noiselevel=-1)
1472 - writemsg("!!! %s\n" % str(e),
1473 - noiselevel=-1)
1474 -
1475 - def expandLicenseTokens(self, tokens):
1476 - """ Take a token from ACCEPT_LICENSE or package.license and expand it
1477 - if it's a group token (indicated by @) or just return it if it's not a
1478 - group. If a group is negated then negate all group elements."""
1479 - expanded_tokens = []
1480 - for x in tokens:
1481 - expanded_tokens.extend(self._expandLicenseToken(x, None))
1482 - return expanded_tokens
1483 -
1484 - def _expandLicenseToken(self, token, traversed_groups):
1485 - negate = False
1486 - rValue = []
1487 - if token.startswith("-"):
1488 - negate = True
1489 - license_name = token[1:]
1490 - else:
1491 - license_name = token
1492 - if not license_name.startswith("@"):
1493 - rValue.append(token)
1494 - return rValue
1495 - group_name = license_name[1:]
1496 - if traversed_groups is None:
1497 - traversed_groups = set()
1498 - license_group = self._license_groups.get(group_name)
1499 - if group_name in traversed_groups:
1500 - writemsg(_("Circular license group reference"
1501 - " detected in '%s'\n") % group_name, noiselevel=-1)
1502 - rValue.append("@"+group_name)
1503 - elif license_group:
1504 - traversed_groups.add(group_name)
1505 - for l in license_group:
1506 - if l.startswith("-"):
1507 - writemsg(_("Skipping invalid element %s"
1508 - " in license group '%s'\n") % (l, group_name),
1509 - noiselevel=-1)
1510 - else:
1511 - rValue.extend(self._expandLicenseToken(l, traversed_groups))
1512 - else:
1513 - if self._license_groups and \
1514 - group_name not in self._undef_lic_groups:
1515 - self._undef_lic_groups.add(group_name)
1516 - writemsg(_("Undefined license group '%s'\n") % group_name,
1517 - noiselevel=-1)
1518 - rValue.append("@"+group_name)
1519 - if negate:
1520 - rValue = ["-" + token for token in rValue]
1521 - return rValue
1522 -
1523 - def validate(self):
1524 - """Validate miscellaneous settings and display warnings if necessary.
1525 - (This code was previously in the global scope of portage.py)"""
1526 -
1527 - groups = self["ACCEPT_KEYWORDS"].split()
1528 - archlist = self.archlist()
1529 - if not archlist:
1530 - writemsg(_("--- 'profiles/arch.list' is empty or "
1531 - "not available. Empty portage tree?\n"), noiselevel=1)
1532 - else:
1533 - for group in groups:
1534 - if group not in archlist and \
1535 - not (group.startswith("-") and group[1:] in archlist) and \
1536 - group not in ("*", "~*", "**"):
1537 - writemsg(_("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group),
1538 - noiselevel=-1)
1539 -
1540 - abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
1541 - PROFILE_PATH)
1542 - if not self.profile_path or (not os.path.islink(abs_profile_path) and \
1543 - not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
1544 - os.path.exists(os.path.join(self["PORTDIR"], "profiles"))):
1545 - writemsg(_("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n") % abs_profile_path,
1546 - noiselevel=-1)
1547 - writemsg(_("!!! It should point into a profile within %s/profiles/\n") % self["PORTDIR"])
1548 - writemsg(_("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
1549 -
1550 - abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
1551 - USER_VIRTUALS_FILE)
1552 - if os.path.exists(abs_user_virtuals):
1553 - writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
1554 - writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
1555 - writemsg("!!! this new location.\n\n")
1556 -
1557 - if not process.sandbox_capable and \
1558 - ("sandbox" in self.features or "usersandbox" in self.features):
1559 - if self.profile_path is not None and \
1560 - os.path.realpath(self.profile_path) == \
1561 - os.path.realpath(os.path.join(
1562 - self["PORTAGE_CONFIGROOT"], PROFILE_PATH)):
1563 - # Don't show this warning when running repoman and the
1564 - # sandbox feature came from a profile that doesn't belong
1565 - # to the user.
1566 - writemsg(colorize("BAD", _("!!! Problem with sandbox"
1567 - " binary. Disabling...\n\n")), noiselevel=-1)
1568 -
1569 - if "fakeroot" in self.features and \
1570 - not portage.process.fakeroot_capable:
1571 - writemsg(_("!!! FEATURES=fakeroot is enabled, but the "
1572 - "fakeroot binary is not installed.\n"), noiselevel=-1)
1573 -
1574 - def loadVirtuals(self,root):
1575 - """Not currently used by portage."""
1576 - writemsg("DEPRECATED: portage.config.loadVirtuals\n")
1577 - self.getvirtuals(root)
1578 -
1579 - def load_best_module(self,property_string):
1580 - best_mod = best_from_dict(property_string,self.modules,self.module_priority)
1581 - mod = None
1582 - try:
1583 - mod = load_mod(best_mod)
1584 - except ImportError:
1585 - if best_mod.startswith("cache."):
1586 - best_mod = "portage." + best_mod
1587 - try:
1588 - mod = load_mod(best_mod)
1589 - except ImportError:
1590 - pass
1591 - if mod is None:
1592 - raise
1593 - return mod
1594 -
1595 - def lock(self):
1596 - self.locked = 1
1597 -
1598 - def unlock(self):
1599 - self.locked = 0
1600 -
1601 - def modifying(self):
1602 - if self.locked:
1603 - raise Exception(_("Configuration is locked."))
1604 -
1605 - def backup_changes(self,key=None):
1606 - self.modifying()
1607 - if key and key in self.configdict["env"]:
1608 - self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
1609 - else:
1610 - raise KeyError(_("No such key defined in environment: %s") % key)
1611 -
1612 - def reset(self,keeping_pkg=0,use_cache=1):
1613 - """
1614 - Restore environment from self.backupenv, call self.regenerate()
1615 - @param keeping_pkg: Should we keep the set_cpv() data or delete it.
1616 - @type keeping_pkg: Boolean
1617 - @param use_cache: Should self.regenerate use the cache or not
1618 - @type use_cache: Boolean
1619 - @rype: None
1620 - """
1621 - self.modifying()
1622 - self.configdict["env"].clear()
1623 - self.configdict["env"].update(self.backupenv)
1624 -
1625 - self.modifiedkeys = []
1626 - if not keeping_pkg:
1627 - self.mycpv = None
1628 - self.puse = ""
1629 - self.configdict["pkg"].clear()
1630 - self.configdict["pkginternal"].clear()
1631 - self.configdict["defaults"]["USE"] = \
1632 - " ".join(self.make_defaults_use)
1633 - self.usemask = set(stack_lists(
1634 - self.usemask_list, incremental=True))
1635 - self.useforce = set(stack_lists(
1636 - self.useforce_list, incremental=True))
1637 - self.regenerate(use_cache=use_cache)
1638 -
1639 - class _lazy_vars(object):
1640 -
1641 - __slots__ = ('built_use', 'settings', 'values')
1642 -
1643 - def __init__(self, built_use, settings):
1644 - self.built_use = built_use
1645 - self.settings = settings
1646 - self.values = None
1647 -
1648 - def __getitem__(self, k):
1649 - if self.values is None:
1650 - self.values = self._init_values()
1651 - return self.values[k]
1652 -
1653 - def _init_values(self):
1654 - values = {}
1655 - settings = self.settings
1656 - use = self.built_use
1657 - if use is None:
1658 - use = frozenset(settings['PORTAGE_USE'].split())
1659 - values['ACCEPT_LICENSE'] = self._accept_license(use, settings)
1660 - values['PORTAGE_RESTRICT'] = self._restrict(use, settings)
1661 - return values
1662 -
1663 - def _accept_license(self, use, settings):
1664 - """
1665 - Generate a pruned version of ACCEPT_LICENSE, by intersection with
1666 - LICENSE. This is required since otherwise ACCEPT_LICENSE might be
1667 - too big (bigger than ARG_MAX), causing execve() calls to fail with
1668 - E2BIG errors as in bug #262647.
1669 - """
1670 - try:
1671 - licenses = set(flatten(
1672 - dep.use_reduce(dep.paren_reduce(
1673 - settings['LICENSE']),
1674 - uselist=use)))
1675 - except exception.InvalidDependString:
1676 - licenses = set()
1677 - licenses.discard('||')
1678 - if settings._accept_license:
1679 - acceptable_licenses = set()
1680 - for x in settings._accept_license:
1681 - if x == '*':
1682 - acceptable_licenses.update(licenses)
1683 - elif x == '-*':
1684 - acceptable_licenses.clear()
1685 - elif x[:1] == '-':
1686 - acceptable_licenses.discard(x[1:])
1687 - elif x in licenses:
1688 - acceptable_licenses.add(x)
1689 -
1690 - licenses = acceptable_licenses
1691 - return ' '.join(sorted(licenses))
1692 -
1693 - def _restrict(self, use, settings):
1694 - try:
1695 - restrict = set(flatten(
1696 - dep.use_reduce(dep.paren_reduce(
1697 - settings['RESTRICT']),
1698 - uselist=use)))
1699 - except exception.InvalidDependString:
1700 - restrict = set()
1701 - return ' '.join(sorted(restrict))
1702 -
1703 - class _lazy_use_expand(object):
1704 - """
1705 - Lazily evaluate USE_EXPAND variables since they are only needed when
1706 - an ebuild shell is spawned. Variables values are made consistent with
1707 - the previously calculated USE settings.
1708 - """
1709 -
1710 - def __init__(self, use, usemask, iuse_implicit,
1711 - use_expand_split, use_expand_dict):
1712 - self._use = use
1713 - self._usemask = usemask
1714 - self._iuse_implicit = iuse_implicit
1715 - self._use_expand_split = use_expand_split
1716 - self._use_expand_dict = use_expand_dict
1717 -
1718 - def __getitem__(self, key):
1719 - prefix = key.lower() + '_'
1720 - prefix_len = len(prefix)
1721 - expand_flags = set( x[prefix_len:] for x in self._use \
1722 - if x[:prefix_len] == prefix )
1723 - var_split = self._use_expand_dict.get(key, '').split()
1724 - # Preserve the order of var_split because it can matter for things
1725 - # like LINGUAS.
1726 - var_split = [ x for x in var_split if x in expand_flags ]
1727 - var_split.extend(expand_flags.difference(var_split))
1728 - has_wildcard = '*' in expand_flags
1729 - if has_wildcard:
1730 - var_split = [ x for x in var_split if x != "*" ]
1731 - has_iuse = set()
1732 - for x in self._iuse_implicit:
1733 - if x[:prefix_len] == prefix:
1734 - has_iuse.add(x[prefix_len:])
1735 - if has_wildcard:
1736 - # * means to enable everything in IUSE that's not masked
1737 - if has_iuse:
1738 - usemask = self._usemask
1739 - for suffix in has_iuse:
1740 - x = prefix + suffix
1741 - if x not in usemask:
1742 - if suffix not in expand_flags:
1743 - var_split.append(suffix)
1744 - else:
1745 - # If there is a wildcard and no matching flags in IUSE then
1746 - # LINGUAS should be unset so that all .mo files are
1747 - # installed.
1748 - var_split = []
1749 - # Make the flags unique and filter them according to IUSE.
1750 - # Also, continue to preserve order for things like LINGUAS
1751 - # and filter any duplicates that variable may contain.
1752 - filtered_var_split = []
1753 - remaining = has_iuse.intersection(var_split)
1754 - for x in var_split:
1755 - if x in remaining:
1756 - remaining.remove(x)
1757 - filtered_var_split.append(x)
1758 - var_split = filtered_var_split
1759 -
1760 - if var_split:
1761 - value = ' '.join(var_split)
1762 - else:
1763 - # Don't export empty USE_EXPAND vars unless the user config
1764 - # exports them as empty. This is required for vars such as
1765 - # LINGUAS, where unset and empty have different meanings.
1766 - if has_wildcard:
1767 - # ebuild.sh will see this and unset the variable so
1768 - # that things like LINGUAS work properly
1769 - value = '*'
1770 - else:
1771 - if has_iuse:
1772 - value = ''
1773 - else:
1774 - # It's not in IUSE, so just allow the variable content
1775 - # to pass through if it is defined somewhere. This
1776 - # allows packages that support LINGUAS but don't
1777 - # declare it in IUSE to use the variable outside of the
1778 - # USE_EXPAND context.
1779 - value = None
1780 -
1781 - return value
1782 -
1783 - def setcpv(self, mycpv, use_cache=1, mydb=None):
1784 - """
1785 - Load a particular CPV into the config, this lets us see the
1786 - Default USE flags for a particular ebuild as well as the USE
1787 - flags from package.use.
1788 -
1789 - @param mycpv: A cpv to load
1790 - @type mycpv: string
1791 - @param use_cache: Enables caching
1792 - @type use_cache: Boolean
1793 - @param mydb: a dbapi instance that supports aux_get with the IUSE key.
1794 - @type mydb: dbapi or derivative.
1795 - @rtype: None
1796 - """
1797 -
1798 - self.modifying()
1799 -
1800 - pkg = None
1801 - built_use = None
1802 - if not isinstance(mycpv, basestring):
1803 - pkg = mycpv
1804 - mycpv = pkg.cpv
1805 - mydb = pkg.metadata
1806 - args_hash = (mycpv, id(pkg))
1807 - if pkg.built:
1808 - built_use = pkg.use.enabled
1809 - else:
1810 - args_hash = (mycpv, id(mydb))
1811 -
1812 - if args_hash == self._setcpv_args_hash:
1813 - return
1814 - self._setcpv_args_hash = args_hash
1815 -
1816 - has_changed = False
1817 - self.mycpv = mycpv
1818 - cat, pf = catsplit(mycpv)
1819 - cp = cpv_getkey(mycpv)
1820 - cpv_slot = self.mycpv
1821 - pkginternaluse = ""
1822 - iuse = ""
1823 - pkg_configdict = self.configdict["pkg"]
1824 - previous_iuse = pkg_configdict.get("IUSE")
1825 -
1826 - aux_keys = self._setcpv_aux_keys
1827 -
1828 - # Discard any existing metadata from the previous package, but
1829 - # preserve things like USE_EXPAND values and PORTAGE_USE which
1830 - # might be reused.
1831 - for k in aux_keys:
1832 - pkg_configdict.pop(k, None)
1833 -
1834 - pkg_configdict["CATEGORY"] = cat
1835 - pkg_configdict["PF"] = pf
1836 - if mydb:
1837 - if not hasattr(mydb, "aux_get"):
1838 - for k in aux_keys:
1839 - if k in mydb:
1840 - # Make these lazy, since __getitem__ triggers
1841 - # evaluation of USE conditionals which can't
1842 - # occur until PORTAGE_USE is calculated below.
1843 - pkg_configdict.addLazySingleton(k,
1844 - mydb.__getitem__, k)
1845 - else:
1846 - for k, v in zip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
1847 - pkg_configdict[k] = v
1848 - repository = pkg_configdict.pop("repository", None)
1849 - if repository is not None:
1850 - pkg_configdict["PORTAGE_REPO_NAME"] = repository
1851 - slot = pkg_configdict["SLOT"]
1852 - iuse = pkg_configdict["IUSE"]
1853 - if pkg is None:
1854 - cpv_slot = "%s:%s" % (self.mycpv, slot)
1855 - else:
1856 - cpv_slot = pkg
1857 - pkginternaluse = []
1858 - for x in iuse.split():
1859 - if x.startswith("+"):
1860 - pkginternaluse.append(x[1:])
1861 - elif x.startswith("-"):
1862 - pkginternaluse.append(x)
1863 - pkginternaluse = " ".join(pkginternaluse)
1864 - if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
1865 - self.configdict["pkginternal"]["USE"] = pkginternaluse
1866 - has_changed = True
1867 -
1868 - defaults = []
1869 - pos = 0
1870 - for i, pkgprofileuse_dict in enumerate(self.pkgprofileuse):
1871 - cpdict = pkgprofileuse_dict.get(cp)
1872 - if cpdict:
1873 - keys = list(cpdict)
1874 - while keys:
1875 - bestmatch = best_match_to_list(cpv_slot, keys)
1876 - if bestmatch:
1877 - keys.remove(bestmatch)
1878 - defaults.insert(pos, cpdict[bestmatch])
1879 - else:
1880 - break
1881 - del keys
1882 - if self.make_defaults_use[i]:
1883 - defaults.insert(pos, self.make_defaults_use[i])
1884 - pos = len(defaults)
1885 - defaults = " ".join(defaults)
1886 - if defaults != self.configdict["defaults"].get("USE",""):
1887 - self.configdict["defaults"]["USE"] = defaults
1888 - has_changed = True
1889 -
1890 - useforce = self._getUseForce(cpv_slot)
1891 - if useforce != self.useforce:
1892 - self.useforce = useforce
1893 - has_changed = True
1894 -
1895 - usemask = self._getUseMask(cpv_slot)
1896 - if usemask != self.usemask:
1897 - self.usemask = usemask
1898 - has_changed = True
1899 - oldpuse = self.puse
1900 - self.puse = ""
1901 - cpdict = self.pusedict.get(cp)
1902 - if cpdict:
1903 - keys = list(cpdict)
1904 - while keys:
1905 - self.pusekey = best_match_to_list(cpv_slot, keys)
1906 - if self.pusekey:
1907 - keys.remove(self.pusekey)
1908 - self.puse = (" ".join(cpdict[self.pusekey])) + " " + self.puse
1909 - else:
1910 - break
1911 - del keys
1912 - if oldpuse != self.puse:
1913 - has_changed = True
1914 - self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
1915 - self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
1916 -
1917 - if has_changed:
1918 - self.reset(keeping_pkg=1,use_cache=use_cache)
1919 -
1920 - # Ensure that "pkg" values are always preferred over "env" values.
1921 - # This must occur _after_ the above reset() call, since reset()
1922 - # copies values from self.backupenv.
1923 - env_configdict = self.configdict['env']
1924 - for k in pkg_configdict:
1925 - if k != 'USE':
1926 - env_configdict.pop(k, None)
1927 -
1928 - lazy_vars = self._lazy_vars(built_use, self)
1929 - env_configdict.addLazySingleton('ACCEPT_LICENSE',
1930 - lazy_vars.__getitem__, 'ACCEPT_LICENSE')
1931 - env_configdict.addLazySingleton('PORTAGE_RESTRICT',
1932 - lazy_vars.__getitem__, 'PORTAGE_RESTRICT')
1933 -
1934 - # If reset() has not been called, it's safe to return
1935 - # early if IUSE has not changed.
1936 - if not has_changed and previous_iuse == iuse:
1937 - return
1938 -
1939 - # Filter out USE flags that aren't part of IUSE. This has to
1940 - # be done for every setcpv() call since practically every
1941 - # package has different IUSE.
1942 - use = set(self["USE"].split())
1943 - iuse_implicit = self._get_implicit_iuse()
1944 - iuse_implicit.update(x.lstrip("+-") for x in iuse.split())
1945 -
1946 - # PORTAGE_IUSE is not always needed so it's lazily evaluated.
1947 - self.configdict["pkg"].addLazySingleton(
1948 - "PORTAGE_IUSE", _lazy_iuse_regex, iuse_implicit)
1949 -
1950 - ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
1951 - if ebuild_force_test and \
1952 - not hasattr(self, "_ebuild_force_test_msg_shown"):
1953 - self._ebuild_force_test_msg_shown = True
1954 - writemsg(_("Forcing test.\n"), noiselevel=-1)
1955 - if "test" in self.features:
1956 - if "test" in self.usemask and not ebuild_force_test:
1957 - # "test" is in IUSE and USE=test is masked, so execution
1958 - # of src_test() probably is not reliable. Therefore,
1959 - # temporarily disable FEATURES=test just for this package.
1960 - self["FEATURES"] = " ".join(x for x in self.features \
1961 - if x != "test")
1962 - use.discard("test")
1963 - else:
1964 - use.add("test")
1965 - if ebuild_force_test:
1966 - self.usemask.discard("test")
1967 -
1968 - # Allow _* flags from USE_EXPAND wildcards to pass through here.
1969 - use.difference_update([x for x in use \
1970 - if x not in iuse_implicit and x[-2:] != '_*'])
1971 -
1972 - # Use the calculated USE flags to regenerate the USE_EXPAND flags so
1973 - # that they are consistent. For optimal performance, use slice
1974 - # comparison instead of startswith().
1975 - use_expand_split = set(x.lower() for \
1976 - x in self.get('USE_EXPAND', '').split())
1977 - lazy_use_expand = self._lazy_use_expand(use, self.usemask,
1978 - iuse_implicit, use_expand_split, self._use_expand_dict)
1979 -
1980 - use_expand_iuses = {}
1981 - for x in iuse_implicit:
1982 - x_split = x.split('_')
1983 - if len(x_split) == 1:
1984 - continue
1985 - for i in range(len(x_split) - 1):
1986 - k = '_'.join(x_split[:i+1])
1987 - if k in use_expand_split:
1988 - v = use_expand_iuses.get(k)
1989 - if v is None:
1990 - v = set()
1991 - use_expand_iuses[k] = v
1992 - v.add(x)
1993 - break
1994 -
1995 - # If it's not in IUSE, variable content is allowed
1996 - # to pass through if it is defined somewhere. This
1997 - # allows packages that support LINGUAS but don't
1998 - # declare it in IUSE to use the variable outside of the
1999 - # USE_EXPAND context.
2000 - for k, use_expand_iuse in use_expand_iuses.items():
2001 - if k + '_*' in use:
2002 - use.update( x for x in use_expand_iuse if x not in usemask )
2003 - k = k.upper()
2004 - self.configdict['env'].addLazySingleton(k,
2005 - lazy_use_expand.__getitem__, k)
2006 -
2007 - # Filtered for the ebuild environment. Store this in a separate
2008 - # attribute since we still want to be able to see global USE
2009 - # settings for things like emerge --info.
2010 -
2011 - self.configdict["pkg"]["PORTAGE_USE"] = \
2012 - " ".join(sorted(x for x in use if x[-2:] != '_*'))
2013 -
2014 - def _get_implicit_iuse(self):
2015 - """
2016 - Some flags are considered to
2017 - be implicit members of IUSE:
2018 - * Flags derived from ARCH
2019 - * Flags derived from USE_EXPAND_HIDDEN variables
2020 - * Masked flags, such as those from {,package}use.mask
2021 - * Forced flags, such as those from {,package}use.force
2022 - * build and bootstrap flags used by bootstrap.sh
2023 - """
2024 - iuse_implicit = set()
2025 - # Flags derived from ARCH.
2026 - arch = self.configdict["defaults"].get("ARCH")
2027 - if arch:
2028 - iuse_implicit.add(arch)
2029 - iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
2030 -
2031 - # Flags derived from USE_EXPAND_HIDDEN variables
2032 - # such as ELIBC, KERNEL, and USERLAND.
2033 - use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
2034 - for x in use_expand_hidden:
2035 - iuse_implicit.add(x.lower() + "_.*")
2036 -
2037 - # Flags that have been masked or forced.
2038 - iuse_implicit.update(self.usemask)
2039 - iuse_implicit.update(self.useforce)
2040 -
2041 - # build and bootstrap flags used by bootstrap.sh
2042 - iuse_implicit.add("build")
2043 - iuse_implicit.add("bootstrap")
2044 -
2045 - # Controlled by FEATURES=test. Make this implicit, so handling
2046 - # of FEATURES=test is consistent regardless of explicit IUSE.
2047 - # Users may use use.mask/package.use.mask to control
2048 - # FEATURES=test for all ebuilds, regardless of explicit IUSE.
2049 - iuse_implicit.add("test")
2050 -
2051 - return iuse_implicit
2052 -
2053 - def _getUseMask(self, pkg):
2054 - cp = getattr(pkg, "cp", None)
2055 - if cp is None:
2056 - cp = cpv_getkey(dep.remove_slot(pkg))
2057 - usemask = []
2058 - pos = 0
2059 - for i, pusemask_dict in enumerate(self.pusemask_list):
2060 - cpdict = pusemask_dict.get(cp)
2061 - if cpdict:
2062 - keys = list(cpdict)
2063 - while keys:
2064 - best_match = best_match_to_list(pkg, keys)
2065 - if best_match:
2066 - keys.remove(best_match)
2067 - usemask.insert(pos, cpdict[best_match])
2068 - else:
2069 - break
2070 - del keys
2071 - if self.usemask_list[i]:
2072 - usemask.insert(pos, self.usemask_list[i])
2073 - pos = len(usemask)
2074 - return set(stack_lists(usemask, incremental=True))
2075 -
2076 - def _getUseForce(self, pkg):
2077 - cp = getattr(pkg, "cp", None)
2078 - if cp is None:
2079 - cp = cpv_getkey(dep.remove_slot(pkg))
2080 - useforce = []
2081 - pos = 0
2082 - for i, puseforce_dict in enumerate(self.puseforce_list):
2083 - cpdict = puseforce_dict.get(cp)
2084 - if cpdict:
2085 - keys = list(cpdict)
2086 - while keys:
2087 - best_match = best_match_to_list(pkg, keys)
2088 - if best_match:
2089 - keys.remove(best_match)
2090 - useforce.insert(pos, cpdict[best_match])
2091 - else:
2092 - break
2093 - del keys
2094 - if self.useforce_list[i]:
2095 - useforce.insert(pos, self.useforce_list[i])
2096 - pos = len(useforce)
2097 - return set(stack_lists(useforce, incremental=True))
2098 -
2099 - def _getMaskAtom(self, cpv, metadata):
2100 - """
2101 - Take a package and return a matching package.mask atom, or None if no
2102 - such atom exists or it has been cancelled by package.unmask. PROVIDE
2103 - is not checked, so atoms will not be found for old-style virtuals.
2104 -
2105 - @param cpv: The package name
2106 - @type cpv: String
2107 - @param metadata: A dictionary of raw package metadata
2108 - @type metadata: dict
2109 - @rtype: String
2110 - @return: A matching atom string or None if one is not found.
2111 - """
2112 -
2113 - cp = cpv_getkey(cpv)
2114 - mask_atoms = self.pmaskdict.get(cp)
2115 - if mask_atoms:
2116 - pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2117 - unmask_atoms = self.punmaskdict.get(cp)
2118 - for x in mask_atoms:
2119 - if not match_from_list(x, pkg_list):
2120 - continue
2121 - if unmask_atoms:
2122 - for y in unmask_atoms:
2123 - if match_from_list(y, pkg_list):
2124 - return None
2125 - return x
2126 - return None
2127 -
2128 - def _getProfileMaskAtom(self, cpv, metadata):
2129 - """
2130 - Take a package and return a matching profile atom, or None if no
2131 - such atom exists. Note that a profile atom may or may not have a "*"
2132 - prefix. PROVIDE is not checked, so atoms will not be found for
2133 - old-style virtuals.
2134 -
2135 - @param cpv: The package name
2136 - @type cpv: String
2137 - @param metadata: A dictionary of raw package metadata
2138 - @type metadata: dict
2139 - @rtype: String
2140 - @return: A matching profile atom string or None if one is not found.
2141 - """
2142 -
2143 - cp = cpv_getkey(cpv)
2144 - profile_atoms = self.prevmaskdict.get(cp)
2145 - if profile_atoms:
2146 - pkg_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2147 - for x in profile_atoms:
2148 - if match_from_list(x, pkg_list):
2149 - continue
2150 - return x
2151 - return None
2152 -
2153 - def _getKeywords(self, cpv, metadata):
2154 - cp = cpv_getkey(cpv)
2155 - pkg = "%s:%s" % (cpv, metadata["SLOT"])
2156 - keywords = [[x for x in metadata["KEYWORDS"].split() if x != "-*"]]
2157 - pos = len(keywords)
2158 - for pkeywords_dict in self._pkeywords_list:
2159 - cpdict = pkeywords_dict.get(cp)
2160 - if cpdict:
2161 - keys = list(cpdict)
2162 - while keys:
2163 - best_match = best_match_to_list(pkg, keys)
2164 - if best_match:
2165 - keys.remove(best_match)
2166 - keywords.insert(pos, cpdict[best_match])
2167 - else:
2168 - break
2169 - pos = len(keywords)
2170 - return stack_lists(keywords, incremental=True)
2171 -
2172 - def _getMissingKeywords(self, cpv, metadata):
2173 - """
2174 - Take a package and return a list of any KEYWORDS that the user
2175 - may need to accept for the given package. If the KEYWORDS are empty
2176 - and the ** keyword has not been accepted, the returned list will
2177 - contain ** alone (in order to distinguish from the case of "none
2178 - missing").
2179 -
2180 - @param cpv: The package name (for package.keywords support)
2181 - @type cpv: String
2182 - @param metadata: A dictionary of raw package metadata
2183 - @type metadata: dict
2184 - @rtype: List
2185 - @return: A list of KEYWORDS that have not been accepted.
2186 - """
2187 -
2188 - # Hack: Need to check the env directly here as otherwise stacking
2189 - # doesn't work properly as negative values are lost in the config
2190 - # object (bug #139600)
2191 - egroups = self.configdict["backupenv"].get(
2192 - "ACCEPT_KEYWORDS", "").split()
2193 - mygroups = self._getKeywords(cpv, metadata)
2194 - # Repoman may modify this attribute as necessary.
2195 - pgroups = self["ACCEPT_KEYWORDS"].split()
2196 - match=0
2197 - cp = cpv_getkey(cpv)
2198 - pkgdict = self.pkeywordsdict.get(cp)
2199 - matches = False
2200 - if pkgdict:
2201 - cpv_slot_list = ["%s:%s" % (cpv, metadata["SLOT"])]
2202 - for atom, pkgkeywords in pkgdict.items():
2203 - if match_from_list(atom, cpv_slot_list):
2204 - matches = True
2205 - pgroups.extend(pkgkeywords)
2206 - if matches or egroups:
2207 - pgroups.extend(egroups)
2208 - inc_pgroups = set()
2209 - for x in pgroups:
2210 - if x.startswith("-"):
2211 - if x == "-*":
2212 - inc_pgroups.clear()
2213 - else:
2214 - inc_pgroups.discard(x[1:])
2215 - else:
2216 - inc_pgroups.add(x)
2217 - pgroups = inc_pgroups
2218 - del inc_pgroups
2219 - hasstable = False
2220 - hastesting = False
2221 - for gp in mygroups:
2222 - if gp == "*" or (gp == "-*" and len(mygroups) == 1):
2223 - writemsg(_("--- WARNING: Package '%(cpv)s' uses"
2224 - " '%(keyword)s' keyword.\n") % {"cpv": cpv, "keyword": gp}, noiselevel=-1)
2225 - if gp == "*":
2226 - match = 1
2227 - break
2228 - elif gp in pgroups:
2229 - match=1
2230 - break
2231 - elif gp.startswith("~"):
2232 - hastesting = True
2233 - elif not gp.startswith("-"):
2234 - hasstable = True
2235 - if not match and \
2236 - ((hastesting and "~*" in pgroups) or \
2237 - (hasstable and "*" in pgroups) or "**" in pgroups):
2238 - match=1
2239 - if match:
2240 - missing = []
2241 - else:
2242 - if not mygroups:
2243 - # If KEYWORDS is empty then we still have to return something
2244 - # in order to distinguish from the case of "none missing".
2245 - mygroups.append("**")
2246 - missing = mygroups
2247 - return missing
2248 -
2249 - def _getMissingLicenses(self, cpv, metadata):
2250 - """
2251 - Take a LICENSE string and return a list of any licenses that the user
2252 - may need to accept for the given package. The returned list will not
2253 - contain any licenses that have already been accepted. This method
2254 - can throw an InvalidDependString exception.
2255 -
2256 - @param cpv: The package name (for package.license support)
2257 - @type cpv: String
2258 - @param metadata: A dictionary of raw package metadata
2259 - @type metadata: dict
2260 - @rtype: List
2261 - @return: A list of licenses that have not been accepted.
2262 - """
2263 - accept_license = self._accept_license
2264 - cpdict = self._plicensedict.get(cpv_getkey(cpv), None)
2265 - if cpdict:
2266 - accept_license = list(self._accept_license)
2267 - cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
2268 - for atom in match_to_list(cpv_slot, list(cpdict)):
2269 - accept_license.extend(cpdict[atom])
2270 -
2271 - licenses = set(flatten(dep.use_reduce(dep.paren_reduce(
2272 - metadata["LICENSE"]), matchall=1)))
2273 - licenses.discard('||')
2274 -
2275 - acceptable_licenses = set()
2276 - for x in accept_license:
2277 - if x == '*':
2278 - acceptable_licenses.update(licenses)
2279 - elif x == '-*':
2280 - acceptable_licenses.clear()
2281 - elif x[:1] == '-':
2282 - acceptable_licenses.discard(x[1:])
2283 - else:
2284 - acceptable_licenses.add(x)
2285 -
2286 - license_str = metadata["LICENSE"]
2287 - if "?" in license_str:
2288 - use = metadata["USE"].split()
2289 - else:
2290 - use = []
2291 -
2292 - license_struct = portage.dep.use_reduce(
2293 - portage.dep.paren_reduce(license_str), uselist=use)
2294 - license_struct = portage.dep.dep_opconvert(license_struct)
2295 - return self._getMaskedLicenses(license_struct, acceptable_licenses)
2296 -
2297 - def _getMaskedLicenses(self, license_struct, acceptable_licenses):
2298 - if not license_struct:
2299 - return []
2300 - if license_struct[0] == "||":
2301 - ret = []
2302 - for element in license_struct[1:]:
2303 - if isinstance(element, list):
2304 - if element:
2305 - ret.append(self._getMaskedLicenses(
2306 - element, acceptable_licenses))
2307 - if not ret[-1]:
2308 - return []
2309 - else:
2310 - if element in acceptable_licenses:
2311 - return []
2312 - ret.append(element)
2313 - # Return all masked licenses, since we don't know which combination
2314 - # (if any) the user will decide to unmask.
2315 - return flatten(ret)
2316 -
2317 - ret = []
2318 - for element in license_struct:
2319 - if isinstance(element, list):
2320 - if element:
2321 - ret.extend(self._getMaskedLicenses(element,
2322 - acceptable_licenses))
2323 - else:
2324 - if element not in acceptable_licenses:
2325 - ret.append(element)
2326 - return ret
2327 -
2328 - def _getMissingProperties(self, cpv, metadata):
2329 - """
2330 - Take a PROPERTIES string and return a list of any properties the user
2331 - may need to accept for the given package. The returned list will not
2332 - contain any properties that have already been accepted. This method
2333 - can throw an InvalidDependString exception.
2334 -
2335 - @param cpv: The package name (for package.properties support)
2336 - @type cpv: String
2337 - @param metadata: A dictionary of raw package metadata
2338 - @type metadata: dict
2339 - @rtype: List
2340 - @return: A list of properties that have not been accepted.
2341 - """
2342 - accept_properties = self._accept_properties
2343 - cpdict = self._ppropertiesdict.get(cpv_getkey(cpv), None)
2344 - if cpdict:
2345 - accept_properties = list(self._accept_properties)
2346 - cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
2347 - for atom in match_to_list(cpv_slot, list(cpdict)):
2348 - accept_properties.extend(cpdict[atom])
2349 -
2350 - properties = set(flatten(dep.use_reduce(dep.paren_reduce(
2351 - metadata["PROPERTIES"]), matchall=1)))
2352 - properties.discard('||')
2353 -
2354 - acceptable_properties = set()
2355 - for x in accept_properties:
2356 - if x == '*':
2357 - acceptable_properties.update(properties)
2358 - elif x == '-*':
2359 - acceptable_properties.clear()
2360 - elif x[:1] == '-':
2361 - acceptable_properties.discard(x[1:])
2362 - else:
2363 - acceptable_properties.add(x)
2364 -
2365 - properties_str = metadata["PROPERTIES"]
2366 - if "?" in properties_str:
2367 - use = metadata["USE"].split()
2368 - else:
2369 - use = []
2370 -
2371 - properties_struct = portage.dep.use_reduce(
2372 - portage.dep.paren_reduce(properties_str), uselist=use)
2373 - properties_struct = portage.dep.dep_opconvert(properties_struct)
2374 - return self._getMaskedProperties(properties_struct, acceptable_properties)
2375 -
2376 - def _getMaskedProperties(self, properties_struct, acceptable_properties):
2377 - if not properties_struct:
2378 - return []
2379 - if properties_struct[0] == "||":
2380 - ret = []
2381 - for element in properties_struct[1:]:
2382 - if isinstance(element, list):
2383 - if element:
2384 - ret.append(self._getMaskedProperties(
2385 - element, acceptable_properties))
2386 - if not ret[-1]:
2387 - return []
2388 - else:
2389 - if element in acceptable_properties:
2390 - return[]
2391 - ret.append(element)
2392 - # Return all masked properties, since we don't know which combination
2393 - # (if any) the user will decide to unmask
2394 - return flatten(ret)
2395 -
2396 - ret = []
2397 - for element in properties_struct:
2398 - if isinstance(element, list):
2399 - if element:
2400 - ret.extend(self._getMaskedProperties(element,
2401 - acceptable_properties))
2402 - else:
2403 - if element not in acceptable_properties:
2404 - ret.append(element)
2405 - return ret
2406 -
2407 - def _accept_chost(self, cpv, metadata):
2408 - """
2409 - @return True if pkg CHOST is accepted, False otherwise.
2410 - """
2411 - if self._accept_chost_re is None:
2412 - accept_chost = self.get("ACCEPT_CHOSTS", "").split()
2413 - if not accept_chost:
2414 - chost = self.get("CHOST")
2415 - if chost:
2416 - accept_chost.append(chost)
2417 - if not accept_chost:
2418 - self._accept_chost_re = re.compile(".*")
2419 - elif len(accept_chost) == 1:
2420 - try:
2421 - self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
2422 - except re.error as e:
2423 - writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
2424 - (accept_chost[0], e), noiselevel=-1)
2425 - self._accept_chost_re = re.compile("^$")
2426 - else:
2427 - try:
2428 - self._accept_chost_re = re.compile(
2429 - r'^(%s)$' % "|".join(accept_chost))
2430 - except re.error as e:
2431 - writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
2432 - (" ".join(accept_chost), e), noiselevel=-1)
2433 - self._accept_chost_re = re.compile("^$")
2434 -
2435 - pkg_chost = metadata.get('CHOST', '')
2436 - return not pkg_chost or \
2437 - self._accept_chost_re.match(pkg_chost) is not None
2438 -
2439 - def setinst(self,mycpv,mydbapi):
2440 - """This updates the preferences for old-style virtuals,
2441 - affecting the behavior of dep_expand() and dep_check()
2442 - calls. It can change dbapi.match() behavior since that
2443 - calls dep_expand(). However, dbapi instances have
2444 - internal match caches that are not invalidated when
2445 - preferences are updated here. This can potentially
2446 - lead to some inconsistency (relevant to bug #1343)."""
2447 - self.modifying()
2448 - if len(self.virtuals) == 0:
2449 - self.getvirtuals()
2450 - # Grab the virtuals this package provides and add them into the tree virtuals.
2451 - if not hasattr(mydbapi, "aux_get"):
2452 - provides = mydbapi["PROVIDE"]
2453 - else:
2454 - provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
2455 - if not provides:
2456 - return
2457 - if isinstance(mydbapi, portdbapi):
2458 - self.setcpv(mycpv, mydb=mydbapi)
2459 - myuse = self["PORTAGE_USE"]
2460 - elif not hasattr(mydbapi, "aux_get"):
2461 - myuse = mydbapi["USE"]
2462 - else:
2463 - myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
2464 - virts = flatten(portage.dep.use_reduce(portage.dep.paren_reduce(provides), uselist=myuse.split()))
2465 -
2466 - modified = False
2467 - cp = dep.Atom(cpv_getkey(mycpv))
2468 - for virt in virts:
2469 - try:
2470 - virt = dep.Atom(virt).cp
2471 - except exception.InvalidAtom:
2472 - continue
2473 - providers = self.virtuals.get(virt)
2474 - if providers and cp in providers:
2475 - continue
2476 - providers = self._depgraphVirtuals.get(virt)
2477 - if providers is None:
2478 - providers = []
2479 - self._depgraphVirtuals[virt] = providers
2480 - if cp not in providers:
2481 - providers.append(cp)
2482 - modified = True
2483 -
2484 - if modified:
2485 - self.virtuals = self.__getvirtuals_compile()
2486 -
2487 - def reload(self):
2488 - """Reload things like /etc/profile.env that can change during runtime."""
2489 - env_d_filename = os.path.join(self["ROOT"], EPREFIX_LSTRIP, "etc", "profile.env")
2490 - self.configdict["env.d"].clear()
2491 - env_d = getconfig(env_d_filename, expand=False)
2492 - if env_d:
2493 - # env_d will be None if profile.env doesn't exist.
2494 - self.configdict["env.d"].update(env_d)
2495 -
2496 - def _prune_incremental(self, split):
2497 - """
2498 - Prune off any parts of an incremental variable that are
2499 - made irrelevant by the latest occurring * or -*. This
2500 - could be more aggressive but that might be confusing
2501 - and the point is just to reduce noise a bit.
2502 - """
2503 - for i, x in enumerate(reversed(split)):
2504 - if x == '*':
2505 - split = split[-i-1:]
2506 - break
2507 - elif x == '-*':
2508 - if i == 0:
2509 - split = []
2510 - else:
2511 - split = split[-i:]
2512 - break
2513 - return split
2514 -
2515 - def regenerate(self,useonly=0,use_cache=1):
2516 - """
2517 - Regenerate settings
2518 - This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
2519 - re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
2520 - variables. This also updates the env.d configdict; useful in case an ebuild
2521 - changes the environment.
2522 -
2523 - If FEATURES has already stacked, it is not stacked twice.
2524 -
2525 - @param useonly: Only regenerate USE flags (not any other incrementals)
2526 - @type useonly: Boolean
2527 - @param use_cache: Enable Caching (only for autouse)
2528 - @type use_cache: Boolean
2529 - @rtype: None
2530 - """
2531 -
2532 - self.modifying()
2533 - if self.already_in_regenerate:
2534 - # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
2535 - writemsg("!!! Looping in regenerate.\n",1)
2536 - return
2537 - else:
2538 - self.already_in_regenerate = 1
2539 -
2540 - if useonly:
2541 - myincrementals=["USE"]
2542 - else:
2543 - myincrementals = self.incrementals
2544 - myincrementals = set(myincrementals)
2545 - # If self.features exists, it has already been stacked and may have
2546 - # been mutated, so don't stack it again or else any mutations will be
2547 - # reverted.
2548 - if "FEATURES" in myincrementals and hasattr(self, "features"):
2549 - myincrementals.remove("FEATURES")
2550 -
2551 - if "USE" in myincrementals:
2552 - # Process USE last because it depends on USE_EXPAND which is also
2553 - # an incremental!
2554 - myincrementals.remove("USE")
2555 -
2556 - mydbs = self.configlist[:-1]
2557 - mydbs.append(self.backupenv)
2558 -
2559 - # ACCEPT_LICENSE is a lazily evaluated incremental, so that * can be
2560 - used to match all licenses without ever having to explicitly expand
2561 - # it to all licenses.
2562 - if self.local_config:
2563 - mysplit = []
2564 - for curdb in mydbs:
2565 - mysplit.extend(curdb.get('ACCEPT_LICENSE', '').split())
2566 - mysplit = self._prune_incremental(mysplit)
2567 - accept_license_str = ' '.join(mysplit)
2568 - self.configlist[-1]['ACCEPT_LICENSE'] = accept_license_str
2569 - if accept_license_str != self._accept_license_str:
2570 - self._accept_license_str = accept_license_str
2571 - self._accept_license = tuple(self.expandLicenseTokens(mysplit))
2572 - else:
2573 - # repoman will accept any license
2574 - self._accept_license = ('*',)
2575 -
2576 - # ACCEPT_PROPERTIES works like ACCEPT_LICENSE, without groups
2577 - if self.local_config:
2578 - mysplit = []
2579 - for curdb in mydbs:
2580 - mysplit.extend(curdb.get('ACCEPT_PROPERTIES', '').split())
2581 - mysplit = self._prune_incremental(mysplit)
2582 - self.configlist[-1]['ACCEPT_PROPERTIES'] = ' '.join(mysplit)
2583 - if tuple(mysplit) != self._accept_properties:
2584 - self._accept_properties = tuple(mysplit)
2585 - else:
2586 - # repoman will accept any property
2587 - self._accept_properties = ('*',)
2588 -
2589 - for mykey in myincrementals:
2590 -
2591 - myflags=[]
2592 - for curdb in mydbs:
2593 - if mykey not in curdb:
2594 - continue
2595 - #variables are already expanded
2596 - mysplit = curdb[mykey].split()
2597 -
2598 - for x in mysplit:
2599 - if x=="-*":
2600 - # "-*" is a special "minus" var that means "unset all settings".
2601 - # so USE="-* gnome" will have *just* gnome enabled.
2602 - myflags = []
2603 - continue
2604 -
2605 - if x[0]=="+":
2606 - # Not legal. People assume too much. Complain.
2607 - writemsg(colorize("BAD",
2608 - _("USE flags should not start with a '+': %s") % x) \
2609 - + "\n", noiselevel=-1)
2610 - x=x[1:]
2611 - if not x:
2612 - continue
2613 -
2614 - if (x[0]=="-"):
2615 - if (x[1:] in myflags):
2616 - # Unset/Remove it.
2617 - del myflags[myflags.index(x[1:])]
2618 - continue
2619 -
2620 - # We got here, so add it now.
2621 - if x not in myflags:
2622 - myflags.append(x)
2623 -
2624 - myflags.sort()
2625 - #store setting in last element of configlist, the original environment:
2626 - if myflags or mykey in self:
2627 - self.configlist[-1][mykey] = " ".join(myflags)
2628 - del myflags
2629 -
2630 - # Do the USE calculation last because it depends on USE_EXPAND.
2631 - if "auto" in self["USE_ORDER"].split(":"):
2632 - self.configdict["auto"]["USE"] = autouse(
2633 - vartree(root=self["ROOT"], categories=self.categories,
2634 - settings=self),
2635 - use_cache=use_cache, mysettings=self)
2636 - else:
2637 - self.configdict["auto"]["USE"] = ""
2638 -
2639 - use_expand = self.get("USE_EXPAND", "").split()
2640 - use_expand_dict = self._use_expand_dict
2641 - use_expand_dict.clear()
2642 - for k in use_expand:
2643 - v = self.get(k)
2644 - if v is not None:
2645 - use_expand_dict[k] = v
2646 -
2647 - if not self.uvlist:
2648 - for x in self["USE_ORDER"].split(":"):
2649 - if x in self.configdict:
2650 - self.uvlist.append(self.configdict[x])
2651 - self.uvlist.reverse()
2652 -
2653 - # For optimal performance, use slice
2654 - # comparison instead of startswith().
2655 - myflags = set()
2656 - for curdb in self.uvlist:
2657 - cur_use_expand = [x for x in use_expand if x in curdb]
2658 - mysplit = curdb.get("USE", "").split()
2659 - if not mysplit and not cur_use_expand:
2660 - continue
2661 - for x in mysplit:
2662 - if x == "-*":
2663 - myflags.clear()
2664 - continue
2665 -
2666 - if x[0] == "+":
2667 - writemsg(colorize("BAD", _("USE flags should not start "
2668 - "with a '+': %s\n") % x), noiselevel=-1)
2669 - x = x[1:]
2670 - if not x:
2671 - continue
2672 -
2673 - if x[0] == "-":
2674 - myflags.discard(x[1:])
2675 - continue
2676 -
2677 - myflags.add(x)
2678 -
2679 - for var in cur_use_expand:
2680 - var_lower = var.lower()
2681 - is_not_incremental = var not in myincrementals
2682 - if is_not_incremental:
2683 - prefix = var_lower + "_"
2684 - prefix_len = len(prefix)
2685 - for x in list(myflags):
2686 - if x[:prefix_len] == prefix:
2687 - myflags.remove(x)
2688 - for x in curdb[var].split():
2689 - if x[0] == "+":
2690 - if is_not_incremental:
2691 - writemsg(colorize("BAD", _("Invalid '+' "
2692 - "operator in non-incremental variable "
2693 - "'%s': '%s'\n") % (var, x)), noiselevel=-1)
2694 - continue
2695 - else:
2696 - writemsg(colorize("BAD", _("Invalid '+' "
2697 - "operator in incremental variable "
2698 - "'%s': '%s'\n") % (var, x)), noiselevel=-1)
2699 - x = x[1:]
2700 - if x[0] == "-":
2701 - if is_not_incremental:
2702 - writemsg(colorize("BAD", _("Invalid '-' "
2703 - "operator in non-incremental variable "
2704 - "'%s': '%s'\n") % (var, x)), noiselevel=-1)
2705 - continue
2706 - myflags.discard(var_lower + "_" + x[1:])
2707 - continue
2708 - myflags.add(var_lower + "_" + x)
2709 -
2710 - if hasattr(self, "features"):
2711 - self.features.clear()
2712 - else:
2713 - self.features = set()
2714 - self.features.update(self.configlist[-1].get('FEATURES', '').split())
2715 - self['FEATURES'] = ' '.join(sorted(self.features))
2716 -
2717 - myflags.update(self.useforce)
2718 - arch = self.configdict["defaults"].get("ARCH")
2719 - if arch:
2720 - myflags.add(arch)
2721 -
2722 - myflags.difference_update(self.usemask)
2723 - self.configlist[-1]["USE"]= " ".join(sorted(myflags))
2724 -
2725 - self.already_in_regenerate = 0
2726 -
2727 - def get_virts_p(self, myroot=None):
2728 -
2729 - if myroot is not None:
2730 - warnings.warn("The 'myroot' parameter for " + \
2731 - "portage.config.get_virts_p() is deprecated",
2732 - DeprecationWarning, stacklevel=2)
2733 -
2734 - if self.virts_p:
2735 - return self.virts_p
2736 - virts = self.getvirtuals()
2737 - if virts:
2738 - for x in virts:
2739 - vkeysplit = x.split("/")
2740 - if vkeysplit[1] not in self.virts_p:
2741 - self.virts_p[vkeysplit[1]] = virts[x]
2742 - return self.virts_p
2743 -
2744 - def getvirtuals(self, myroot=None):
2745 - """myroot is now ignored because, due to caching, it has always been
2746 - broken for all but the first call."""
2747 -
2748 - if myroot is not None:
2749 - warnings.warn("The 'myroot' parameter for " + \
2750 - "portage.config.getvirtuals() is deprecated",
2751 - DeprecationWarning, stacklevel=2)
2752 -
2753 - myroot = self["ROOT"]
2754 - if self.virtuals:
2755 - return self.virtuals
2756 -
2757 - virtuals_list = []
2758 - for x in self.profiles:
2759 - virtuals_file = os.path.join(x, "virtuals")
2760 - virtuals_dict = grabdict(virtuals_file)
2761 - atoms_dict = {}
2762 - for k, v in virtuals_dict.items():
2763 - try:
2764 - virt_atom = portage.dep.Atom(k)
2765 - except portage.exception.InvalidAtom:
2766 - virt_atom = None
2767 - else:
2768 - if virt_atom.blocker or \
2769 - str(virt_atom) != str(virt_atom.cp):
2770 - virt_atom = None
2771 - if virt_atom is None:
2772 - writemsg(_("--- Invalid virtuals atom in %s: %s\n") % \
2773 - (virtuals_file, k), noiselevel=-1)
2774 - continue
2775 - providers = []
2776 - for atom in v:
2777 - atom_orig = atom
2778 - if atom[:1] == '-':
2779 - # allow incrementals
2780 - atom = atom[1:]
2781 - try:
2782 - atom = portage.dep.Atom(atom)
2783 - except portage.exception.InvalidAtom:
2784 - atom = None
2785 - else:
2786 - if atom.blocker:
2787 - atom = None
2788 - if atom is None:
2789 - writemsg(_("--- Invalid atom in %s: %s\n") % \
2790 - (virtuals_file, myatom), noiselevel=-1)
2791 - else:
2792 - if atom_orig == str(atom):
2793 - # normal atom, so return as Atom instance
2794 - providers.append(atom)
2795 - else:
2796 - # atom has special prefix, so return as string
2797 - providers.append(atom_orig)
2798 - if providers:
2799 - atoms_dict[virt_atom] = providers
2800 - if atoms_dict:
2801 - virtuals_list.append(atoms_dict)
2802 -
2803 - self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
2804 - del virtuals_list
2805 -
2806 - for virt in self.dirVirtuals:
2807 - # Preference for virtuals decreases from left to right.
2808 - self.dirVirtuals[virt].reverse()
2809 -
2810 - # Repoman does not use user or tree virtuals.
2811 - if self.local_config and not self.treeVirtuals:
2812 - temp_vartree = vartree(myroot, None,
2813 - categories=self.categories, settings=self)
2814 - self._populate_treeVirtuals(temp_vartree)
2815 -
2816 - self.virtuals = self.__getvirtuals_compile()
2817 - return self.virtuals
2818 -
2819 - def _populate_treeVirtuals(self, vartree):
2820 - """Reduce the provides into a list by CP."""
2821 - for provide, cpv_list in vartree.get_all_provides().items():
2822 - try:
2823 - provide = dep.Atom(provide)
2824 - except exception.InvalidAtom:
2825 - continue
2826 - self.treeVirtuals[provide.cp] = \
2827 - [dep.Atom(cpv_getkey(cpv)) for cpv in cpv_list]
2828 -
2829 - def __getvirtuals_compile(self):
2830 - """Stack installed and profile virtuals. Preference for virtuals
2831 - decreases from left to right.
2832 - Order of preference:
2833 - 1. installed and in profile
2834 - 2. installed only
2835 - 3. profile only
2836 - """
2837 -
2838 - # Virtuals by profile+tree preferences.
2839 - ptVirtuals = {}
2840 -
2841 - for virt, installed_list in self.treeVirtuals.items():
2842 - profile_list = self.dirVirtuals.get(virt, None)
2843 - if not profile_list:
2844 - continue
2845 - for cp in installed_list:
2846 - if cp in profile_list:
2847 - ptVirtuals.setdefault(virt, [])
2848 - ptVirtuals[virt].append(cp)
2849 -
2850 - virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
2851 - self.dirVirtuals, self._depgraphVirtuals])
2852 - return virtuals
2853 -
2854 - def __delitem__(self,mykey):
2855 - self.modifying()
2856 - for x in self.lookuplist:
2857 - if x != None:
2858 - if mykey in x:
2859 - del x[mykey]
2860 -
2861 - def __getitem__(self,mykey):
2862 - for d in self.lookuplist:
2863 - if mykey in d:
2864 - return d[mykey]
2865 - return '' # for backward compat, don't raise KeyError
2866 -
2867 - def get(self, k, x=None):
2868 - for d in self.lookuplist:
2869 - if k in d:
2870 - return d[k]
2871 - return x
2872 -
2873 - def pop(self, key, *args):
2874 - if len(args) > 1:
2875 - raise TypeError(
2876 - "pop expected at most 2 arguments, got " + \
2877 - repr(1 + len(args)))
2878 - v = self
2879 - for d in reversed(self.lookuplist):
2880 - v = d.pop(key, v)
2881 - if v is self:
2882 - if args:
2883 - return args[0]
2884 - raise KeyError(key)
2885 - return v
2886 -
2887 - def has_key(self,mykey):
2888 - warnings.warn("portage.config.has_key() is deprecated, "
2889 - "use the in operator instead",
2890 - DeprecationWarning, stacklevel=2)
2891 - return mykey in self
2892 -
2893 - def __contains__(self, mykey):
2894 - """Called to implement membership test operators (in and not in)."""
2895 - for d in self.lookuplist:
2896 - if mykey in d:
2897 - return True
2898 - return False
2899 -
2900 - def setdefault(self, k, x=None):
2901 - v = self.get(k)
2902 - if v is not None:
2903 - return v
2904 - else:
2905 - self[k] = x
2906 - return x
2907 -
2908 - def keys(self):
2909 - return list(self)
2910 -
2911 - def __iter__(self):
2912 - keys = set()
2913 - for d in self.lookuplist:
2914 - keys.update(d)
2915 - return iter(keys)
2916 -
2917 - def iterkeys(self):
2918 - return iter(self)
2919 -
2920 - def iteritems(self):
2921 - for k in self:
2922 - yield (k, self[k])
2923 -
2924 - def items(self):
2925 - return list(self.iteritems())
2926 -
2927 - def __setitem__(self,mykey,myvalue):
2928 - "set a value; will be thrown away at reset() time"
2929 - if not isinstance(myvalue, basestring):
2930 - raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
2931 -
2932 - # Avoid potential UnicodeDecodeError exceptions later.
2933 - mykey = _unicode_decode(mykey)
2934 - myvalue = _unicode_decode(myvalue)
2935 -
2936 - self.modifying()
2937 - self.modifiedkeys.append(mykey)
2938 - self.configdict["env"][mykey]=myvalue
2939 -
2940 - def environ(self):
2941 - "return our locally-maintained environment"
2942 - mydict={}
2943 - environ_filter = self._environ_filter
2944 -
2945 - eapi = self.get('EAPI')
2946 - phase = self.get('EBUILD_PHASE')
2947 - filter_calling_env = False
2948 - if phase not in ('clean', 'cleanrm', 'depend'):
2949 - temp_dir = self.get('T')
2950 - if temp_dir is not None and \
2951 - os.path.exists(os.path.join(temp_dir, 'environment')):
2952 - filter_calling_env = True
2953 -
2954 - environ_whitelist = self._environ_whitelist
2955 - env_d = self.configdict["env.d"]
2956 - for x in self:
2957 - if x in environ_filter:
2958 - continue
2959 - myvalue = self[x]
2960 - if not isinstance(myvalue, basestring):
2961 - writemsg(_("!!! Non-string value in config: %s=%s\n") % \
2962 - (x, myvalue), noiselevel=-1)
2963 - continue
2964 - if filter_calling_env and \
2965 - x not in environ_whitelist and \
2966 - not self._environ_whitelist_re.match(x):
2967 - # Do not allow anything to leak into the ebuild
2968 - # environment unless it is explicitly whitelisted.
2969 - # This ensures that variables unset by the ebuild
2970 - # remain unset.
2971 - continue
2972 - mydict[x] = myvalue
2973 - if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
2974 - writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
2975 - mydict["HOME"]=mydict["BUILD_PREFIX"][:]
2976 -
2977 - if filter_calling_env:
2978 - if phase:
2979 - whitelist = []
2980 - if "rpm" == phase:
2981 - whitelist.append("RPMDIR")
2982 - for k in whitelist:
2983 - v = self.get(k)
2984 - if v is not None:
2985 - mydict[k] = v
2986 -
2987 - # Filtered by IUSE and implicit IUSE.
2988 - mydict["USE"] = self.get("PORTAGE_USE", "")
2989 -
2990 - # Don't export AA to the ebuild environment in EAPIs that forbid it
2991 - if eapi not in ("0", "1", "2", "3", "3_pre2"):
2992 - mydict.pop("AA", None)
2993 -
2994 - if phase == 'depend':
2995 - mydict.pop('FILESDIR', None)
2996 -
2997 - return mydict
2998 -
2999 - def thirdpartymirrors(self):
3000 - if getattr(self, "_thirdpartymirrors", None) is None:
3001 - profileroots = [os.path.join(self["PORTDIR"], "profiles")]
3002 - for x in self["PORTDIR_OVERLAY"].split():
3003 - profileroots.insert(0, os.path.join(x, "profiles"))
3004 - thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
3005 - self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
3006 - return self._thirdpartymirrors
3007 -
3008 - def archlist(self):
3009 - return flatten([[myarch, "~" + myarch] \
3010 - for myarch in self["PORTAGE_ARCHLIST"].split()])
3011 -
3012 - def selinux_enabled(self):
3013 - if getattr(self, "_selinux_enabled", None) is None:
3014 - self._selinux_enabled = 0
3015 - if "selinux" in self["USE"].split():
3016 - if selinux:
3017 - if selinux.is_selinux_enabled() == 1:
3018 - self._selinux_enabled = 1
3019 - else:
3020 - self._selinux_enabled = 0
3021 - else:
3022 - writemsg(_("!!! SELinux module not found. Please verify that it was installed.\n"),
3023 - noiselevel=-1)
3024 - self._selinux_enabled = 0
3025 -
3026 - return self._selinux_enabled
3027 -
3028 - if sys.hexversion >= 0x3000000:
3029 - keys = __iter__
3030 - items = iteritems
3031 -
3032 def _can_test_pty_eof():
3033 """
3034 The _test_pty_eof() function seems to hang on most
3035 @@ -4232,6 +1396,7 @@
3036 return retval >> 8
3037 return retval
3038
3039 +<<<<<<< .working
3040 _userpriv_spawn_kwargs = (
3041 ("uid", portage_uid),
3042 ("gid", portage_gid),
3043 @@ -5334,6 +2499,8 @@
3044 return 0
3045 return 1
3046
3047 +=======
3048 +>>>>>>> .merge-right.r15429
3049 def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
3050 """
3051 Generates a digest file if missing. Assumes all files are available.
3052
3053 Modified: main/branches/prefix/pym/portage/dbapi/bintree.py
3054 ===================================================================
3055 --- main/branches/prefix/pym/portage/dbapi/bintree.py 2010-02-22 13:02:22 UTC (rev 15433)
3056 +++ main/branches/prefix/pym/portage/dbapi/bintree.py 2010-02-22 13:26:20 UTC (rev 15434)
3057 @@ -22,11 +22,12 @@
3058 from portage.const import EAPI
3059 from portage.localization import _
3060
3061 -from portage import dep_expand, listdir, _check_distfile, _movefile
3062 +from portage import dep_expand, listdir, _movefile
3063 from portage import os
3064 from portage import _encodings
3065 from portage import _unicode_decode
3066 from portage import _unicode_encode
3067 +from portage.package.ebuild.fetch import _check_distfile
3068
3069 import codecs
3070 import errno
3071
3072 Modified: main/branches/prefix/pym/portage/dbapi/porttree.py
3073 ===================================================================
3074 --- main/branches/prefix/pym/portage/dbapi/porttree.py 2010-02-22 13:02:22 UTC (rev 15433)
3075 +++ main/branches/prefix/pym/portage/dbapi/porttree.py 2010-02-22 13:26:20 UTC (rev 15434)
3076 @@ -18,6 +18,7 @@
3077 )
3078
3079 from portage.cache.cache_errors import CacheError
3080 +from portage.cache.mappings import Mapping
3081 from portage.const import REPO_NAME_LOC
3082 from portage.data import portage_gid, secpass
3083 from portage.dbapi import dbapi
3084 @@ -127,17 +128,17 @@
3085 _use_mutable = True
3086
3087 def _get_settings(self):
3088 - warnings.warn("Use portdbapi.settings insead of portdbapi.mysettings",
3089 + warnings.warn("Use portdbapi.settings instead of portdbapi.mysettings",
3090 DeprecationWarning)
3091 return self.settings
3092
3093 def _set_settings(self, settings):
3094 - warnings.warn("Use portdbapi.settings insead of portdbapi.mysettings",
3095 + warnings.warn("Use portdbapi.settings instead of portdbapi.mysettings",
3096 DeprecationWarning)
3097 self.settings = settings
3098
3099 def _del_settings (self):
3100 - warnings.warn("Use portdbapi.settings insead of portdbapi.mysettings",
3101 + warnings.warn("Use portdbapi.settings instead of portdbapi.mysettings",
3102 DeprecationWarning)
3103 del self.settings
3104
3105 @@ -1243,7 +1244,7 @@
3106 pass
3107 return myslot
3108
3109 -class FetchlistDict(portage.cache.mappings.Mapping):
3110 +class FetchlistDict(Mapping):
3111 """
3112 This provide a mapping interface to retrieve fetch lists. It's used
3113 to allow portage.manifest.Manifest to access fetch lists via a standard
3114
3115 Modified: main/branches/prefix/pym/portage/package/ebuild/config.py
3116 ===================================================================
3117 --- main/trunk/pym/portage/package/ebuild/config.py 2010-02-22 10:44:23 UTC (rev 15429)
3118 +++ main/branches/prefix/pym/portage/package/ebuild/config.py 2010-02-22 13:26:20 UTC (rev 15434)
3119 @@ -190,6 +190,8 @@
3120 "ROOT", "ROOTPATH", "T", "TMP", "TMPDIR",
3121 "USE_EXPAND", "USE_ORDER", "WORKDIR",
3122 "XARGS",
3123 + "BPREFIX", "DEFAULT_PATH", "EXTRA_PATH",
3124 + "PORTAGE_GROUP", "PORTAGE_USER",
3125 ]
3126
3127 # user config variables
3128 @@ -221,11 +223,12 @@
3129 ]
3130
3131 # other variables inherited from the calling environment
3132 + # UNIXMODE is necessary for MiNT
3133 _environ_whitelist += [
3134 "CVS_RSH", "ECHANGELOG_USER",
3135 "GPG_AGENT_INFO",
3136 "SSH_AGENT_PID", "SSH_AUTH_SOCK",
3137 - "STY", "WINDOW", "XAUTHORITY",
3138 + "STY", "WINDOW", "XAUTHORITY", "UNIXMODE",
3139 ]
3140
3141 _environ_whitelist = frozenset(_environ_whitelist)
3142 @@ -246,6 +249,10 @@
3143 # misc variables inherited from the calling environment
3144 _environ_filter += [
3145 "INFOPATH", "MANPATH", "USER",
3146 + "HOST", "GROUP", "LOGNAME", "MAIL", "REMOTEHOST",
3147 + "SECURITYSESSIONID",
3148 + "TERMINFO", "TERM_PROGRAM", "TERM_PROGRAM_VERSION",
3149 + "VENDOR", "__CF_USER_TEXT_ENCODING",
3150 ]
3151
3152 # variables that break bash
3153 @@ -434,7 +441,7 @@
3154 raise DirectoryNotFound(var)
3155
3156 if config_root is None:
3157 - config_root = "/"
3158 + config_root = EPREFIX + os.path.sep
3159
3160 config_root = normalize_path(os.path.abspath(
3161 config_root)).rstrip(os.path.sep) + os.path.sep
3162 @@ -627,8 +634,8 @@
3163 target_root = normalize_path(os.path.abspath(
3164 target_root)).rstrip(os.path.sep) + os.path.sep
3165
3166 - ensure_dirs(target_root)
3167 - check_var_directory("ROOT", target_root)
3168 + portage.util.ensure_dirs(target_root + EPREFIX_LSTRIP)
3169 + check_var_directory("EROOT", target_root + EPREFIX_LSTRIP)
3170
3171 # The expand_map is used for variable substitution
3172 # in getconfig() calls, and the getconfig() calls
3173 @@ -647,7 +654,7 @@
3174 # lead to unexpected results.
3175 expand_map = {}
3176
3177 - env_d = getconfig(os.path.join(target_root, "etc", "profile.env"),
3178 + env_d = getconfig(os.path.join(target_root, EPREFIX_LSTRIP, "etc", "profile.env"),
3179 expand=expand_map)
3180 # env_d will be None if profile.env doesn't exist.
3181 if env_d:
3182 @@ -680,7 +687,7 @@
3183
3184 # make.globals should not be relative to config_root
3185 # because it only contains constants.
3186 - for x in (GLOBAL_CONFIG_PATH, "/etc"):
3187 + for x in (portage.const.GLOBAL_CONFIG_PATH, BPREFIX+"/etc"):
3188 self.mygcfg = getconfig(os.path.join(x, "make.globals"),
3189 expand=expand_map)
3190 if self.mygcfg:
3191 @@ -758,11 +765,10 @@
3192 self.backup_changes("PORTAGE_CONFIGROOT")
3193 self["ROOT"] = target_root
3194 self.backup_changes("ROOT")
3195 + self["EPREFIX"] = EPREFIX
3196
3197 - # Prefix forward compatability, set EPREFIX to the empty string
3198 - self["EPREFIX"] = ''
3199 self.backup_changes("EPREFIX")
3200 - self["EROOT"] = target_root
3201 + self["EROOT"] = target_root + EPREFIX_LSTRIP + os.path.sep
3202 self.backup_changes("EROOT")
3203
3204 self.pusedict = {}
3205 @@ -1020,13 +1026,13 @@
3206 """
3207 Create a few directories that are critical to portage operation
3208 """
3209 - if not os.access(self["ROOT"], os.W_OK):
3210 + if not os.access(self["ROOT"] + EPREFIX_LSTRIP, os.W_OK):
3211 return
3212
3213 # gid, mode, mask, preserve_perms
3214 dir_mode_map = {
3215 - "tmp" : ( -1, 0o1777, 0, True),
3216 - "var/tmp" : ( -1, 0o1777, 0, True),
3217 + EPREFIX_LSTRIP+"/tmp" : ( -1, 0o1777, 0, True),
3218 + EPREFIX_LSTRIP+"/var/tmp" : ( -1, 0o1777, 0, True),
3219 PRIVATE_PATH : (portage_gid, 0o2750, 0o2, False),
3220 CACHE_PATH : (portage_gid, 0o755, 0o2, False)
3221 }
3222 @@ -2061,7 +2067,7 @@
3223
3224 def reload(self):
3225 """Reload things like /etc/profile.env that can change during runtime."""
3226 - env_d_filename = os.path.join(self["ROOT"], "etc", "profile.env")
3227 + env_d_filename = os.path.join(self["ROOT"], EPREFIX_LSTRIP, "etc", "profile.env")
3228 self.configdict["env.d"].clear()
3229 env_d = getconfig(env_d_filename, expand=False)
3230 if env_d:
3231 @@ -2566,10 +2572,11 @@
3232 mydict.pop("AA", None)
3233
3234 # Prefix variables are supported starting with EAPI 3.
3235 - if phase == 'depend' or eapi in (None, "0", "1", "2"):
3236 - mydict.pop("ED", None)
3237 - mydict.pop("EPREFIX", None)
3238 - mydict.pop("EROOT", None)
3239 + # but during transition, we just support them anywhere
3240 + #if phase == 'depend' or eapi in (None, "0", "1", "2"):
3241 + # mydict.pop("ED", None)
3242 + # mydict.pop("EPREFIX", None)
3243 + # mydict.pop("EROOT", None)
3244
3245 if phase == 'depend':
3246 mydict.pop('FILESDIR', None)
3247
3248 Copied: main/branches/prefix/pym/portage/util/__init__.py (from rev 15429, main/trunk/pym/portage/util/__init__.py)
3249 ===================================================================
3250 --- main/branches/prefix/pym/portage/util/__init__.py (rev 0)
3251 +++ main/branches/prefix/pym/portage/util/__init__.py 2010-02-22 13:26:20 UTC (rev 15434)
3252 @@ -0,0 +1,1441 @@
3253 +# Copyright 2004-2009 Gentoo Foundation
3254 +# Distributed under the terms of the GNU General Public License v2
3255 +# $Id$
3256 +
3257 +__all__ = ['apply_permissions', 'apply_recursive_permissions',
3258 + 'apply_secpass_permissions', 'apply_stat_permissions', 'atomic_ofstream',
3259 + 'cmp_sort_key', 'ConfigProtect', 'dump_traceback', 'ensure_dirs',
3260 + 'find_updated_config_files', 'getconfig', 'getlibpaths', 'grabdict',
3261 + 'grabdict_package', 'grabfile', 'grabfile_package', 'grablines',
3262 + 'initialize_logger', 'LazyItemsDict', 'map_dictlist_vals',
3263 + 'new_protect_filename', 'normalize_path', 'pickle_read', 'stack_dictlist',
3264 + 'stack_dicts', 'stack_lists', 'unique_array', 'varexpand', 'write_atomic',
3265 + 'writedict', 'writemsg', 'writemsg_level', 'writemsg_stdout']
3266 +
3267 +try:
3268 + from subprocess import getstatusoutput as subprocess_getstatusoutput
3269 +except ImportError:
3270 + from commands import getstatusoutput as subprocess_getstatusoutput
3271 +import codecs
3272 +import errno
3273 +import logging
3274 +import re
3275 +import shlex
3276 +import stat
3277 +import string
3278 +import sys
3279 +
3280 +import portage
3281 +from portage import StringIO
3282 +from portage import os
3283 +from portage import _encodings
3284 +from portage import _os_merge
3285 +from portage import _unicode_encode
3286 +from portage import _unicode_decode
3287 +from portage.exception import InvalidAtom, PortageException, FileNotFound, \
3288 + OperationNotPermitted, PermissionDenied, ReadOnlyFileSystem
3289 +from portage.dep import Atom, isvalidatom
3290 +from portage.localization import _
3291 +from portage.proxy.objectproxy import ObjectProxy
3292 +from portage.cache.mappings import UserDict
3293 +from portage.const import EPREFIX, EPREFIX_LSTRIP
3294 +
3295 +try:
3296 + import cPickle as pickle
3297 +except ImportError:
3298 + import pickle
3299 +
3300 +noiselimit = 0
3301 +
3302 +def initialize_logger(level=logging.WARN):
3303 + """Sets up basic logging of portage activities
3304 + Args:
3305 + level: the level to emit messages at ('info', 'debug', 'warning' ...)
3306 + Returns:
3307 + None
3308 + """
3309 + logging.basicConfig(level=logging.WARN, format='[%(levelname)-4s] %(message)s')
3310 +
3311 +def writemsg(mystr,noiselevel=0,fd=None):
3312 + """Prints out warning and debug messages based on the noiselimit setting"""
3313 + global noiselimit
3314 + if fd is None:
3315 + fd = sys.stderr
3316 + if noiselevel <= noiselimit:
3317 + # avoid potential UnicodeEncodeError
3318 + mystr = _unicode_encode(mystr,
3319 + encoding=_encodings['stdio'], errors='backslashreplace')
3320 + if sys.hexversion >= 0x3000000:
3321 + fd = fd.buffer
3322 + fd.write(mystr)
3323 + fd.flush()
3324 +
3325 +def writemsg_stdout(mystr,noiselevel=0):
3326 + """Prints messages stdout based on the noiselimit setting"""
3327 + writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
3328 +
3329 +def writemsg_level(msg, level=0, noiselevel=0):
3330 + """
3331 + Show a message for the given level as defined by the logging module
3332 + (default is 0). When level >= logging.WARNING then the message is
3333 + sent to stderr, otherwise it is sent to stdout. The noiselevel is
3334 + passed directly to writemsg().
3335 +
3336 + @type msg: str
3337 + @param msg: a message string, including newline if appropriate
3338 + @type level: int
3339 + @param level: a numeric logging level (see the logging module)
3340 + @type noiselevel: int
3341 + @param noiselevel: passed directly to writemsg
3342 + """
3343 + if level >= logging.WARNING:
3344 + fd = sys.stderr
3345 + else:
3346 + fd = sys.stdout
3347 + writemsg(msg, noiselevel=noiselevel, fd=fd)
3348 +
3349 +def normalize_path(mypath):
3350 + """
3351 + os.path.normpath("//foo") returns "//foo" instead of "/foo"
3352 + We dislike this behavior so we create our own normpath func
3353 + to fix it.
3354 + """
3355 + if sys.hexversion >= 0x3000000 and isinstance(mypath, bytes):
3356 + path_sep = os.path.sep.encode()
3357 + else:
3358 + path_sep = os.path.sep
3359 +
3360 + if mypath.startswith(path_sep):
3361 + # posixpath.normpath collapses 3 or more leading slashes to just 1.
3362 + return os.path.normpath(2*path_sep + mypath)
3363 + else:
3364 + return os.path.normpath(mypath)
3365 +
3366 +def grabfile(myfilename, compat_level=0, recursive=0):
3367 + """This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
3368 + begins with a #, it is ignored, as are empty lines"""
3369 +
3370 + mylines=grablines(myfilename, recursive)
3371 + newlines=[]
3372 + for x in mylines:
3373 + #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
3374 + #into single spaces.
3375 + myline = _unicode_decode(' ').join(x.split())
3376 + if not len(myline):
3377 + continue
3378 + if myline[0]=="#":
3379 + # Check if we have a compat-level string. BC-integration data.
3380 + # '##COMPAT==>N<==' 'some string attached to it'
3381 + mylinetest = myline.split("<==",1)
3382 + if len(mylinetest) == 2:
3383 + myline_potential = mylinetest[1]
3384 + mylinetest = mylinetest[0].split("##COMPAT==>")
3385 + if len(mylinetest) == 2:
3386 + if compat_level >= int(mylinetest[1]):
3387 + # It's a compat line, and the key matches.
3388 + newlines.append(myline_potential)
3389 + continue
3390 + else:
3391 + continue
3392 + newlines.append(myline)
3393 + return newlines
3394 +
3395 +def map_dictlist_vals(func,myDict):
3396 + """Performs a function on each value of each key in a dictlist.
3397 + Returns a new dictlist."""
3398 + new_dl = {}
3399 + for key in myDict:
3400 + new_dl[key] = []
3401 + new_dl[key] = [func(x) for x in myDict[key]]
3402 + return new_dl
3403 +
3404 +def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0):
3405 + """
3406 + Stacks an array of dict-types into one array. Optionally merging or
3407 + overwriting matching key/value pairs for the dict[key]->list.
3408 + Returns a single dict. Higher index in lists is preferenced.
3409 +
3410 + Example usage:
3411 + >>> from portage.util import stack_dictlist
3412 + >>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
3413 + >>> {'a':'b','x':'y'}
3414 + >>> print stack_dictlist( [{'a':'b'},{'a':'c'}], incremental = True )
3415 + >>> {'a':['b','c'] }
3416 + >>> a = {'KEYWORDS':['x86','alpha']}
3417 + >>> b = {'KEYWORDS':['-x86']}
3418 + >>> print stack_dictlist( [a,b] )
3419 + >>> { 'KEYWORDS':['x86','alpha','-x86']}
3420 + >>> print stack_dictlist( [a,b], incremental=True)
3421 + >>> { 'KEYWORDS':['alpha'] }
3422 + >>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
3423 + >>> { 'KEYWORDS':['alpha'] }
3424 +
3425 + @param original_dicts a list of (dictionary objects or None)
3426 + @type list
3427 + @param incremental True or false depending on whether new keys should overwrite
3428 + keys which already exist.
3429 + @type boolean
3430 + @param incrementals A list of items that should be incremental (-foo removes foo from
3431 + the returned dict).
3432 + @type list
3433 + @param ignore_none Appears to be ignored, but probably was used long long ago.
3434 + @type boolean
3435 +
3436 + """
3437 + final_dict = {}
3438 + for mydict in original_dicts:
3439 + if mydict is None:
3440 + continue
3441 + for y in mydict:
3442 + if not y in final_dict:
3443 + final_dict[y] = []
3444 +
3445 + for thing in mydict[y]:
3446 + if thing:
3447 + if incremental or y in incrementals:
3448 + if thing == "-*":
3449 + final_dict[y] = []
3450 + continue
3451 + elif thing[:1] == '-':
3452 + try:
3453 + final_dict[y].remove(thing[1:])
3454 + except ValueError:
3455 + pass
3456 + continue
3457 + if thing not in final_dict[y]:
3458 + final_dict[y].append(thing)
3459 + if y in final_dict and not final_dict[y]:
3460 + del final_dict[y]
3461 + return final_dict
3462 +
3463 +def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
3464 + """Stacks an array of dict-types into one array. Optionally merging or
3465 + overwriting matching key/value pairs for the dict[key]->string.
3466 + Returns a single dict."""
3467 + final_dict = {}
3468 + for mydict in dicts:
3469 + if not mydict:
3470 + continue
3471 + for k, v in mydict.items():
3472 + if k in final_dict and (incremental or (k in incrementals)):
3473 + final_dict[k] += " " + v
3474 + else:
3475 + final_dict[k] = v
3476 + return final_dict
3477 +
3478 +def stack_lists(lists, incremental=1):
3479 + """Stacks an array of list-types into one array. Optionally removing
3480 + distinct values using '-value' notation. Higher index is preferenced.
3481 +
3482 + all elements must be hashable."""
3483 +
3484 + new_list = {}
3485 + for x in lists:
3486 + for y in filter(None, x):
3487 + if incremental:
3488 + if y == "-*":
3489 + new_list.clear()
3490 + elif y[:1] == '-':
3491 + new_list.pop(y[1:], None)
3492 + else:
3493 + new_list[y] = True
3494 + else:
3495 + new_list[y] = True
3496 + return list(new_list)
3497 +
def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
	"""
	Read a file and return its lines as a whitespace-normalized dictionary.

	@param myfilename: file to process
	@type myfilename: string (path)
	@param juststrings: if true, values are space-joined strings, not lists
	@type juststrings: Boolean (integer)
	@param empty: if true, keep lines that have a key but no values
	@type empty: Boolean (integer)
	@param recursive: if true, also grab files inside a directory argument
		(support for /etc/portage/package.keywords/* and friends)
	@type recursive: Boolean (integer)
	@param incremental: if true, merge values for repeated keys instead of
		overwriting them
	@type incremental: Boolean (integer)
	@rtype: Dictionary
	@returns: mapping of the first token of each line to a list of the
		remaining tokens; for example 'sys-apps/portage x86 amd64 ppc'
		becomes { "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ] }
	"""
	result = {}
	for line in grablines(myfilename, recursive):
		# Comment lines are skipped entirely.
		if line[0] == "#":
			continue
		# split() both strips the line and collapses internal whitespace
		# into single token boundaries.
		tokens = line.split()
		if len(tokens) < 2 and empty == 0:
			continue
		if len(tokens) < 1 and empty == 1:
			continue
		if incremental:
			result.setdefault(tokens[0], []).extend(tokens[1:])
		else:
			result[tokens[0]] = tokens[1:]
	if juststrings:
		result = dict((key, " ".join(values))
			for key, values in result.items())
	return result
3539 +
def grabdict_package(myfilename, juststrings=0, recursive=0):
	"""Like grabdict, but each key must be a valid package atom; keys
	that fail Atom() validation are reported on stderr and dropped."""
	raw = grabdict(myfilename, juststrings, empty=1, recursive=recursive)
	validated = {}
	for key, values in raw.items():
		try:
			atom_key = Atom(key)
		except InvalidAtom:
			writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, key),
				noiselevel=-1)
			continue
		validated[atom_key] = values
	return validated
3557 +
def grabfile_package(myfilename, compatlevel=0, recursive=0):
	"""Read a package-list file and return its entries, validating each
	one as an atom.  An entry carrying a special prefix ('-', or '*' in
	profile 'packages' files) is validated without the prefix but
	returned as the original string; plain entries come back as Atom
	instances."""
	entries = grabfile(myfilename, compatlevel, recursive=recursive)
	is_packages_file = os.path.basename(myfilename) == 'packages'
	result = []
	for entry in entries:
		stripped = entry
		# Strip the "remove" prefix used by packages/package.mask files.
		if stripped[:1] == "-":
			stripped = stripped[1:]
		# Profile 'packages' files additionally use a leading '*' marker.
		if is_packages_file and stripped[:1] == '*':
			stripped = stripped[1:]
		try:
			atom = Atom(stripped)
		except InvalidAtom:
			writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, stripped),
				noiselevel=-1)
			continue
		if entry == str(atom):
			# normal atom, so return as Atom instance
			result.append(atom)
		else:
			# atom has special prefix, so return as string
			result.append(entry)
	return result
3582 +
def grablines(myfilename, recursive=0):
	"""
	Return the lines of a file as a list (trailing newlines preserved).

	@param myfilename: file or, with recursive=1, directory to read
	@type myfilename: string (path)
	@param recursive: if true and myfilename is a directory, read every
		non-hidden, non-backup file inside it, recursively and in sorted
		order
	@type recursive: Boolean (integer)
	@rtype: list
	@returns: the lines read, or an empty list if the path is missing
	@raises PermissionDenied: if the path exists but is unreadable
	"""
	mylines = []
	if recursive and os.path.isdir(myfilename):
		# Skip version control directories.  Compare the basename rather
		# than the whole path: recursion always passes joined paths, so
		# the original full-path comparison could only ever match a bare
		# top-level argument and nested 'CVS'/'RCS'/'SCCS' directories
		# were silently traversed.
		if os.path.basename(myfilename) in ("RCS", "CVS", "SCCS"):
			return mylines
		dirlist = os.listdir(myfilename)
		dirlist.sort()
		for f in dirlist:
			# Ignore hidden files and editor backup files.
			if not f.startswith(".") and not f.endswith("~"):
				mylines.extend(grablines(
					os.path.join(myfilename, f), recursive))
	else:
		try:
			myfile = codecs.open(_unicode_encode(myfilename,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='replace')
			try:
				mylines = myfile.readlines()
			finally:
				# Close even if readlines() fails, instead of leaking
				# the handle until garbage collection.
				myfile.close()
		except IOError as e:
			if e.errno == PermissionDenied.errno:
				raise PermissionDenied(myfilename)
			# Other errors (typically ENOENT) yield an empty result.
			pass
	return mylines
3606 +
def writedict(mydict, myfilename, writekey=True):
	"""Atomically dump a dict to a file.  With writekey=False only the
	values (assumed to be strings) are written, one per line; otherwise
	each line is the key followed by its space-joined value list.
	Returns 1 on success and 0 on an I/O failure."""
	myfile = None
	try:
		myfile = atomic_ofstream(myfilename)
		if writekey:
			for key in mydict:
				myfile.write("%s %s\n" % (key, " ".join(mydict[key])))
		else:
			for value in mydict.values():
				myfile.write(value + "\n")
		myfile.close()
	except IOError:
		# Leave the target untouched on failure.
		if myfile is not None:
			myfile.abort()
		return 0
	return 1
3625 +
def shlex_split(s):
	"""
	Split s exactly like shlex.split(), transparently working around
	Python 2's shlex not handling unicode input: encode to bytes before
	splitting and decode each resulting token afterwards.
	"""
	needs_roundtrip = sys.hexversion < 0x3000000 and isinstance(s, unicode)
	if needs_roundtrip:
		s = _unicode_encode(s)
	tokens = shlex.split(s)
	if needs_roundtrip:
		tokens = [_unicode_decode(token) for token in tokens]
	return tokens
3638 +
class _tolerant_shlex(shlex.shlex):
	# A shlex subclass whose "source" directive handling never aborts
	# parsing: when a sourced file cannot be opened, a warning is printed
	# and an empty stream is substituted so the including file continues
	# to parse.
	def sourcehook(self, newfile):
		try:
			return shlex.shlex.sourcehook(self, newfile)
		except EnvironmentError as e:
			writemsg(_("!!! Parse error in '%s': source command failed: %s\n") % \
				(self.infile, str(e)), noiselevel=-1)
			return (newfile, StringIO())
3647 +
3648 +_invalid_var_name_re = re.compile(r'^\d|\W')
3649 +
def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
	"""
	Parse a bash-like KEY=value configuration file and return its
	variables as a dict.

	@param mycfg: path of the config file
	@param tolerant: if true, syntax errors yield warnings and a partial
		result instead of raising
	@param allow_sourcing: if true, honor the "source" directive
	@param expand: if True, expand ${VAR} references via varexpand(); may
		also be a dict of pre-existing variable definitions to use in
		substitutions
	@rtype: dict or None
	@returns: the parsed variables, or None if the file does not exist
	@raises portage.exception.ParseError: on parse failure when not tolerant
	"""
	if isinstance(expand, dict):
		# Some existing variable definitions have been
		# passed in, for use in substitutions.
		expand_map = expand
		expand = True
	else:
		expand_map = {}
	mykeys = {}
	f = None
	try:
		# NOTE: shlex doesn't seem to support unicode objects
		# (produces spurious \0 characters with python-2.6.2)
		if sys.hexversion < 0x3000000:
			f = open(_unicode_encode(mycfg,
				encoding=_encodings['fs'], errors='strict'), 'rb')
		else:
			f = open(_unicode_encode(mycfg,
				encoding=_encodings['fs'], errors='strict'), mode='r',
				encoding=_encodings['content'], errors='replace')
		content = f.read()
	except IOError as e:
		if e.errno == PermissionDenied.errno:
			raise PermissionDenied(mycfg)
		if e.errno != errno.ENOENT:
			writemsg("open('%s', 'r'): %s\n" % (mycfg, e), noiselevel=-1)
			raise
		return None
	finally:
		# Close the handle explicitly instead of leaking it until
		# garbage collection.
		if f is not None:
			f.close()

	# Workaround for avoiding a silent error in shlex that is
	# triggered by a source statement at the end of the file
	# without a trailing newline after the source statement.
	if content and content[-1] != '\n':
		content += '\n'
	try:
		if tolerant:
			shlex_class = _tolerant_shlex
		else:
			shlex_class = shlex.shlex
		# The default shlex.sourcehook() implementation
		# only joins relative paths when the infile
		# attribute is properly set.
		lex = shlex_class(content, infile=mycfg, posix=True)
		lex.wordchars = string.digits + string.ascii_letters + \
			"~!@#$%*_\:;?,./-+{}"
		lex.quotes = "\"'"
		if allow_sourcing:
			lex.source = "source"
		while 1:
			key = lex.get_token()
			if key == "export":
				key = lex.get_token()
			if key is None:
				# normal end of file
				break
			equ = lex.get_token()
			# In posix mode get_token() signals EOF with None; the
			# original test for '' alone could never match, so EOF after
			# a key fell through to the misleading "invalid token"
			# branch.  Check both.
			if equ is None or equ == '':
				# unexpected end of file
				if not tolerant:
					writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
						noiselevel=-1)
					raise Exception(_("ParseError: Unexpected EOF: %s: on/before line %s") % (mycfg, lex.lineno))
				else:
					return mykeys
			elif equ != '=':
				# invalid token
				if not tolerant:
					raise Exception(_("ParseError: Invalid token "
						"'%s' (not '='): %s: line %s") % \
						(equ, mycfg, lex.lineno))
				else:
					return mykeys
			val = lex.get_token()
			if val is None:
				# unexpected end of file
				if not tolerant:
					writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
						noiselevel=-1)
					raise portage.exception.CorruptionError(_("ParseError: Unexpected EOF: %s: line %s") % (mycfg, lex.lineno))
				else:
					return mykeys
			key = _unicode_decode(key)
			val = _unicode_decode(val)

			if _invalid_var_name_re.search(key) is not None:
				if not tolerant:
					raise Exception(_(
						"ParseError: Invalid variable name '%s': line %s") % \
						(key, lex.lineno - 1))
				writemsg(_("!!! Invalid variable name '%s': line %s in %s\n") \
					% (key, lex.lineno - 1, mycfg), noiselevel=-1)
				continue

			if expand:
				mykeys[key] = varexpand(val, expand_map)
				expand_map[key] = mykeys[key]
			else:
				mykeys[key] = val
	except SystemExit:
		raise
	except Exception as e:
		# Report any parse failure uniformly, naming the offending file.
		raise portage.exception.ParseError(str(e) + " in " + mycfg)
	return mykeys
3753 +
# Cache for varexpand(): maps " "+string to its expansion.  Populated only
# for strings whose expansion is constant (no variable references), so the
# cached value cannot depend on the mydict argument.
cexpand={}
def varexpand(mystring, mydict={}):
	# Expand $VAR / ${VAR} references in mystring using mydict, handling
	# backslash escapes and single/double quoting the way the config-file
	# parser expects.  Returns "" for syntactically invalid references.
	# NOTE(review): the mutable default for mydict is shared across calls;
	# it appears to be treated as read-only here — confirm with callers.
	newstring = cexpand.get(" "+mystring, None)
	if newstring is not None:
		return newstring

	"""
	new variable expansion code. Preserves quotes, handles \n, etc.
	This code is used by the configfile code, as well as others (parser)
	This would be a good bunch of code to port to C.
	"""
	numvars=0
	# The leading-space sentinel lets the loop inspect mystring[pos-1]
	# (escape detection) without a bounds check; it is stripped from the
	# result again at the end.
	mystring=" "+mystring
	#in single, double quotes
	insing=0
	indoub=0
	pos=1
	newstring=" "
	while (pos<len(mystring)):
		if (mystring[pos]=="'") and (mystring[pos-1]!="\\"):
			if (indoub):
				# A single quote inside double quotes is literal.
				newstring=newstring+"'"
			else:
				newstring += "'" # Quote removal is handled by shlex.
				insing=not insing
			pos=pos+1
			continue
		elif (mystring[pos]=='"') and (mystring[pos-1]!="\\"):
			if (insing):
				# A double quote inside single quotes is literal.
				newstring=newstring+'"'
			else:
				newstring += '"' # Quote removal is handled by shlex.
				indoub=not indoub
			pos=pos+1
			continue
		if (not insing):
			#expansion time
			if (mystring[pos]=="\n"):
				#convert newlines to spaces
				newstring=newstring+" "
				pos=pos+1
			elif (mystring[pos]=="\\"):
				#backslash expansion time
				if (pos+1>=len(mystring)):
					newstring=newstring+mystring[pos]
					break
				else:
					a=mystring[pos+1]
					pos=pos+2
					if a=='a':
						newstring=newstring+chr(0o07)
					elif a=='b':
						newstring=newstring+chr(0o10)
					elif a=='e':
						newstring=newstring+chr(0o33)
					elif (a=='f') or (a=='n'):
						newstring=newstring+chr(0o12)
					elif a=='r':
						newstring=newstring+chr(0o15)
					elif a=='t':
						newstring=newstring+chr(0o11)
					elif a=='v':
						newstring=newstring+chr(0o13)
					elif a!='\n':
						#remove backslash only, as bash does: this takes care of \\ and \' and \" as well
						newstring=newstring+mystring[pos-1:pos]
					continue
			elif (mystring[pos]=="$") and (mystring[pos-1]!="\\"):
				# NOTE(review): a trailing "$" would index past the end
				# here; getconfig() always appends a newline, which
				# appears to prevent that — confirm for other callers.
				pos=pos+1
				if mystring[pos]=="{":
					pos=pos+1
					braced=True
				else:
					braced=False
				myvstart=pos
				validchars=string.ascii_letters+string.digits+"_"
				while mystring[pos] in validchars:
					if (pos+1)>=len(mystring):
						if braced:
							# Unterminated ${...}: invalid reference.
							cexpand[mystring]=""
							return ""
						else:
							pos=pos+1
							break
					pos=pos+1
				myvarname=mystring[myvstart:pos]
				if braced:
					if mystring[pos]!="}":
						cexpand[mystring]=""
						return ""
					else:
						pos=pos+1
				if len(myvarname)==0:
					cexpand[mystring]=""
					return ""
				numvars=numvars+1
				if myvarname in mydict:
					newstring=newstring+mydict[myvarname]
			else:
				newstring=newstring+mystring[pos]
				pos=pos+1
		else:
			# Inside single quotes: copy characters verbatim.
			newstring=newstring+mystring[pos]
			pos=pos+1
	if numvars==0:
		# Only constant strings are cached; results that used mydict
		# would be wrong for other callers.
		cexpand[mystring]=newstring[1:]
	return newstring[1:]
3862 +
# pickle_write was broken and has been removed; the name is kept bound to
# None so that legacy "from portage.util import pickle_write" still works.
pickle_write = None
3865 +
def pickle_read(filename, default=None, debug=0):
	"""
	Load and return a pickled object from filename.

	@param filename: path of the pickle file
	@param default: value returned when the file is unreadable or
		unpickling fails
	@param debug: unused; kept for backward compatibility
	@rtype: object
	@returns: the unpickled data, or default on any failure
	"""
	# Local import kept deliberately: it guarantees the builtin os module
	# is used here regardless of what the module-level "os" is bound to.
	import os
	if not os.access(filename, os.R_OK):
		writemsg(_("pickle_read(): File not readable. '")+filename+"'\n", 1)
		return default
	data = None
	try:
		myf = open(_unicode_encode(filename,
			encoding=_encodings['fs'], errors='strict'), 'rb')
		try:
			data = pickle.Unpickler(myf).load()
		finally:
			# Close the handle even when load() raises, instead of
			# leaking it until garbage collection.
			myf.close()
		writemsg(_("pickle_read(): Loaded pickle. '")+filename+"'\n", 1)
	except SystemExit:
		raise
	except Exception as e:
		writemsg(_("!!! Failed to load pickle: ")+str(e)+"\n", 1)
		data = default
	return data
3886 +
def dump_traceback(msg, noiselevel=1):
	"""Write msg plus the current traceback (or, when not inside an
	exception handler, the current call stack) via writemsg()."""
	import sys, traceback
	exc_info = sys.exc_info()
	if exc_info[2]:
		frames = traceback.extract_tb(exc_info[2])
		error = str(exc_info[1])
	else:
		# No active exception: report the call stack instead, minus
		# this function's own frame.
		frames = traceback.extract_stack()[:-1]
		error = None
	writemsg("\n====================================\n", noiselevel=noiselevel)
	writemsg("%s\n\n" % msg, noiselevel=noiselevel)
	for line in traceback.format_list(frames):
		writemsg(line, noiselevel=noiselevel)
	if error:
		writemsg(error+"\n", noiselevel=noiselevel)
	writemsg("====================================\n\n", noiselevel=noiselevel)
3903 +
class cmp_sort_key(object):
	"""
	Adapter from a classic two-argument cmp function to a "key" callable
	for list.sort()/sorted(), which python-3.0 requires.  Calling an
	instance wraps the element in a small object whose __lt__ defers to
	the cmp function, which is all list.sort() needs.
	"""
	__slots__ = ("_cmp_func",)

	def __init__(self, cmp_func):
		"""
		@type cmp_func: callable which takes 2 positional arguments
		@param cmp_func: A cmp function.
		"""
		self._cmp_func = cmp_func

	def __call__(self, lhs):
		return self._cmp_key(self._cmp_func, lhs)

	class _cmp_key(object):
		__slots__ = ("_cmp_func", "_obj")

		def __init__(self, cmp_func, obj):
			self._cmp_func = cmp_func
			self._obj = obj

		def __lt__(self, other):
			# Guard clause: refuse comparison against foreign objects so
			# mistakes surface immediately rather than sorting nonsense.
			if other.__class__ is not self.__class__:
				raise TypeError("Expected type %s, got %s" % \
					(self.__class__, other.__class__))
			return self._cmp_func(self._obj, other._obj) < 0
3937 +
def unique_array(s):
	"""Return the elements of s sans duplicates, in arbitrary order
	(lifted from the python cookbook, credit: Tim Peters)."""
	n = len(s)
	# Fast path: hashable elements allow a linear-time set round trip.
	try:
		return list(set(s))
	except TypeError:
		pass

	# Unhashable but sortable: sort a copy, then squeeze out adjacent
	# duplicates in place.
	try:
		t = list(s)
		t.sort()
	except TypeError:
		pass
	else:
		assert n > 0
		last = t[0]
		lasti = i = 1
		while i < n:
			if t[i] != last:
				t[lasti] = last = t[i]
				lasti += 1
			i += 1
		return t[:lasti]

	# Last resort: quadratic scan keeping first occurrences.
	result = []
	for item in s:
		if item not in result:
			result.append(item)
	return result
3971 +
def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
	stat_cached=None, follow_links=True):
	"""Apply user, group, and mode bits to a file if the existing bits do not
	already match. The default behavior is to force an exact match of mode
	bits. When mask=0 is specified, mode bits on the target file are allowed
	to be a superset of the mode argument (via logical OR). When mask>0, the
	mode bits that the target file is allowed to have are restricted via
	logical XOR.
	Returns True if the permissions were modified and False otherwise.

	OSError failures from stat/chown/chmod are translated into the
	corresponding portage exception types (OperationNotPermitted,
	PermissionDenied, ReadOnlyFileSystem, FileNotFound)."""

	modified = False

	if stat_cached is None:
		try:
			if follow_links:
				stat_cached = os.stat(filename)
			else:
				stat_cached = os.lstat(filename)
		except OSError as oe:
			func_call = "stat('%s')" % filename
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			else:
				raise

	# Only chown when the requested owner or group actually differs.
	if	(uid != -1 and uid != stat_cached.st_uid) or \
		(gid != -1 and gid != stat_cached.st_gid):
		try:
			if follow_links:
				os.chown(filename, uid, gid)
			else:
				import portage.data
				portage.data.lchown(filename, uid, gid)
			modified = True
		except OSError as oe:
			func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			else:
				raise

	new_mode = -1
	st_mode = stat_cached.st_mode & 0o7777 # protect from unwanted bits
	if mask >= 0:
		if mode == -1:
			mode = 0 # Don't add any mode bits when mode is unspecified.
		else:
			mode = mode & 0o7777
		# A chmod is needed when the file lacks a requested bit, or
		# carries a bit that the mask disallows.
		if	(mode & st_mode != mode) or \
			((mask ^ st_mode) & st_mode != st_mode):
			new_mode = mode | st_mode
			new_mode = (mask ^ new_mode) & new_mode
	elif mode != -1:
		mode = mode & 0o7777 # protect from unwanted bits
		if mode != st_mode:
			new_mode = mode

	# The chown system call may clear S_ISUID and S_ISGID
	# bits, so those bits are restored if necessary.
	if modified and new_mode == -1 and \
		(st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
		if mode == -1:
			new_mode = st_mode
		else:
			mode = mode & 0o7777
			if mask >= 0:
				new_mode = mode | st_mode
				new_mode = (mask ^ new_mode) & new_mode
			else:
				new_mode = mode
			if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
				new_mode = -1

	if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
		# Mode doesn't matter for symlinks.
		new_mode = -1

	if new_mode != -1:
		try:
			os.chmod(filename, new_mode)
			modified = True
		except OSError as oe:
			func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			# Any other errno is re-raised unchanged.
			raise
	return modified
4075 +
def apply_stat_permissions(filename, newstat, **kwargs):
	"""Convenience wrapper: apply the uid, gid and mode taken from a stat
	result object via apply_secpass_permissions."""
	return apply_secpass_permissions(filename,
		uid=newstat.st_uid, gid=newstat.st_gid, mode=newstat.st_mode,
		**kwargs)
4081 +
def apply_recursive_permissions(top, uid=-1, gid=-1,
	dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
	"""Recursively apply permissions below top via
	apply_secpass_permissions, using (dirmode, dirmask) for directories
	and (filemode, filemask) for regular files.  onerror, if given, is a
	function called with a PortageException instance for each failure;
	the default handler reports the error on stderr.  Returns True only
	if every permission change was fully applied."""

	if onerror is None:
		# Default behavior is to dump errors to stderr so they won't
		# go unnoticed. Callers can pass in a quiet instance.
		def onerror(e):
			if isinstance(e, OperationNotPermitted):
				writemsg(_("Operation Not Permitted: %s\n") % str(e),
					noiselevel=-1)
			elif isinstance(e, FileNotFound):
				writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
			else:
				raise

	everything_applied = True
	for parent, subdirs, files in os.walk(top):
		try:
			if not apply_secpass_permissions(parent,
				uid=uid, gid=gid, mode=dirmode, mask=dirmask):
				everything_applied = False
		except PortageException as e:
			everything_applied = False
			onerror(e)

		for fname in files:
			try:
				if not apply_secpass_permissions(os.path.join(parent, fname),
					uid=uid, gid=gid, mode=filemode, mask=filemask):
					everything_applied = False
			except PortageException as e:
				# Ignore InvalidLocation exceptions such as FileNotFound
				# and DirectoryNotFound since sometimes things disappear,
				# like when adjusting permissions on DISTCC_DIR.
				if not isinstance(e, portage.exception.InvalidLocation):
					everything_applied = False
					onerror(e)
	return everything_applied
4127 +
def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
	stat_cached=None, follow_links=True):
	"""Like apply_permissions, but first drops any uid/gid request that
	the current privilege level (portage.data.secpass) cannot possibly
	satisfy, so an obviously doomed chown is never attempted.  Despite
	these precautions an exception may still be raised, so be prepared.
	Returns True if all requested changes were applied and False if some
	had to be skipped."""

	if stat_cached is None:
		try:
			stat_func = os.stat if follow_links else os.lstat
			stat_cached = stat_func(filename)
		except OSError as oe:
			func_call = "stat('%s')" % filename
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			if oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			if oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			raise

	all_applied = True

	import portage.data # not imported globally because of circular dep
	if portage.data.secpass < 2:
		# Without full privileges the owner cannot be changed at all,
		# and the group only to one we are a member of.
		if uid != -1 and uid != stat_cached.st_uid:
			all_applied = False
			uid = -1

		if gid != -1 and gid != stat_cached.st_gid and \
			gid not in os.getgroups():
			all_applied = False
			gid = -1

	apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
		stat_cached=stat_cached, follow_links=follow_links)
	return all_applied
4174 +
class atomic_ofstream(ObjectProxy):
	"""Write a file atomically via os.rename(). Atomic replacement prevents
	interprocess interference and prevents corruption of the target
	file when the write is interrupted (for example, when an 'out of space'
	error occurs)."""

	def __init__(self, filename, mode='w', follow_links=True, **kargs):
		"""Opens a temporary filename.pid in the same directory as filename."""
		ObjectProxy.__init__(self)
		# object.__setattr__/__getattribute__ are used throughout because
		# this proxy forwards ordinary attribute access to the wrapped
		# file object (see __getattribute__ below).
		object.__setattr__(self, '_aborted', False)
		if 'b' in mode:
			open_func = open
		else:
			open_func = codecs.open
			kargs.setdefault('encoding', _encodings['content'])
			kargs.setdefault('errors', 'backslashreplace')

		if follow_links:
			canonical_path = os.path.realpath(filename)
			object.__setattr__(self, '_real_name', canonical_path)
			tmp_name = "%s.%i" % (canonical_path, os.getpid())
			try:
				object.__setattr__(self, '_file',
					open_func(_unicode_encode(tmp_name,
						encoding=_encodings['fs'], errors='strict'),
					mode=mode, **kargs))
				return
			except IOError as e:
				if canonical_path == filename:
					raise
				# The symlink-resolved location was not writable; warn
				# and fall back to opening at the literal path below.
				writemsg(_("!!! Failed to open file: '%s'\n") % tmp_name,
					noiselevel=-1)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)

		object.__setattr__(self, '_real_name', filename)
		tmp_name = "%s.%i" % (filename, os.getpid())
		object.__setattr__(self, '_file',
			open_func(_unicode_encode(tmp_name,
				encoding=_encodings['fs'], errors='strict'),
			mode=mode, **kargs))

	def _get_target(self):
		# Hook used by the ObjectProxy machinery to find the real object.
		return object.__getattribute__(self, '_file')

	def __getattribute__(self, attr):
		# The lifecycle methods belong to the proxy itself; everything
		# else (write, name, closed, ...) is delegated to the temp file.
		if attr in ('close', 'abort', '__del__'):
			return object.__getattribute__(self, attr)
		return getattr(object.__getattribute__(self, '_file'), attr)

	def close(self):
		"""Closes the temporary file, copies permissions (if possible),
		and performs the atomic replacement via os.rename(). If the abort()
		method has been called, then the temp file is closed and removed."""
		f = object.__getattribute__(self, '_file')
		real_name = object.__getattribute__(self, '_real_name')
		if not f.closed:
			try:
				f.close()
				if not object.__getattribute__(self, '_aborted'):
					try:
						# Preserve the permissions of a pre-existing
						# target file, when possible.
						apply_stat_permissions(f.name, os.stat(real_name))
					except OperationNotPermitted:
						pass
					except FileNotFound:
						pass
					except OSError as oe: # from the above os.stat call
						if oe.errno in (errno.ENOENT, errno.EPERM):
							pass
						else:
							raise
					os.rename(f.name, real_name)
			finally:
				# Make sure we cleanup the temp file
				# even if an exception is raised.
				try:
					os.unlink(f.name)
				except OSError as oe:
					pass

	def abort(self):
		"""If an error occurs while writing the file, the user should
		call this method in order to leave the target file unchanged.
		This will call close() automatically."""
		if not object.__getattribute__(self, '_aborted'):
			object.__setattr__(self, '_aborted', True)
			self.close()

	def __del__(self):
		"""If the user does not explicitely call close(), it is
		assumed that an error has occurred, so we abort()."""
		try:
			f = object.__getattribute__(self, '_file')
		except AttributeError:
			pass
		else:
			if not f.closed:
				self.abort()
		# ensure destructor from the base class is called
		base_destructor = getattr(ObjectProxy, '__del__', None)
		if base_destructor is not None:
			base_destructor(self)
4276 +
def write_atomic(file_path, content, **kwargs):
	"""Atomically replace file_path with content using atomic_ofstream,
	translating I/O errors into portage exception types."""
	stream = None
	try:
		stream = atomic_ofstream(file_path, **kwargs)
		stream.write(content)
		stream.close()
	except (IOError, OSError) as e:
		if stream:
			stream.abort()
		func_call = "write_atomic('%s')" % file_path
		if e.errno == errno.EPERM:
			raise OperationNotPermitted(func_call)
		elif e.errno == errno.EACCES:
			raise PermissionDenied(func_call)
		elif e.errno == errno.EROFS:
			raise ReadOnlyFileSystem(func_call)
		elif e.errno == errno.ENOENT:
			raise FileNotFound(file_path)
		else:
			raise
4297 +
def ensure_dirs(dir_path, *args, **kwargs):
	"""Create dir_path (with parents) if needed, then forward *args and
	**kwargs to apply_permissions.  Returns True if the directory was
	created or its permissions were modified, False otherwise."""

	created_dir = False
	try:
		os.makedirs(dir_path)
		created_dir = True
	except OSError as oe:
		func_call = "makedirs('%s')" % dir_path
		if oe.errno in (errno.EEXIST, errno.EISDIR):
			# Already present; only the permissions may still change.
			pass
		elif oe.errno == errno.EPERM:
			raise OperationNotPermitted(func_call)
		elif oe.errno == errno.EACCES:
			raise PermissionDenied(func_call)
		elif oe.errno == errno.EROFS:
			raise ReadOnlyFileSystem(func_call)
		else:
			raise
	perms_modified = apply_permissions(dir_path, *args, **kwargs)
	return created_dir or perms_modified
4322 +
class LazyItemsDict(UserDict):
	"""A mapping object that behaves like a standard dict except that it allows
	for lazy initialization of values via callable objects. Lazy items can be
	overwritten and deleted just as normal items."""

	__slots__ = ('lazy_items',)

	def __init__(self, *args, **kwargs):

		# Maps key -> _LazyItem for values that have not been evaluated.
		self.lazy_items = {}
		UserDict.__init__(self, *args, **kwargs)

	def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
		"""Add a lazy item for the given key. When the item is requested,
		value_callable will be called with *pargs and **kwargs arguments."""
		self.lazy_items[item_key] = \
			self._LazyItem(value_callable, pargs, kwargs, False)
		# make it show up in self.keys(), etc...
		UserDict.__setitem__(self, item_key, None)

	def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
		"""This is like addLazyItem except value_callable will only be called
		a maximum of 1 time and the result will be cached for future requests."""
		self.lazy_items[item_key] = \
			self._LazyItem(value_callable, pargs, kwargs, True)
		# make it show up in self.keys(), etc...
		UserDict.__setitem__(self, item_key, None)

	def update(self, *args, **kwargs):
		# Mirrors dict.update(), but preserves the laziness of items
		# coming from another LazyItemsDict instead of evaluating them.
		if len(args) > 1:
			raise TypeError(
				"expected at most 1 positional argument, got " + \
				repr(len(args)))
		if args:
			map_obj = args[0]
		else:
			map_obj = None
		if map_obj is None:
			pass
		elif isinstance(map_obj, LazyItemsDict):
			for k in map_obj:
				if k in map_obj.lazy_items:
					UserDict.__setitem__(self, k, None)
				else:
					UserDict.__setitem__(self, k, map_obj[k])
			self.lazy_items.update(map_obj.lazy_items)
		else:
			UserDict.update(self, map_obj)
		if kwargs:
			UserDict.update(self, kwargs)

	def __getitem__(self, item_key):
		if item_key in self.lazy_items:
			lazy_item = self.lazy_items[item_key]
			pargs = lazy_item.pargs
			if pargs is None:
				pargs = ()
			kwargs = lazy_item.kwargs
			if kwargs is None:
				kwargs = {}
			result = lazy_item.func(*pargs, **kwargs)
			if lazy_item.singleton:
				# Cache the evaluated value; this assignment also drops
				# the lazy_items entry via __setitem__ below.
				self[item_key] = result
			return result

		else:
			return UserDict.__getitem__(self, item_key)

	def __setitem__(self, item_key, value):
		# A direct assignment overrides any pending lazy item.
		if item_key in self.lazy_items:
			del self.lazy_items[item_key]
		UserDict.__setitem__(self, item_key, value)

	def __delitem__(self, item_key):
		if item_key in self.lazy_items:
			del self.lazy_items[item_key]
		UserDict.__delitem__(self, item_key)

	def clear(self):
		self.lazy_items.clear()
		UserDict.clear(self)

	def copy(self):
		return self.__copy__()

	def __copy__(self):
		# Shallow copy: lazy items stay lazy in the copy (handled by
		# the LazyItemsDict branch of update()).
		return self.__class__(self)

	def __deepcopy__(self, memo=None):
		"""
		WARNING: If any of the lazy items contains a bound method then it's
		typical for deepcopy() to raise an exception like this:

		File "/usr/lib/python2.5/copy.py", line 189, in deepcopy
		y = _reconstruct(x, rv, 1, memo)
		File "/usr/lib/python2.5/copy.py", line 322, in _reconstruct
		y = callable(*args)
		File "/usr/lib/python2.5/copy_reg.py", line 92, in __newobj__
		return cls.__new__(cls, *args)
		TypeError: instancemethod expected at least 2 arguments, got 0

		If deepcopy() needs to work, this problem can be avoided by
		implementing lazy items with normal (non-bound) functions.

		If deepcopy() raises a TypeError for a lazy item that has been added
		via a call to addLazySingleton(), the singleton will be automatically
		evaluated and deepcopy() will instead be called on the result.
		"""
		if memo is None:
			memo = {}
		from copy import deepcopy
		result = self.__class__()
		memo[id(self)] = result
		for k in self:
			k_copy = deepcopy(k, memo)
			if k in self.lazy_items:
				lazy_item = self.lazy_items[k]
				try:
					result.lazy_items[k_copy] = deepcopy(lazy_item, memo)
				except TypeError:
					if not lazy_item.singleton:
						raise
					# Evaluate the singleton and deep-copy its value
					# instead of the (uncopyable) callable.
					UserDict.__setitem__(result,
						k_copy, deepcopy(self[k], memo))
				else:
					UserDict.__setitem__(result, k_copy, None)
			else:
				UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
		return result

	class _LazyItem(object):
		# Record describing one pending lazy value.

		__slots__ = ('func', 'pargs', 'kwargs', 'singleton')

		def __init__(self, func, pargs, kwargs, singleton):

			# Normalize empty argument containers to None so copies
			# and comparisons are cheap.
			if not pargs:
				pargs = None
			if not kwargs:
				kwargs = None

			self.func = func
			self.pargs = pargs
			self.kwargs = kwargs
			self.singleton = singleton

		def __copy__(self):
			return self.__class__(self.func, self.pargs,
				self.kwargs, self.singleton)

		def __deepcopy__(self, memo=None):
			"""
			Override this since the default implementation can fail silently,
			leaving some attributes unset.
			"""
			if memo is None:
				memo = {}
			from copy import deepcopy
			result = self.__copy__()
			memo[id(self)] = result
			result.func = deepcopy(self.func, memo)
			result.pargs = deepcopy(self.pargs, memo)
			result.kwargs = deepcopy(self.kwargs, memo)
			result.singleton = deepcopy(self.singleton, memo)
			return result
4488 +
4489 +class ConfigProtect(object):
4490 + def __init__(self, myroot, protect_list, mask_list):
4491 + self.myroot = myroot
4492 + self.protect_list = protect_list
4493 + self.mask_list = mask_list
4494 + self.updateprotect()
4495 +
4496 + def updateprotect(self):
4497 + """Update internal state for isprotected() calls. Nonexistent paths
4498 + are ignored."""
4499 +
4500 + os = _os_merge
4501 +
4502 + self.protect = []
4503 + self._dirs = set()
4504 + for x in self.protect_list:
4505 + ppath = normalize_path(
4506 + os.path.join(self.myroot + EPREFIX_LSTRIP, x.lstrip(os.path.sep)))
4507 + mystat = None
4508 + try:
4509 + if stat.S_ISDIR(os.stat(ppath).st_mode):
4510 + self._dirs.add(ppath)
4511 + self.protect.append(ppath)
4512 + except OSError:
4513 + # If it doesn't exist, there's no need to protect it.
4514 + pass
4515 +
4516 + self.protectmask = []
4517 + for x in self.mask_list:
4518 + ppath = normalize_path(
4519 + os.path.join(self.myroot + EPREFIX_LSTRIP, x.lstrip(os.path.sep)))
4520 + mystat = None
4521 + try:
4522 + """Use lstat so that anything, even a broken symlink can be
4523 + protected."""
4524 + if stat.S_ISDIR(os.lstat(ppath).st_mode):
4525 + self._dirs.add(ppath)
4526 + self.protectmask.append(ppath)
4527 + """Now use stat in case this is a symlink to a directory."""
4528 + if stat.S_ISDIR(os.stat(ppath).st_mode):
4529 + self._dirs.add(ppath)
4530 + except OSError:
4531 + # If it doesn't exist, there's no need to mask it.
4532 + pass
4533 +
4534 + def isprotected(self, obj):
4535 + """Returns True if obj is protected, False otherwise. The caller must
4536 + ensure that obj is normalized with a single leading slash. A trailing
4537 + slash is optional for directories."""
4538 + masked = 0
4539 + protected = 0
4540 + sep = os.path.sep
4541 + for ppath in self.protect:
4542 + if len(ppath) > masked and obj.startswith(ppath):
4543 + if ppath in self._dirs:
4544 + if obj != ppath and not obj.startswith(ppath + sep):
4545 + # /etc/foo does not match /etc/foobaz
4546 + continue
4547 + elif obj != ppath:
4548 + # force exact match when CONFIG_PROTECT lists a
4549 + # non-directory
4550 + continue
4551 + protected = len(ppath)
4552 + #config file management
4553 + for pmpath in self.protectmask:
4554 + if len(pmpath) >= protected and obj.startswith(pmpath):
4555 + if pmpath in self._dirs:
4556 + if obj != pmpath and \
4557 + not obj.startswith(pmpath + sep):
4558 + # /etc/foo does not match /etc/foobaz
4559 + continue
4560 + elif obj != pmpath:
4561 + # force exact match when CONFIG_PROTECT_MASK lists
4562 + # a non-directory
4563 + continue
4564 + #skip, it's in the mask
4565 + masked = len(pmpath)
4566 + return protected > masked
4567 +
4568 +def new_protect_filename(mydest, newmd5=None):
4569 + """Resolves a config-protect filename for merging, optionally
4570 + using the last filename if the md5 matches.
4571 + (dest,md5) ==> 'string' --- path_to_target_filename
4572 + (dest) ==> ('next', 'highest') --- next_target and most-recent_target
4573 + """
4574 +
4575 + # config protection filename format:
4576 + # ._cfg0000_foo
4577 + # 0123456789012
4578 +
4579 + os = _os_merge
4580 +
4581 + prot_num = -1
4582 + last_pfile = ""
4583 +
4584 + if not os.path.exists(mydest):
4585 + return mydest
4586 +
4587 + real_filename = os.path.basename(mydest)
4588 + real_dirname = os.path.dirname(mydest)
4589 + for pfile in os.listdir(real_dirname):
4590 + if pfile[0:5] != "._cfg":
4591 + continue
4592 + if pfile[10:] != real_filename:
4593 + continue
4594 + try:
4595 + new_prot_num = int(pfile[5:9])
4596 + if new_prot_num > prot_num:
4597 + prot_num = new_prot_num
4598 + last_pfile = pfile
4599 + except ValueError:
4600 + continue
4601 + prot_num = prot_num + 1
4602 +
4603 + new_pfile = normalize_path(os.path.join(real_dirname,
4604 + "._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
4605 + old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
4606 + if last_pfile and newmd5:
4607 + import portage.checksum
4608 + try:
4609 + last_pfile_md5 = portage.checksum._perform_md5_merge(old_pfile)
4610 + except FileNotFound:
4611 + # The file suddenly disappeared or it's a broken symlink.
4612 + pass
4613 + else:
4614 + if last_pfile_md5 == newmd5:
4615 + return old_pfile
4616 + return new_pfile
4617 +
4618 +def find_updated_config_files(target_root, config_protect):
4619 + """
4620 + Return a tuple of configuration files that needs to be updated.
4621 + The tuple contains lists organized like this:
4622 + [ protected_dir, file_list ]
4623 + If the protected config isn't a protected_dir but a protected_file, list is:
4624 + [ protected_file, None ]
4625 + If no configuration files need to be updated, None is returned
4626 + """
4627 +
4628 + os = _os_merge
4629 +
4630 + if config_protect:
4631 + # directories with some protect files in them
4632 + for x in config_protect:
4633 + files = []
4634 +
4635 + x = os.path.join(target_root, x.lstrip(os.path.sep))
4636 + if not os.access(x, os.W_OK):
4637 + continue
4638 + try:
4639 + mymode = os.lstat(x).st_mode
4640 + except OSError:
4641 + continue
4642 +
4643 + if stat.S_ISLNK(mymode):
4644 + # We want to treat it like a directory if it
4645 + # is a symlink to an existing directory.
4646 + try:
4647 + real_mode = os.stat(x).st_mode
4648 + if stat.S_ISDIR(real_mode):
4649 + mymode = real_mode
4650 + except OSError:
4651 + pass
4652 +
4653 + if stat.S_ISDIR(mymode):
4654 + mycommand = \
4655 + "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
4656 + else:
4657 + mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
4658 + os.path.split(x.rstrip(os.path.sep))
4659 + mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
4660 + a = subprocess_getstatusoutput(mycommand)
4661 +
4662 + if a[0] == 0:
4663 + files = a[1].split('\0')
4664 + # split always produces an empty string as the last element
4665 + if files and not files[-1]:
4666 + del files[-1]
4667 + if files:
4668 + if stat.S_ISDIR(mymode):
4669 + yield (x, files)
4670 + else:
4671 + yield (x, None)
4672 +
4673 +def getlibpaths(root):
4674 + """ Return a list of paths that are used for library lookups """
4675 +
4676 + # PREFIX HACK: LD_LIBRARY_PATH isn't portable, and considered
4677 + # harmful, so better not use it. We don't need any host OS lib
4678 + # paths either, so do Prefix case.
4679 + if EPREFIX != '':
4680 + rval = []
4681 + rval.append(EPREFIX + "/usr/lib")
4682 + rval.append(EPREFIX + "/lib")
4683 + # we don't know the CHOST here, so it's a bit hard to guess
4684 + # where GCC's and ld's libs are. Though, GCC's libs should be
4685 + # in lib and usr/lib, binutils' libs rarely used
4686 + else:
4687 + # the following is based on the information from ld.so(8)
4688 + rval = os.environ.get("LD_LIBRARY_PATH", "").split(":")
4689 + rval.extend(grabfile(os.path.join(root, "etc", "ld.so.conf")))
4690 + rval.append("/usr/lib")
4691 + rval.append("/lib")
4692 +
4693 + return [normalize_path(x) for x in rval if x]
4694
4695 Copied: main/branches/prefix/pym/portage/util/digraph.py (from rev 15429, main/trunk/pym/portage/util/digraph.py)
4696 ===================================================================
4697 --- main/branches/prefix/pym/portage/util/digraph.py (rev 0)
4698 +++ main/branches/prefix/pym/portage/util/digraph.py 2010-02-22 13:26:20 UTC (rev 15434)
4699 @@ -0,0 +1,281 @@
4700 +# Copyright 2010 Gentoo Foundation
4701 +# Distributed under the terms of the GNU General Public License v2
4702 +# $Id$
4703 +
4704 +class digraph(object):
4705 + """
4706 + A directed graph object.
4707 + """
4708 +
4709 + def __init__(self):
4710 + """Create an empty digraph"""
4711 +
4712 + # { node : ( { child : priority } , { parent : priority } ) }
4713 + self.nodes = {}
4714 + self.order = []
4715 +
4716 + def add(self, node, parent, priority=0):
4717 + """Adds the specified node with the specified parent.
4718 +
4719 + If the dep is a soft-dep and the node already has a hard
4720 + relationship to the parent, the relationship is left as hard."""
4721 +
4722 + if node not in self.nodes:
4723 + self.nodes[node] = ({}, {}, node)
4724 + self.order.append(node)
4725 +
4726 + if not parent:
4727 + return
4728 +
4729 + if parent not in self.nodes:
4730 + self.nodes[parent] = ({}, {}, parent)
4731 + self.order.append(parent)
4732 +
4733 + priorities = self.nodes[node][1].get(parent)
4734 + if priorities is None:
4735 + priorities = []
4736 + self.nodes[node][1][parent] = priorities
4737 + self.nodes[parent][0][node] = priorities
4738 + priorities.append(priority)
4739 + priorities.sort()
4740 +
4741 + def remove(self, node):
4742 + """Removes the specified node from the digraph, also removing
4743 + any ties to other nodes in the digraph. Raises KeyError if the
4744 + node doesn't exist."""
4745 +
4746 + if node not in self.nodes:
4747 + raise KeyError(node)
4748 +
4749 + for parent in self.nodes[node][1]:
4750 + del self.nodes[parent][0][node]
4751 + for child in self.nodes[node][0]:
4752 + del self.nodes[child][1][node]
4753 +
4754 + del self.nodes[node]
4755 + self.order.remove(node)
4756 +
4757 + def difference_update(self, t):
4758 + """
4759 + Remove all given nodes from node_set. This is more efficient
4760 + than multiple calls to the remove() method.
4761 + """
4762 + if isinstance(t, (list, tuple)) or \
4763 + not hasattr(t, "__contains__"):
4764 + t = frozenset(t)
4765 + order = []
4766 + for node in self.order:
4767 + if node not in t:
4768 + order.append(node)
4769 + continue
4770 + for parent in self.nodes[node][1]:
4771 + del self.nodes[parent][0][node]
4772 + for child in self.nodes[node][0]:
4773 + del self.nodes[child][1][node]
4774 + del self.nodes[node]
4775 + self.order = order
4776 +
4777 + def remove_edge(self, child, parent):
4778 + """
4779 + Remove edge in the direction from child to parent. Note that it is
4780 + possible for a remaining edge to exist in the opposite direction.
4781 + Any endpoint vertices that become isolated will remain in the graph.
4782 + """
4783 +
4784 + # Nothing should be modified when a KeyError is raised.
4785 + for k in parent, child:
4786 + if k not in self.nodes:
4787 + raise KeyError(k)
4788 +
4789 + # Make sure the edge exists.
4790 + if child not in self.nodes[parent][0]:
4791 + raise KeyError(child)
4792 + if parent not in self.nodes[child][1]:
4793 + raise KeyError(parent)
4794 +
4795 + # Remove the edge.
4796 + del self.nodes[child][1][parent]
4797 + del self.nodes[parent][0][child]
4798 +
4799 + def __iter__(self):
4800 + return iter(self.order)
4801 +
4802 + def contains(self, node):
4803 + """Checks if the digraph contains the given node"""
4804 + return node in self.nodes
4805 +
4806 + def get(self, key, default=None):
4807 + node_data = self.nodes.get(key, self)
4808 + if node_data is self:
4809 + return default
4810 + return node_data[2]
4811 +
4812 + def all_nodes(self):
4813 + """Return a list of all nodes in the graph"""
4814 + return self.order[:]
4815 +
4816 + def child_nodes(self, node, ignore_priority=None):
4817 + """Return all children of the specified node"""
4818 + if ignore_priority is None:
4819 + return list(self.nodes[node][0])
4820 + children = []
4821 + if hasattr(ignore_priority, '__call__'):
4822 + for child, priorities in self.nodes[node][0].items():
4823 + for priority in priorities:
4824 + if not ignore_priority(priority):
4825 + children.append(child)
4826 + break
4827 + else:
4828 + for child, priorities in self.nodes[node][0].items():
4829 + if ignore_priority < priorities[-1]:
4830 + children.append(child)
4831 + return children
4832 +
4833 + def parent_nodes(self, node, ignore_priority=None):
4834 + """Return all parents of the specified node"""
4835 + if ignore_priority is None:
4836 + return list(self.nodes[node][1])
4837 + parents = []
4838 + if hasattr(ignore_priority, '__call__'):
4839 + for parent, priorities in self.nodes[node][1].items():
4840 + for priority in priorities:
4841 + if not ignore_priority(priority):
4842 + parents.append(parent)
4843 + break
4844 + else:
4845 + for parent, priorities in self.nodes[node][1].items():
4846 + if ignore_priority < priorities[-1]:
4847 + parents.append(parent)
4848 + return parents
4849 +
4850 + def leaf_nodes(self, ignore_priority=None):
4851 + """Return all nodes that have no children
4852 +
4853 + If ignore_priority is given, matching priorities are not counted
4854 + as children in calculations."""
4855 +
4856 + leaf_nodes = []
4857 + if ignore_priority is None:
4858 + for node in self.order:
4859 + if not self.nodes[node][0]:
4860 + leaf_nodes.append(node)
4861 + elif hasattr(ignore_priority, '__call__'):
4862 + for node in self.order:
4863 + is_leaf_node = True
4864 + for child, priorities in self.nodes[node][0].items():
4865 + for priority in priorities:
4866 + if not ignore_priority(priority):
4867 + is_leaf_node = False
4868 + break
4869 + if not is_leaf_node:
4870 + break
4871 + if is_leaf_node:
4872 + leaf_nodes.append(node)
4873 + else:
4874 + for node in self.order:
4875 + is_leaf_node = True
4876 + for child, priorities in self.nodes[node][0].items():
4877 + if ignore_priority < priorities[-1]:
4878 + is_leaf_node = False
4879 + break
4880 + if is_leaf_node:
4881 + leaf_nodes.append(node)
4882 + return leaf_nodes
4883 +
4884 + def root_nodes(self, ignore_priority=None):
4885 + """Return all nodes that have no parents.
4886 +
4887 + If ignore_priority is given, matching priorities are not counted
4888 + as parents in calculations."""
4889 +
4890 + root_nodes = []
4891 + if ignore_priority is None:
4892 + for node in self.order:
4893 + if not self.nodes[node][1]:
4894 + root_nodes.append(node)
4895 + elif hasattr(ignore_priority, '__call__'):
4896 + for node in self.order:
4897 + is_root_node = True
4898 + for parent, priorities in self.nodes[node][1].items():
4899 + for priority in priorities:
4900 + if not ignore_priority(priority):
4901 + is_root_node = False
4902 + break
4903 + if not is_root_node:
4904 + break
4905 + if is_root_node:
4906 + root_nodes.append(node)
4907 + else:
4908 + for node in self.order:
4909 + is_root_node = True
4910 + for parent, priorities in self.nodes[node][1].items():
4911 + if ignore_priority < priorities[-1]:
4912 + is_root_node = False
4913 + break
4914 + if is_root_node:
4915 + root_nodes.append(node)
4916 + return root_nodes
4917 +
4918 + def is_empty(self):
4919 + """Checks if the digraph is empty"""
4920 + return len(self.nodes) == 0
4921 +
4922 + def clone(self):
4923 + clone = digraph()
4924 + clone.nodes = {}
4925 + memo = {}
4926 + for children, parents, node in self.nodes.values():
4927 + children_clone = {}
4928 + for child, priorities in children.items():
4929 + priorities_clone = memo.get(id(priorities))
4930 + if priorities_clone is None:
4931 + priorities_clone = priorities[:]
4932 + memo[id(priorities)] = priorities_clone
4933 + children_clone[child] = priorities_clone
4934 + parents_clone = {}
4935 + for parent, priorities in parents.items():
4936 + priorities_clone = memo.get(id(priorities))
4937 + if priorities_clone is None:
4938 + priorities_clone = priorities[:]
4939 + memo[id(priorities)] = priorities_clone
4940 + parents_clone[parent] = priorities_clone
4941 + clone.nodes[node] = (children_clone, parents_clone, node)
4942 + clone.order = self.order[:]
4943 + return clone
4944 +
4945 + def delnode(self, node):
4946 + try:
4947 + self.remove(node)
4948 + except KeyError:
4949 + pass
4950 +
4951 + def firstzero(self):
4952 + leaf_nodes = self.leaf_nodes()
4953 + if leaf_nodes:
4954 + return leaf_nodes[0]
4955 + return None
4956 +
4957 + def hasallzeros(self, ignore_priority=None):
4958 + return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
4959 + len(self.order)
4960 +
4961 + def debug_print(self):
4962 + def output(s):
4963 + writemsg(s, noiselevel=-1)
4964 + for node in self.nodes:
4965 + output("%s " % (node,))
4966 + if self.nodes[node][0]:
4967 + output("depends on\n")
4968 + else:
4969 + output("(no children)\n")
4970 + for child, priorities in self.nodes[node][0].items():
4971 + output(" %s (%s)\n" % (child, priorities[-1],))
4972 +
4973 + # Backward compatibility
4974 + addnode = add
4975 + allnodes = all_nodes
4976 + allzeros = leaf_nodes
4977 + hasnode = contains
4978 + __contains__ = contains
4979 + empty = is_empty
4980 + copy = clone
4981
4982 Deleted: main/branches/prefix/pym/portage/util.py
4983 ===================================================================
4984 --- main/branches/prefix/pym/portage/util.py 2010-02-22 13:02:22 UTC (rev 15433)
4985 +++ main/branches/prefix/pym/portage/util.py 2010-02-22 13:26:20 UTC (rev 15434)
4986 @@ -1,1441 +0,0 @@
4987 -# Copyright 2004-2009 Gentoo Foundation
4988 -# Distributed under the terms of the GNU General Public License v2
4989 -# $Id$
4990 -
4991 -__all__ = ['apply_permissions', 'apply_recursive_permissions',
4992 - 'apply_secpass_permissions', 'apply_stat_permissions', 'atomic_ofstream',
4993 - 'cmp_sort_key', 'ConfigProtect', 'dump_traceback', 'ensure_dirs',
4994 - 'find_updated_config_files', 'getconfig', 'getlibpaths', 'grabdict',
4995 - 'grabdict_package', 'grabfile', 'grabfile_package', 'grablines',
4996 - 'initialize_logger', 'LazyItemsDict', 'map_dictlist_vals',
4997 - 'new_protect_filename', 'normalize_path', 'pickle_read', 'stack_dictlist',
4998 - 'stack_dicts', 'stack_lists', 'unique_array', 'varexpand', 'write_atomic',
4999 - 'writedict', 'writemsg', 'writemsg_level', 'writemsg_stdout']
5000 -
5001 -try:
5002 - from subprocess import getstatusoutput as subprocess_getstatusoutput
5003 -except ImportError:
5004 - from commands import getstatusoutput as subprocess_getstatusoutput
5005 -import codecs
5006 -import errno
5007 -import logging
5008 -import re
5009 -import shlex
5010 -import stat
5011 -import string
5012 -import sys
5013 -
5014 -import portage
5015 -from portage import StringIO
5016 -from portage import os
5017 -from portage import _encodings
5018 -from portage import _os_merge
5019 -from portage import _unicode_encode
5020 -from portage import _unicode_decode
5021 -from portage.exception import InvalidAtom, PortageException, FileNotFound, \
5022 - OperationNotPermitted, PermissionDenied, ReadOnlyFileSystem
5023 -from portage.dep import Atom, isvalidatom
5024 -from portage.localization import _
5025 -from portage.proxy.objectproxy import ObjectProxy
5026 -from portage.cache.mappings import UserDict
5027 -from portage.const import EPREFIX, EPREFIX_LSTRIP
5028 -
5029 -try:
5030 - import cPickle as pickle
5031 -except ImportError:
5032 - import pickle
5033 -
5034 -noiselimit = 0
5035 -
5036 -def initialize_logger(level=logging.WARN):
5037 - """Sets up basic logging of portage activities
5038 - Args:
5039 - level: the level to emit messages at ('info', 'debug', 'warning' ...)
5040 - Returns:
5041 - None
5042 - """
5043 - logging.basicConfig(level=logging.WARN, format='[%(levelname)-4s] %(message)s')
5044 -
5045 -def writemsg(mystr,noiselevel=0,fd=None):
5046 - """Prints out warning and debug messages based on the noiselimit setting"""
5047 - global noiselimit
5048 - if fd is None:
5049 - fd = sys.stderr
5050 - if noiselevel <= noiselimit:
5051 - # avoid potential UnicodeEncodeError
5052 - mystr = _unicode_encode(mystr,
5053 - encoding=_encodings['stdio'], errors='backslashreplace')
5054 - if sys.hexversion >= 0x3000000:
5055 - fd = fd.buffer
5056 - fd.write(mystr)
5057 - fd.flush()
5058 -
5059 -def writemsg_stdout(mystr,noiselevel=0):
5060 - """Prints messages stdout based on the noiselimit setting"""
5061 - writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
5062 -
5063 -def writemsg_level(msg, level=0, noiselevel=0):
5064 - """
5065 - Show a message for the given level as defined by the logging module
5066 - (default is 0). When level >= logging.WARNING then the message is
5067 - sent to stderr, otherwise it is sent to stdout. The noiselevel is
5068 - passed directly to writemsg().
5069 -
5070 - @type msg: str
5071 - @param msg: a message string, including newline if appropriate
5072 - @type level: int
5073 - @param level: a numeric logging level (see the logging module)
5074 - @type noiselevel: int
5075 - @param noiselevel: passed directly to writemsg
5076 - """
5077 - if level >= logging.WARNING:
5078 - fd = sys.stderr
5079 - else:
5080 - fd = sys.stdout
5081 - writemsg(msg, noiselevel=noiselevel, fd=fd)
5082 -
5083 -def normalize_path(mypath):
5084 - """
5085 - os.path.normpath("//foo") returns "//foo" instead of "/foo"
5086 - We dislike this behavior so we create our own normpath func
5087 - to fix it.
5088 - """
5089 - if sys.hexversion >= 0x3000000 and isinstance(mypath, bytes):
5090 - path_sep = os.path.sep.encode()
5091 - else:
5092 - path_sep = os.path.sep
5093 -
5094 - if mypath.startswith(path_sep):
5095 - # posixpath.normpath collapses 3 or more leading slashes to just 1.
5096 - return os.path.normpath(2*path_sep + mypath)
5097 - else:
5098 - return os.path.normpath(mypath)
5099 -
5100 -def grabfile(myfilename, compat_level=0, recursive=0):
5101 - """This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
5102 - begins with a #, it is ignored, as are empty lines"""
5103 -
5104 - mylines=grablines(myfilename, recursive)
5105 - newlines=[]
5106 - for x in mylines:
5107 - #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
5108 - #into single spaces.
5109 - myline = _unicode_decode(' ').join(x.split())
5110 - if not len(myline):
5111 - continue
5112 - if myline[0]=="#":
5113 - # Check if we have a compat-level string. BC-integration data.
5114 - # '##COMPAT==>N<==' 'some string attached to it'
5115 - mylinetest = myline.split("<==",1)
5116 - if len(mylinetest) == 2:
5117 - myline_potential = mylinetest[1]
5118 - mylinetest = mylinetest[0].split("##COMPAT==>")
5119 - if len(mylinetest) == 2:
5120 - if compat_level >= int(mylinetest[1]):
5121 - # It's a compat line, and the key matches.
5122 - newlines.append(myline_potential)
5123 - continue
5124 - else:
5125 - continue
5126 - newlines.append(myline)
5127 - return newlines
5128 -
5129 -def map_dictlist_vals(func,myDict):
5130 - """Performs a function on each value of each key in a dictlist.
5131 - Returns a new dictlist."""
5132 - new_dl = {}
5133 - for key in myDict:
5134 - new_dl[key] = []
5135 - new_dl[key] = [func(x) for x in myDict[key]]
5136 - return new_dl
5137 -
5138 -def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0):
5139 - """
5140 - Stacks an array of dict-types into one array. Optionally merging or
5141 - overwriting matching key/value pairs for the dict[key]->list.
5142 - Returns a single dict. Higher index in lists is preferenced.
5143 -
5144 - Example usage:
5145 - >>> from portage.util import stack_dictlist
5146 - >>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
5147 - >>> {'a':'b','x':'y'}
5148 - >>> print stack_dictlist( [{'a':'b'},{'a':'c'}], incremental = True )
5149 - >>> {'a':['b','c'] }
5150 - >>> a = {'KEYWORDS':['x86','alpha']}
5151 - >>> b = {'KEYWORDS':['-x86']}
5152 - >>> print stack_dictlist( [a,b] )
5153 - >>> { 'KEYWORDS':['x86','alpha','-x86']}
5154 - >>> print stack_dictlist( [a,b], incremental=True)
5155 - >>> { 'KEYWORDS':['alpha'] }
5156 - >>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
5157 - >>> { 'KEYWORDS':['alpha'] }
5158 -
5159 - @param original_dicts a list of (dictionary objects or None)
5160 - @type list
5161 - @param incremental True or false depending on whether new keys should overwrite
5162 - keys which already exist.
5163 - @type boolean
5164 - @param incrementals A list of items that should be incremental (-foo removes foo from
5165 - the returned dict).
5166 - @type list
5167 - @param ignore_none Appears to be ignored, but probably was used long long ago.
5168 - @type boolean
5169 -
5170 - """
5171 - final_dict = {}
5172 - for mydict in original_dicts:
5173 - if mydict is None:
5174 - continue
5175 - for y in mydict:
5176 - if not y in final_dict:
5177 - final_dict[y] = []
5178 -
5179 - for thing in mydict[y]:
5180 - if thing:
5181 - if incremental or y in incrementals:
5182 - if thing == "-*":
5183 - final_dict[y] = []
5184 - continue
5185 - elif thing[:1] == '-':
5186 - try:
5187 - final_dict[y].remove(thing[1:])
5188 - except ValueError:
5189 - pass
5190 - continue
5191 - if thing not in final_dict[y]:
5192 - final_dict[y].append(thing)
5193 - if y in final_dict and not final_dict[y]:
5194 - del final_dict[y]
5195 - return final_dict
5196 -
5197 -def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
5198 - """Stacks an array of dict-types into one array. Optionally merging or
5199 - overwriting matching key/value pairs for the dict[key]->string.
5200 - Returns a single dict."""
5201 - final_dict = {}
5202 - for mydict in dicts:
5203 - if not mydict:
5204 - continue
5205 - for k, v in mydict.items():
5206 - if k in final_dict and (incremental or (k in incrementals)):
5207 - final_dict[k] += " " + v
5208 - else:
5209 - final_dict[k] = v
5210 - return final_dict
5211 -
5212 -def stack_lists(lists, incremental=1):
5213 - """Stacks an array of list-types into one array. Optionally removing
5214 - distinct values using '-value' notation. Higher index is preferenced.
5215 -
5216 - all elements must be hashable."""
5217 -
5218 - new_list = {}
5219 - for x in lists:
5220 - for y in filter(None, x):
5221 - if incremental:
5222 - if y == "-*":
5223 - new_list.clear()
5224 - elif y[:1] == '-':
5225 - new_list.pop(y[1:], None)
5226 - else:
5227 - new_list[y] = True
5228 - else:
5229 - new_list[y] = True
5230 - return list(new_list)
5231 -
5232 -def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
5233 - """
5234 - This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary
5235 -
5236 - @param myfilename: file to process
5237 - @type myfilename: string (path)
5238 - @param juststrings: only return strings
5239 - @type juststrings: Boolean (integer)
5240 - @param empty: Ignore certain lines
5241 - @type empty: Boolean (integer)
5242 - @param recursive: Recursively grab ( support for /etc/portage/package.keywords/* and friends )
5243 - @type recursive: Boolean (integer)
5244 - @param incremental: Append to the return list, don't overwrite
5245 - @type incremental: Boolean (integer)
5246 - @rtype: Dictionary
5247 - @returns:
5248 - 1. Returns the lines in a file in a dictionary, for example:
5249 - 'sys-apps/portage x86 amd64 ppc'
5250 - would return
5251 - { "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ] }
5252 - the line syntax is key : [list of values]
5253 - """
5254 - newdict={}
5255 - for x in grablines(myfilename, recursive):
5256 - #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
5257 - #into single spaces.
5258 - if x[0] == "#":
5259 - continue
5260 - myline=x.split()
5261 - if len(myline) < 2 and empty == 0:
5262 - continue
5263 - if len(myline) < 1 and empty == 1:
5264 - continue
5265 - if incremental:
5266 - newdict.setdefault(myline[0], []).extend(myline[1:])
5267 - else:
5268 - newdict[myline[0]] = myline[1:]
5269 - if juststrings:
5270 - for k, v in newdict.items():
5271 - newdict[k] = " ".join(v)
5272 - return newdict
5273 -
5274 -def grabdict_package(myfilename, juststrings=0, recursive=0):
5275 - """ Does the same thing as grabdict except it validates keys
5276 - with isvalidatom()"""
5277 - pkgs=grabdict(myfilename, juststrings, empty=1, recursive=recursive)
5278 - # We need to call keys() here in order to avoid the possibility of
5279 - # "RuntimeError: dictionary changed size during iteration"
5280 - # when an invalid atom is deleted.
5281 - atoms = {}
5282 - for k, v in pkgs.items():
5283 - try:
5284 - k = Atom(k)
5285 - except InvalidAtom:
5286 - writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, k),
5287 - noiselevel=-1)
5288 - else:
5289 - atoms[k] = v
5290 - return atoms
5291 -
5292 -def grabfile_package(myfilename, compatlevel=0, recursive=0):
5293 - pkgs=grabfile(myfilename, compatlevel, recursive=recursive)
5294 - mybasename = os.path.basename(myfilename)
5295 - atoms = []
5296 - for pkg in pkgs:
5297 - pkg_orig = pkg
5298 - # for packages and package.mask files
5299 - if pkg[:1] == "-":
5300 - pkg = pkg[1:]
5301 - if pkg[:1] == '*' and mybasename == 'packages':
5302 - pkg = pkg[1:]
5303 - try:
5304 - pkg = Atom(pkg)
5305 - except InvalidAtom:
5306 - writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, pkg),
5307 - noiselevel=-1)
5308 - else:
5309 - if pkg_orig == str(pkg):
5310 - # normal atom, so return as Atom instance
5311 - atoms.append(pkg)
5312 - else:
5313 - # atom has special prefix, so return as string
5314 - atoms.append(pkg_orig)
5315 - return atoms
5316 -
5317 -def grablines(myfilename,recursive=0):
5318 - mylines=[]
5319 - if recursive and os.path.isdir(myfilename):
5320 - if myfilename in ["RCS", "CVS", "SCCS"]:
5321 - return mylines
5322 - dirlist = os.listdir(myfilename)
5323 - dirlist.sort()
5324 - for f in dirlist:
5325 - if not f.startswith(".") and not f.endswith("~"):
5326 - mylines.extend(grablines(
5327 - os.path.join(myfilename, f), recursive))
5328 - else:
5329 - try:
5330 - myfile = codecs.open(_unicode_encode(myfilename,
5331 - encoding=_encodings['fs'], errors='strict'),
5332 - mode='r', encoding=_encodings['content'], errors='replace')
5333 - mylines = myfile.readlines()
5334 - myfile.close()
5335 - except IOError as e:
5336 - if e.errno == PermissionDenied.errno:
5337 - raise PermissionDenied(myfilename)
5338 - pass
5339 - return mylines
5340 -
5341 -def writedict(mydict,myfilename,writekey=True):
5342 - """Writes out a dict to a file; writekey=0 mode doesn't write out
5343 - the key and assumes all values are strings, not lists."""
5344 - myfile = None
5345 - try:
5346 - myfile = atomic_ofstream(myfilename)
5347 - if not writekey:
5348 - for x in mydict.values():
5349 - myfile.write(x+"\n")
5350 - else:
5351 - for x in mydict:
5352 - myfile.write("%s %s\n" % (x, " ".join(mydict[x])))
5353 - myfile.close()
5354 - except IOError:
5355 - if myfile is not None:
5356 - myfile.abort()
5357 - return 0
5358 - return 1
5359 -
5360 -def shlex_split(s):
5361 - """
5362 - This is equivalent to shlex.split but it temporarily encodes unicode
5363 - strings to bytes since shlex.split() doesn't handle unicode strings.
5364 - """
5365 - is_unicode = sys.hexversion < 0x3000000 and isinstance(s, unicode)
5366 - if is_unicode:
5367 - s = _unicode_encode(s)
5368 - rval = shlex.split(s)
5369 - if is_unicode:
5370 - rval = [_unicode_decode(x) for x in rval]
5371 - return rval
5372 -
5373 -class _tolerant_shlex(shlex.shlex):
5374 - def sourcehook(self, newfile):
5375 - try:
5376 - return shlex.shlex.sourcehook(self, newfile)
5377 - except EnvironmentError as e:
5378 - writemsg(_("!!! Parse error in '%s': source command failed: %s\n") % \
5379 - (self.infile, str(e)), noiselevel=-1)
5380 - return (newfile, StringIO())
5381 -
5382 -_invalid_var_name_re = re.compile(r'^\d|\W')
5383 -
def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
	"""Parse a shell-like key=value configuration file (e.g. make.conf).

	@param mycfg: path of the config file to parse
	@param tolerant: when true, return the keys collected so far on a
		parse error instead of raising
	@param allow_sourcing: honor the shell "source" statement
	@param expand: when True, expand ${VAR} references via varexpand().
		May also be a dict of pre-existing variable definitions to seed
		the substitution map.
	@return: dict of key/value pairs, or None if the file does not exist
	@raise portage.exception.ParseError: on malformed input when not
		tolerant
	"""
	if isinstance(expand, dict):
		# Some existing variable definitions have been
		# passed in, for use in substitutions.
		expand_map = expand
		expand = True
	else:
		expand_map = {}
	mykeys = {}
	try:
		# Workaround for avoiding a silent error in shlex that
		# is triggered by a source statement at the end of the file without a
		# trailing newline after the source statement
		# NOTE: shex doesn't seem to support unicode objects
		# (produces spurious \0 characters with python-2.6.2)
		if sys.hexversion < 0x3000000:
			content = open(_unicode_encode(mycfg,
				encoding=_encodings['fs'], errors='strict'), 'rb').read()
		else:
			content = open(_unicode_encode(mycfg,
				encoding=_encodings['fs'], errors='strict'), mode='r',
				encoding=_encodings['content'], errors='replace').read()
		if content and content[-1] != '\n':
			content += '\n'
	except IOError as e:
		if e.errno == PermissionDenied.errno:
			raise PermissionDenied(mycfg)
		if e.errno != errno.ENOENT:
			writemsg("open('%s', 'r'): %s\n" % (mycfg, e), noiselevel=-1)
			raise
		# A missing config file is not an error; signal it with None.
		return None
	try:
		if tolerant:
			shlex_class = _tolerant_shlex
		else:
			shlex_class = shlex.shlex
		# The default shlex.sourcehook() implementation
		# only joins relative paths when the infile
		# attribute is properly set.
		lex = shlex_class(content, infile=mycfg, posix=True)
		lex.wordchars = string.digits + string.ascii_letters + \
			"~!@#$%*_\:;?,./-+{}"
		lex.quotes="\"'"
		if allow_sourcing:
			lex.source="source"
		while 1:
			key=lex.get_token()
			if key == "export":
				# Skip the optional "export" keyword and read the real key.
				key = lex.get_token()
			if key is None:
				#normal end of file
				break;
			equ=lex.get_token()
			if (equ==''):
				#unexpected end of file
				#lex.error_leader(self.filename,lex.lineno)
				if not tolerant:
					writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
						noiselevel=-1)
					raise Exception(_("ParseError: Unexpected EOF: %s: on/before line %s") % (mycfg, lex.lineno))
				else:
					return mykeys
			elif (equ!='='):
				#invalid token
				#lex.error_leader(self.filename,lex.lineno)
				if not tolerant:
					raise Exception(_("ParseError: Invalid token "
						"'%s' (not '='): %s: line %s") % \
						(equ, mycfg, lex.lineno))
				else:
					return mykeys
			val=lex.get_token()
			if val is None:
				#unexpected end of file
				#lex.error_leader(self.filename,lex.lineno)
				if not tolerant:
					writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
						noiselevel=-1)
					raise portage.exception.CorruptionError(_("ParseError: Unexpected EOF: %s: line %s") % (mycfg, lex.lineno))
				else:
					return mykeys
			key = _unicode_decode(key)
			val = _unicode_decode(val)

			if _invalid_var_name_re.search(key) is not None:
				# Reject keys that would not be valid shell variable names.
				if not tolerant:
					raise Exception(_(
						"ParseError: Invalid variable name '%s': line %s") % \
						(key, lex.lineno - 1))
				writemsg(_("!!! Invalid variable name '%s': line %s in %s\n") \
					% (key, lex.lineno - 1, mycfg), noiselevel=-1)
				continue

			if expand:
				mykeys[key] = varexpand(val, expand_map)
				# Make this value available for ${VAR} expansion in
				# subsequent assignments.
				expand_map[key] = mykeys[key]
			else:
				mykeys[key] = val
	except SystemExit as e:
		raise
	except Exception as e:
		# Normalize any parse failure into a uniform ParseError.
		raise portage.exception.ParseError(str(e)+" in "+mycfg)
	return mykeys
5487 -
#cache expansions of constant strings (only populated for inputs that
#contained no variable references, since those are input-independent)
cexpand={}
def varexpand(mystring, mydict={}):
	# NOTE: mydict is read-only here, so the mutable default is safe.
	newstring = cexpand.get(" "+mystring, None)
	if newstring is not None:
		return newstring

	"""
	new variable expansion code. Preserves quotes, handles \n, etc.
	This code is used by the configfile code, as well as others (parser)
	This would be a good bunch of code to port to C.
	"""
	numvars=0
	# A leading sentinel space simplifies the look-behind checks below.
	mystring=" "+mystring
	#in single, double quotes
	insing=0
	indoub=0
	pos=1
	newstring=" "
	while (pos<len(mystring)):
		if (mystring[pos]=="'") and (mystring[pos-1]!="\\"):
			if (indoub):
				newstring=newstring+"'"
			else:
				newstring += "'" # Quote removal is handled by shlex.
				insing=not insing
			pos=pos+1
			continue
		elif (mystring[pos]=='"') and (mystring[pos-1]!="\\"):
			if (insing):
				newstring=newstring+'"'
			else:
				newstring += '"' # Quote removal is handled by shlex.
				indoub=not indoub
			pos=pos+1
			continue
		if (not insing):
			#expansion time
			if (mystring[pos]=="\n"):
				#convert newlines to spaces
				newstring=newstring+" "
				pos=pos+1
			elif (mystring[pos]=="\\"):
				#backslash expansion time
				if (pos+1>=len(mystring)):
					newstring=newstring+mystring[pos]
					break
				else:
					a=mystring[pos+1]
					pos=pos+2
					if a=='a':
						newstring=newstring+chr(0o07)
					elif a=='b':
						newstring=newstring+chr(0o10)
					elif a=='e':
						newstring=newstring+chr(0o33)
					elif (a=='f') or (a=='n'):
						newstring=newstring+chr(0o12)
					elif a=='r':
						newstring=newstring+chr(0o15)
					elif a=='t':
						newstring=newstring+chr(0o11)
					elif a=='v':
						newstring=newstring+chr(0o13)
					elif a!='\n':
						#remove backslash only, as bash does: this takes care of \\ and \' and \" as well
						newstring=newstring+mystring[pos-1:pos]
					continue
			elif (mystring[pos]=="$") and (mystring[pos-1]!="\\"):
				# Variable reference: $VAR or ${VAR}.
				pos=pos+1
				if mystring[pos]=="{":
					pos=pos+1
					braced=True
				else:
					braced=False
				myvstart=pos
				validchars=string.ascii_letters+string.digits+"_"
				while mystring[pos] in validchars:
					if (pos+1)>=len(mystring):
						if braced:
							# Unterminated ${...}: expansion fails.
							cexpand[mystring]=""
							return ""
						else:
							pos=pos+1
							break
					pos=pos+1
				myvarname=mystring[myvstart:pos]
				if braced:
					if mystring[pos]!="}":
						cexpand[mystring]=""
						return ""
					else:
						pos=pos+1
				if len(myvarname)==0:
					# "$" or "${}" with no name: expansion fails.
					cexpand[mystring]=""
					return ""
				numvars=numvars+1
				if myvarname in mydict:
					newstring=newstring+mydict[myvarname]
				else:
					newstring=newstring+mystring[pos]
					pos=pos+1
			else:
				newstring=newstring+mystring[pos]
				pos=pos+1
		else:
			# Inside single quotes: copy characters verbatim.
			newstring=newstring+mystring[pos]
			pos=pos+1
	if numvars==0:
		cexpand[mystring]=newstring[1:]
	return newstring[1:]
5596 -
# pickle_write was broken and has been removed; the name is kept (always
# None) so that legacy code which imports it does not break.
pickle_write = None
5599 -
def pickle_read(filename,default=None,debug=0):
	"""Unpickle and return the object stored in the given file.

	@param filename: path of the pickle file to read
	@param default: value returned when the file is unreadable or
		cannot be unpickled
	@param debug: unused; kept for backward compatibility
	@return: the unpickled object, or default on any failure

	NOTE: unpickling can execute arbitrary code; only call this on
	trusted files.
	"""
	import os
	if not os.access(filename, os.R_OK):
		writemsg(_("pickle_read(): File not readable. '")+filename+"'\n",1)
		return default
	data = None
	try:
		myf = open(_unicode_encode(filename,
			encoding=_encodings['fs'], errors='strict'), 'rb')
		# Close the descriptor even when unpickling fails (the previous
		# implementation leaked it on error).
		try:
			data = pickle.Unpickler(myf).load()
		finally:
			myf.close()
		writemsg(_("pickle_read(): Loaded pickle. '")+filename+"'\n",1)
	except SystemExit as e:
		raise
	except Exception as e:
		# Deliberate best-effort: report the failure and fall back.
		writemsg(_("!!! Failed to load pickle: ")+str(e)+"\n",1)
		data = default
	return data
5620 -
def dump_traceback(msg, noiselevel=1):
	"""Write msg plus a formatted trace via writemsg().

	If called while an exception is being handled, the exception's
	traceback and message are dumped; otherwise the current call stack
	(minus this function's own frame) is shown.
	"""
	import sys, traceback
	exc_type, exc_value, exc_tb = sys.exc_info()
	if exc_tb:
		frames = traceback.extract_tb(exc_tb)
		error = str(exc_value)
	else:
		# No active exception: show the caller's stack instead.
		frames = traceback.extract_stack()[:-1]
		error = None
	writemsg("\n====================================\n", noiselevel=noiselevel)
	writemsg("%s\n\n" % msg, noiselevel=noiselevel)
	for entry in traceback.format_list(frames):
		writemsg(entry, noiselevel=noiselevel)
	if error:
		writemsg(error+"\n", noiselevel=noiselevel)
	writemsg("====================================\n\n", noiselevel=noiselevel)
5637 -
class cmp_sort_key(object):
	"""Adapter that turns an old-style cmp function into a sort key.

	Python 3 dropped the "cmp" keyword from list.sort(); wrapping a cmp
	function in this class yields a callable suitable as the "key"
	argument instead.  Each call produces a lightweight key object whose
	__lt__ method delegates to the wrapped cmp function, which is all
	that sorting requires.
	"""
	__slots__ = ("_cmp_func",)

	def __init__(self, cmp_func):
		"""
		@type cmp_func: callable which takes 2 positional arguments
		@param cmp_func: A cmp function.
		"""
		self._cmp_func = cmp_func

	def __call__(self, lhs):
		# Wrap each element together with the cmp function.
		return cmp_sort_key._cmp_key(self._cmp_func, lhs)

	class _cmp_key(object):
		__slots__ = ("_cmp_func", "_obj")

		def __init__(self, cmp_func, obj):
			self._cmp_func = cmp_func
			self._obj = obj

		def __lt__(self, other):
			# Only key objects of the same class are comparable.
			if other.__class__ is not self.__class__:
				raise TypeError("Expected type %s, got %s" % \
					(self.__class__, other.__class__))
			return self._cmp_func(self._obj, other._obj) < 0
5671 -
def unique_array(s):
	"""Return a list of the elements of s, in arbitrary order, without
	duplicates (idea lifted from the python cookbook, credit: Tim Peters).

	Three strategies are tried, fastest first: a set (hashable
	elements, linear), sort-and-scan (orderable elements, n log n),
	and finally a quadratic membership scan that preserves the order
	of first occurrence.
	"""
	total = len(s)
	try:
		# Linear when every element is hashable.
		return list(set(s))
	except TypeError:
		pass

	# Not hashable; try sorting so duplicates become adjacent.
	try:
		ordered = sorted(s)
	except TypeError:
		pass
	else:
		assert total > 0
		previous = ordered[0]
		write = 1
		for index in range(1, total):
			if ordered[index] != previous:
				previous = ordered[index]
				ordered[write] = previous
				write += 1
		return ordered[:write]

	# Last resort: O(n**2) scan keeping first occurrences.
	result = []
	for element in s:
		if element not in result:
			result.append(element)
	return result
5705 -
def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
	stat_cached=None, follow_links=True):
	"""Apply user, group, and mode bits to a file if the existing bits do not
	already match. The default behavior is to force an exact match of mode
	bits. When mask=0 is specified, mode bits on the target file are allowed
	to be a superset of the mode argument (via logical OR). When mask>0, the
	mode bits that the target file is allowed to have are restricted via
	logical XOR.
	Returns True if the permissions were modified and False otherwise.

	OSError failures are translated into portage exception types
	(OperationNotPermitted, PermissionDenied, ReadOnlyFileSystem,
	FileNotFound); anything else is re-raised unchanged."""

	modified = False

	if stat_cached is None:
		# Stat once up front so ownership and mode checks below can
		# share the result.
		try:
			if follow_links:
				stat_cached = os.stat(filename)
			else:
				stat_cached = os.lstat(filename)
		except OSError as oe:
			func_call = "stat('%s')" % filename
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			else:
				raise

	if (uid != -1 and uid != stat_cached.st_uid) or \
		(gid != -1 and gid != stat_cached.st_gid):
		# Ownership differs; -1 means "leave unchanged" (chown semantics).
		try:
			if follow_links:
				os.chown(filename, uid, gid)
			else:
				import portage.data
				portage.data.lchown(filename, uid, gid)
			modified = True
		except OSError as oe:
			func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			else:
				raise

	new_mode = -1
	st_mode = stat_cached.st_mode & 0o7777 # protect from unwanted bits
	if mask >= 0:
		if mode == -1:
			mode = 0 # Don't add any mode bits when mode is unspecified.
		else:
			mode = mode & 0o7777
		# New mode must contain all requested bits (OR) but none of
		# the masked bits (XOR-then-AND clears bits set in mask).
		if (mode & st_mode != mode) or \
			((mask ^ st_mode) & st_mode != st_mode):
			new_mode = mode | st_mode
			new_mode = (mask ^ new_mode) & new_mode
	elif mode != -1:
		mode = mode & 0o7777 # protect from unwanted bits
		if mode != st_mode:
			new_mode = mode

	# The chown system call may clear S_ISUID and S_ISGID
	# bits, so those bits are restored if necessary.
	if modified and new_mode == -1 and \
		(st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
		if mode == -1:
			new_mode = st_mode
		else:
			mode = mode & 0o7777
			if mask >= 0:
				new_mode = mode | st_mode
				new_mode = (mask ^ new_mode) & new_mode
			else:
				new_mode = mode
		if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
			new_mode = -1

	if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
		# Mode doesn't matter for symlinks.
		new_mode = -1

	if new_mode != -1:
		try:
			os.chmod(filename, new_mode)
			modified = True
		except OSError as oe:
			func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			# Unrecognized errno: re-raise the original OSError.
			raise
	return modified
5809 -
def apply_stat_permissions(filename, newstat, **kwargs):
	"""Convenience wrapper around apply_secpass_permissions() that takes
	the uid, gid, and mode from the given stat result object."""
	return apply_secpass_permissions(
		filename,
		uid=newstat.st_uid,
		gid=newstat.st_gid,
		mode=newstat.st_mode,
		**kwargs)
5815 -
def apply_recursive_permissions(top, uid=-1, gid=-1,
	dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
	"""A wrapper around apply_secpass_permissions that applies permissions
	recursively. If optional argument onerror is specified, it should be a
	function; it will be called with one argument, a PortageException instance.
	Returns True if all permissions are applied and False if some are left
	unapplied.

	Directories get dirmode/dirmask, regular files get filemode/filemask;
	-1 means "leave unchanged" throughout."""

	if onerror is None:
		# Default behavior is to dump errors to stderr so they won't
		# go unnoticed. Callers can pass in a quiet instance.
		def onerror(e):
			if isinstance(e, OperationNotPermitted):
				writemsg(_("Operation Not Permitted: %s\n") % str(e),
					noiselevel=-1)
			elif isinstance(e, FileNotFound):
				writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
			else:
				# Unexpected exception types propagate to the caller.
				raise

	all_applied = True
	for dirpath, dirnames, filenames in os.walk(top):
		try:
			applied = apply_secpass_permissions(dirpath,
				uid=uid, gid=gid, mode=dirmode, mask=dirmask)
			if not applied:
				all_applied = False
		except PortageException as e:
			all_applied = False
			onerror(e)

		for name in filenames:
			try:
				applied = apply_secpass_permissions(os.path.join(dirpath, name),
					uid=uid, gid=gid, mode=filemode, mask=filemask)
				if not applied:
					all_applied = False
			except PortageException as e:
				# Ignore InvalidLocation exceptions such as FileNotFound
				# and DirectoryNotFound since sometimes things disappear,
				# like when adjusting permissions on DISTCC_DIR.
				if not isinstance(e, portage.exception.InvalidLocation):
					all_applied = False
					onerror(e)
	return all_applied
5861 -
def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
	stat_cached=None, follow_links=True):
	"""A wrapper around apply_permissions that uses secpass and simple
	logic to apply as much of the permissions as possible without
	generating an obviously avoidable permission exception. Despite
	attempts to avoid an exception, it's possible that one will be raised
	anyway, so be prepared.
	Returns True if all permissions are applied and False if some are left
	unapplied."""

	if stat_cached is None:
		# Stat once so the privilege checks below can compare against
		# the file's current ownership.
		try:
			if follow_links:
				stat_cached = os.stat(filename)
			else:
				stat_cached = os.lstat(filename)
		except OSError as oe:
			func_call = "stat('%s')" % filename
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			else:
				raise

	all_applied = True

	import portage.data # not imported globally because of circular dep
	if portage.data.secpass < 2:
		# Not running with full privileges: drop ownership changes that
		# would fail, and report them as unapplied instead of raising.

		if uid != -1 and \
			uid != stat_cached.st_uid:
			all_applied = False
			uid = -1

		if gid != -1 and \
			gid != stat_cached.st_gid and \
			gid not in os.getgroups():
			# A group change is still possible if we belong to the
			# target group (os.getgroups()).
			all_applied = False
			gid = -1

	apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
		stat_cached=stat_cached, follow_links=follow_links)
	return all_applied
5908 -
class atomic_ofstream(ObjectProxy):
	"""Write a file atomically via os.rename(). Atomic replacement prevents
	interprocess interference and prevents corruption of the target
	file when the write is interrupted (for example, when an 'out of space'
	error occurs).

	All writes go to a temporary "<name>.<pid>" file in the same
	directory; close() renames it over the target, abort() discards it.
	Instance state is kept via object.__setattr__/__getattribute__ because
	ObjectProxy forwards normal attribute access to the wrapped file."""

	def __init__(self, filename, mode='w', follow_links=True, **kargs):
		"""Opens a temporary filename.pid in the same directory as filename."""
		ObjectProxy.__init__(self)
		object.__setattr__(self, '_aborted', False)
		if 'b' in mode:
			open_func = open
		else:
			# Text mode: use codecs.open so an explicit encoding and
			# error handler can be applied (py2 compatibility).
			open_func = codecs.open
			kargs.setdefault('encoding', _encodings['content'])
			kargs.setdefault('errors', 'backslashreplace')

		if follow_links:
			# First try next to the symlink target, so the rename in
			# close() replaces the real file.
			canonical_path = os.path.realpath(filename)
			object.__setattr__(self, '_real_name', canonical_path)
			tmp_name = "%s.%i" % (canonical_path, os.getpid())
			try:
				object.__setattr__(self, '_file',
					open_func(_unicode_encode(tmp_name,
						encoding=_encodings['fs'], errors='strict'),
						mode=mode, **kargs))
				return
			except IOError as e:
				if canonical_path == filename:
					raise
				# Fall back to the given path below.
				writemsg(_("!!! Failed to open file: '%s'\n") % tmp_name,
					noiselevel=-1)
				writemsg("!!! %s\n" % str(e), noiselevel=-1)

		object.__setattr__(self, '_real_name', filename)
		tmp_name = "%s.%i" % (filename, os.getpid())
		object.__setattr__(self, '_file',
			open_func(_unicode_encode(tmp_name,
				encoding=_encodings['fs'], errors='strict'),
				mode=mode, **kargs))

	def _get_target(self):
		return object.__getattribute__(self, '_file')

	def __getattribute__(self, attr):
		# Keep close/abort/__del__ on the proxy; delegate everything
		# else (write, name, closed, ...) to the underlying file.
		if attr in ('close', 'abort', '__del__'):
			return object.__getattribute__(self, attr)
		return getattr(object.__getattribute__(self, '_file'), attr)

	def close(self):
		"""Closes the temporary file, copies permissions (if possible),
		and performs the atomic replacement via os.rename(). If the abort()
		method has been called, then the temp file is closed and removed."""
		f = object.__getattribute__(self, '_file')
		real_name = object.__getattribute__(self, '_real_name')
		if not f.closed:
			try:
				f.close()
				if not object.__getattribute__(self, '_aborted'):
					try:
						# Preserve the target's existing permissions.
						apply_stat_permissions(f.name, os.stat(real_name))
					except OperationNotPermitted:
						pass
					except FileNotFound:
						pass
					except OSError as oe: # from the above os.stat call
						if oe.errno in (errno.ENOENT, errno.EPERM):
							pass
						else:
							raise
					os.rename(f.name, real_name)
			finally:
				# Make sure we cleanup the temp file
				# even if an exception is raised.
				try:
					os.unlink(f.name)
				except OSError as oe:
					pass

	def abort(self):
		"""If an error occurs while writing the file, the user should
		call this method in order to leave the target file unchanged.
		This will call close() automatically."""
		if not object.__getattribute__(self, '_aborted'):
			object.__setattr__(self, '_aborted', True)
			self.close()

	def __del__(self):
		"""If the user does not explicitely call close(), it is
		assumed that an error has occurred, so we abort()."""
		try:
			f = object.__getattribute__(self, '_file')
		except AttributeError:
			pass
		else:
			if not f.closed:
				self.abort()
		# ensure destructor from the base class is called
		base_destructor = getattr(ObjectProxy, '__del__', None)
		if base_destructor is not None:
			base_destructor(self)
6010 -
def write_atomic(file_path, content, **kwargs):
	"""Atomically replace file_path with the given content.

	Failures are translated into portage exception types
	(OperationNotPermitted, PermissionDenied, ReadOnlyFileSystem,
	FileNotFound); any other IOError/OSError is re-raised as-is after
	the partial write has been aborted."""
	f = None
	try:
		f = atomic_ofstream(file_path, **kwargs)
		f.write(content)
		f.close()
	except (IOError, OSError) as e:
		# Discard the temp file before translating the error.
		if f:
			f.abort()
		func_call = "write_atomic('%s')" % file_path
		if e.errno == errno.EPERM:
			raise OperationNotPermitted(func_call)
		if e.errno == errno.EACCES:
			raise PermissionDenied(func_call)
		if e.errno == errno.EROFS:
			raise ReadOnlyFileSystem(func_call)
		if e.errno == errno.ENOENT:
			raise FileNotFound(file_path)
		raise
6031 -
def ensure_dirs(dir_path, *args, **kwargs):
	"""Create a directory (with parents) and call apply_permissions.
	Extra positional and keyword arguments are forwarded to
	apply_permissions.
	Returns True if a directory is created or the permissions needed to be
	modified, and False otherwise."""

	created_dir = False
	try:
		os.makedirs(dir_path)
	except OSError as oe:
		if oe.errno in (errno.EEXIST, errno.EISDIR):
			# Already present; nothing to create.
			pass
		else:
			func_call = "makedirs('%s')" % dir_path
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			if oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			if oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			raise
	else:
		created_dir = True
	perms_modified = apply_permissions(dir_path, *args, **kwargs)
	return created_dir or perms_modified
6056 -
class LazyItemsDict(UserDict):
	"""A mapping object that behaves like a standard dict except that it allows
	for lazy initialization of values via callable objects. Lazy items can be
	overwritten and deleted just as normal items.

	Lazy keys are registered in self.lazy_items and simultaneously stored
	as None placeholders in the underlying dict so they appear in keys(),
	len(), iteration, etc."""

	__slots__ = ('lazy_items',)

	def __init__(self, *args, **kwargs):

		self.lazy_items = {}
		UserDict.__init__(self, *args, **kwargs)

	def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
		"""Add a lazy item for the given key. When the item is requested,
		value_callable will be called with *pargs and **kwargs arguments.
		The callable is invoked on every access (no caching)."""
		self.lazy_items[item_key] = \
			self._LazyItem(value_callable, pargs, kwargs, False)
		# make it show up in self.keys(), etc...
		UserDict.__setitem__(self, item_key, None)

	def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
		"""This is like addLazyItem except value_callable will only be called
		a maximum of 1 time and the result will be cached for future requests."""
		self.lazy_items[item_key] = \
			self._LazyItem(value_callable, pargs, kwargs, True)
		# make it show up in self.keys(), etc...
		UserDict.__setitem__(self, item_key, None)

	def update(self, *args, **kwargs):
		# Merging from another LazyItemsDict preserves the laziness of
		# its lazy items instead of evaluating them.
		if len(args) > 1:
			raise TypeError(
				"expected at most 1 positional argument, got " + \
				repr(len(args)))
		if args:
			map_obj = args[0]
		else:
			map_obj = None
		if map_obj is None:
			pass
		elif isinstance(map_obj, LazyItemsDict):
			for k in map_obj:
				if k in map_obj.lazy_items:
					UserDict.__setitem__(self, k, None)
				else:
					UserDict.__setitem__(self, k, map_obj[k])
			self.lazy_items.update(map_obj.lazy_items)
		else:
			UserDict.update(self, map_obj)
		if kwargs:
			UserDict.update(self, kwargs)

	def __getitem__(self, item_key):
		if item_key in self.lazy_items:
			lazy_item = self.lazy_items[item_key]
			pargs = lazy_item.pargs
			if pargs is None:
				pargs = ()
			kwargs = lazy_item.kwargs
			if kwargs is None:
				kwargs = {}
			result = lazy_item.func(*pargs, **kwargs)
			if lazy_item.singleton:
				# Cache the result; this also removes the lazy item
				# via __setitem__ below.
				self[item_key] = result
			return result

		else:
			return UserDict.__getitem__(self, item_key)

	def __setitem__(self, item_key, value):
		# An explicit assignment replaces any pending lazy item.
		if item_key in self.lazy_items:
			del self.lazy_items[item_key]
		UserDict.__setitem__(self, item_key, value)

	def __delitem__(self, item_key):
		if item_key in self.lazy_items:
			del self.lazy_items[item_key]
		UserDict.__delitem__(self, item_key)

	def clear(self):
		self.lazy_items.clear()
		UserDict.clear(self)

	def copy(self):
		return self.__copy__()

	def __copy__(self):
		return self.__class__(self)

	def __deepcopy__(self, memo=None):
		"""
		WARNING: If any of the lazy items contains a bound method then it's
		typical for deepcopy() to raise an exception like this:

		File "/usr/lib/python2.5/copy.py", line 189, in deepcopy
		y = _reconstruct(x, rv, 1, memo)
		File "/usr/lib/python2.5/copy.py", line 322, in _reconstruct
		y = callable(*args)
		File "/usr/lib/python2.5/copy_reg.py", line 92, in __newobj__
		return cls.__new__(cls, *args)
		TypeError: instancemethod expected at least 2 arguments, got 0

		If deepcopy() needs to work, this problem can be avoided by
		implementing lazy items with normal (non-bound) functions.

		If deepcopy() raises a TypeError for a lazy item that has been added
		via a call to addLazySingleton(), the singleton will be automatically
		evaluated and deepcopy() will instead be called on the result.
		"""
		if memo is None:
			memo = {}
		from copy import deepcopy
		result = self.__class__()
		memo[id(self)] = result
		for k in self:
			k_copy = deepcopy(k, memo)
			if k in self.lazy_items:
				lazy_item = self.lazy_items[k]
				try:
					result.lazy_items[k_copy] = deepcopy(lazy_item, memo)
				except TypeError:
					if not lazy_item.singleton:
						raise
					# Evaluate the singleton and deep-copy the result
					# instead of the (uncopyable) callable.
					UserDict.__setitem__(result,
						k_copy, deepcopy(self[k], memo))
				else:
					UserDict.__setitem__(result, k_copy, None)
			else:
				UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
		return result

	class _LazyItem(object):
		# Record of one lazy entry: the callable, its arguments, and
		# whether the result should be cached (singleton).

		__slots__ = ('func', 'pargs', 'kwargs', 'singleton')

		def __init__(self, func, pargs, kwargs, singleton):

			if not pargs:
				pargs = None
			if not kwargs:
				kwargs = None

			self.func = func
			self.pargs = pargs
			self.kwargs = kwargs
			self.singleton = singleton

		def __copy__(self):
			return self.__class__(self.func, self.pargs,
				self.kwargs, self.singleton)

		def __deepcopy__(self, memo=None):
			"""
			Override this since the default implementation can fail silently,
			leaving some attributes unset.
			"""
			if memo is None:
				memo = {}
			from copy import deepcopy
			result = self.__copy__()
			memo[id(self)] = result
			result.func = deepcopy(self.func, memo)
			result.pargs = deepcopy(self.pargs, memo)
			result.kwargs = deepcopy(self.kwargs, memo)
			result.singleton = deepcopy(self.singleton, memo)
			return result
6222 -
class ConfigProtect(object):
	"""Decides whether paths under myroot are protected from overwrite,
	based on CONFIG_PROTECT (protect_list) and CONFIG_PROTECT_MASK
	(mask_list) entries."""
	def __init__(self, myroot, protect_list, mask_list):
		self.myroot = myroot
		self.protect_list = protect_list
		self.mask_list = mask_list
		self.updateprotect()

	def updateprotect(self):
		"""Update internal state for isprotected() calls. Nonexistent paths
		are ignored."""

		os = _os_merge

		self.protect = []
		self._dirs = set()
		for x in self.protect_list:
			ppath = normalize_path(
				os.path.join(self.myroot + EPREFIX_LSTRIP, x.lstrip(os.path.sep)))
			mystat = None
			try:
				if stat.S_ISDIR(os.stat(ppath).st_mode):
					self._dirs.add(ppath)
				self.protect.append(ppath)
			except OSError:
				# If it doesn't exist, there's no need to protect it.
				pass

		self.protectmask = []
		for x in self.mask_list:
			ppath = normalize_path(
				os.path.join(self.myroot + EPREFIX_LSTRIP, x.lstrip(os.path.sep)))
			mystat = None
			try:
				"""Use lstat so that anything, even a broken symlink can be
				protected."""
				if stat.S_ISDIR(os.lstat(ppath).st_mode):
					self._dirs.add(ppath)
				self.protectmask.append(ppath)
				"""Now use stat in case this is a symlink to a directory."""
				if stat.S_ISDIR(os.stat(ppath).st_mode):
					self._dirs.add(ppath)
			except OSError:
				# If it doesn't exist, there's no need to mask it.
				pass

	def isprotected(self, obj):
		"""Returns True if obj is protected, False otherwise. The caller must
		ensure that obj is normalized with a single leading slash. A trailing
		slash is optional for directories."""
		# Track the longest matching protect and mask entries; the more
		# specific (longer) match wins.
		masked = 0
		protected = 0
		sep = os.path.sep
		for ppath in self.protect:
			if len(ppath) > masked and obj.startswith(ppath):
				if ppath in self._dirs:
					if obj != ppath and not obj.startswith(ppath + sep):
						# /etc/foo does not match /etc/foobaz
						continue
				elif obj != ppath:
					# force exact match when CONFIG_PROTECT lists a
					# non-directory
					continue
				protected = len(ppath)
				#config file management
				for pmpath in self.protectmask:
					if len(pmpath) >= protected and obj.startswith(pmpath):
						if pmpath in self._dirs:
							if obj != pmpath and \
								not obj.startswith(pmpath + sep):
								# /etc/foo does not match /etc/foobaz
								continue
						elif obj != pmpath:
							# force exact match when CONFIG_PROTECT_MASK lists
							# a non-directory
							continue
						#skip, it's in the mask
						masked = len(pmpath)
		return protected > masked
6301 -
def new_protect_filename(mydest, newmd5=None):
	"""Resolves a config-protect filename for merging, optionally
	using the last filename if the md5 matches.
	(dest,md5) ==> 'string' --- path_to_target_filename
	(dest) ==> ('next', 'highest') --- next_target and most-recent_target
	"""

	# config protection filename format:
	# ._cfg0000_foo
	# 0123456789012

	os = _os_merge

	prot_num = -1
	last_pfile = ""

	if not os.path.exists(mydest):
		# Nothing to protect yet; write straight to the destination.
		return mydest

	# Scan the destination directory for existing ._cfgNNNN_<name>
	# entries and remember the highest sequence number seen.
	real_filename = os.path.basename(mydest)
	real_dirname = os.path.dirname(mydest)
	for pfile in os.listdir(real_dirname):
		if pfile[0:5] != "._cfg":
			continue
		if pfile[10:] != real_filename:
			continue
		try:
			new_prot_num = int(pfile[5:9])
			if new_prot_num > prot_num:
				prot_num = new_prot_num
				last_pfile = pfile
		except ValueError:
			# Non-numeric sequence field; not one of ours.
			continue
	prot_num = prot_num + 1

	new_pfile = normalize_path(os.path.join(real_dirname,
		"._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
	old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
	if last_pfile and newmd5:
		# Reuse the most recent protect file when its content already
		# matches what we are about to write.
		import portage.checksum
		try:
			last_pfile_md5 = portage.checksum._perform_md5_merge(old_pfile)
		except FileNotFound:
			# The file suddenly disappeared or it's a broken symlink.
			pass
		else:
			if last_pfile_md5 == newmd5:
				return old_pfile
	return new_pfile
6351 -
def find_updated_config_files(target_root, config_protect):
	"""
	Generator: scan each CONFIG_PROTECT entry under target_root for
	pending configuration updates (._cfgNNNN_* files) and yield a
	tuple for every entry that has some.

	@param target_root: root directory the protected paths live under
	@param config_protect: iterable of protected paths, relative to
		target_root

	Each yielded tuple has one of two shapes:
		(protected_dir, file_list)  -- the entry is a directory
		(protected_file, None)      -- the entry is a single file
	Nothing is yielded when no configuration files need updating.
	"""

	# NOTE(review): _os_merge is presumably a merge-aware os wrapper
	# defined elsewhere in this module -- confirm before relying on it.
	os = _os_merge

	if config_protect:
		# directories with some protect files in them
		for x in config_protect:
			files = []

			x = os.path.join(target_root, x.lstrip(os.path.sep))
			# Unwritable entries could not be merged anyway; skip them.
			if not os.access(x, os.W_OK):
				continue
			try:
				mymode = os.lstat(x).st_mode
			except OSError:
				# Entry does not exist; nothing to update there.
				continue

			if stat.S_ISLNK(mymode):
				# We want to treat it like a directory if it
				# is a symlink to an existing directory.
				try:
					real_mode = os.stat(x).st_mode
					if stat.S_ISDIR(real_mode):
						mymode = real_mode
				except OSError:
					pass

			if stat.S_ISDIR(mymode):
				# Whole tree: prune hidden directories, match update files.
				mycommand = \
					"find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
			else:
				# Single protected file: search only its own directory
				# for candidates matching its basename.
				mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
					os.path.split(x.rstrip(os.path.sep))
			# Exclude editor backups; NUL-terminate for safe splitting.
			mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
			a = subprocess_getstatusoutput(mycommand)

			if a[0] == 0:
				files = a[1].split('\0')
				# split always produces an empty string as the last element
				if files and not files[-1]:
					del files[-1]
				if files:
					if stat.S_ISDIR(mymode):
						yield (x, files)
					else:
						yield (x, None)
6406 -
def getlibpaths(root):
	""" Return a list of paths that are used for library lookups """

	if EPREFIX != '':
		# PREFIX HACK: LD_LIBRARY_PATH isn't portable, and is considered
		# harmful, so better not use it.  No host OS lib paths are needed
		# either, hence the separate Prefix case.  The CHOST is unknown
		# here, so guessing where GCC's and ld's libs live is hard;
		# GCC's libs should be in lib and usr/lib, and binutils' libs
		# are rarely used.
		candidates = [EPREFIX + "/usr/lib", EPREFIX + "/lib"]
	else:
		# The following is based on the information from ld.so(8).
		candidates = os.environ.get("LD_LIBRARY_PATH", "").split(":")
		candidates += grabfile(os.path.join(root, "etc", "ld.so.conf"))
		candidates += ["/usr/lib", "/lib"]

	# Drop empty entries and normalize whatever remains.
	return [normalize_path(p) for p in candidates if p]