Gentoo Archives: gentoo-commits

From: "Fabian Groffen (grobian)" <grobian@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] portage r10268 - in main/branches/prefix/pym: _emerge portage/dbapi
Date: Fri, 09 May 2008 12:29:25
Message-Id: E1JuRj0-00018G-0y@stork.gentoo.org
1 Author: grobian
2 Date: 2008-05-09 12:29:20 +0000 (Fri, 09 May 2008)
3 New Revision: 10268
4
5 Modified:
6 main/branches/prefix/pym/_emerge/__init__.py
7 main/branches/prefix/pym/portage/dbapi/vartree.py
8 Log:
9 Merged from trunk 10246:10266
10
11 | 10250 | Make some confmem behavior modifications in order to try and |
12 | zmedico | make it less confusing for people who have forgotten about |
13 | | the --noconfmem option or are completely unaware of it. |
14 | | Thanks to Joe Peterson <lavajoe@g.o> for suggesting |
15 | | these: * Always behave like --noconfmem is enabled for |
16 | | downgrades. * Purge confmem entries when a package is |
17 | | unmerged rather than replaced. |
18
19 | 10252 | Bug #220987 - Fix UnboundLocalError for 'skip' that only |
20 | zmedico | occurs when using alternate ROOT. Thanks to Ryan Tandy |
21 | | <tarpman@×××××.com>. |
22
23 | 10254 | Optimize BlockerDB.findInstalledBlockers() so that it |
24 | zmedico | doesn't unnecessarily try to match packages against an empty |
25 | | set of blocker atoms. |
26
27 | 10256 | Make the fix for bug #220341 better by checking all selected |
28 | zmedico | packages for matching cpv rather than just the last one. |
29
30 | 10258 | Allow scheduled uninstalls to be selected the same way as |
31 | zmedico | other leaf nodes. |
32
33 | 10260 | Use set.intersection() instead of a for loop. |
34 | zmedico | |
35
36 | 10262 | If an uninstall task fails inside MergeTask.merge(), use an |
37 | zmedico | UninstallFailure exception to handle it instead of allowing |
38 | | unmerge() to call exit(). |
39
40 | 10264 | For the "blockers" parameter that's passed into the dblink |
41 | zmedico | constructor now, make it a callable since it really |
42 | | shouldn't be called until the vdb lock has been acquired. |
43
44 | 10266 | Simplify code for derived Package attributes. |
45 | zmedico | |
46
47
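Of the merged changes above, r10264 is the one that alters an interface: the
"blockers" argument to the dblink constructor becomes a callable so that the
blocker lookup is deferred until the vdb lock is actually held. A rough
standalone sketch of that pattern follows; every name in it is an illustrative
stand-in, not a Portage API.

# Minimal sketch of the deferred "blockers" idea from r10264: pass a
# callable instead of a precomputed value so the lookup only happens
# once the vdb lock is held.  All names below are illustrative
# stand-ins, not Portage APIs.

def make_blocker_getter(find_blockers, pkg):
    # Defer the (lock-sensitive) blocker lookup until the callable is
    # actually invoked by the merge code.
    def get_blockers():
        return find_blockers(pkg, acquire_lock=0)
    return get_blockers


def merge_with_lock(acquire_vdb_lock, release_vdb_lock, get_blockers):
    # Take the lock first, then resolve the blockers.
    lock = acquire_vdb_lock()
    try:
        blockers = get_blockers() if get_blockers is not None else []
        # ... the real merge would act on `blockers` here ...
        return blockers
    finally:
        release_vdb_lock(lock)


if __name__ == "__main__":
    # Tiny smoke test with dummy lock and lookup functions.
    getter = make_blocker_getter(lambda pkg, acquire_lock: [], "app-misc/foo-1.0")
    print(merge_with_lock(lambda: object(), lambda lock: None, getter))
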
48 Modified: main/branches/prefix/pym/_emerge/__init__.py
49 ===================================================================
50 --- main/branches/prefix/pym/_emerge/__init__.py 2008-05-09 08:46:04 UTC (rev 10267)
51 +++ main/branches/prefix/pym/_emerge/__init__.py 2008-05-09 12:29:20 UTC (rev 10268)
52 @@ -991,7 +991,8 @@
53 user doesn't necessarily need write access to the vardb in cases where
54 global updates are necessary (updates are performed when necessary if there
55 is not a matching ebuild in the tree)."""
56 - def __init__(self, real_vartree, portdb, db_keys, pkg_cache):
57 + def __init__(self, real_vartree, portdb,
58 + db_keys, pkg_cache, acquire_lock=1):
59 self.root = real_vartree.root
60 self.settings = real_vartree.settings
61 mykeys = db_keys[:]
62 @@ -1008,7 +1009,7 @@
63 pass
64 vdb_lock = None
65 try:
66 - if os.access(vdb_path, os.W_OK):
67 + if acquire_lock and os.access(vdb_path, os.W_OK):
68 vdb_lock = portage.locks.lockdir(vdb_path)
69 real_dbapi = real_vartree.dbapi
70 slot_counters = {}
71 @@ -1305,10 +1306,8 @@
72 self.cp = portage.cpv_getkey(self.cpv)
73 self.slot_atom = "%s:%s" % (self.cp, self.metadata["SLOT"])
74 self.cpv_slot = "%s:%s" % (self.cpv, self.metadata["SLOT"])
75 - cpv_parts = portage.catpkgsplit(self.cpv)
76 - self.category = cpv_parts[0]
77 - self.pv_split = cpv_parts[1:]
78 - self.pf = self.cpv.replace(self.category + "/", "", 1)
79 + self.category, self.pf = portage.catsplit(self.cpv)
80 + self.pv_split = portage.catpkgsplit(self.cpv)[1:]
81
82 def _get_hash_key(self):
83 hash_key = getattr(self, "_hash_key", None)
84 @@ -1550,7 +1549,7 @@
85 "vartree" : self._vartree,
86 }}
87
88 - def findInstalledBlockers(self, new_pkg):
89 + def findInstalledBlockers(self, new_pkg, acquire_lock=0):
90 blocker_cache = self._blocker_cache
91 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
92 dep_check_trees = self._dep_check_trees
93 @@ -1558,7 +1557,8 @@
94 stale_cache = set(blocker_cache)
95 fake_vartree = \
96 FakeVartree(self._vartree,
97 - self._portdb, Package.metadata_keys, {})
98 + self._portdb, Package.metadata_keys, {},
99 + acquire_lock=acquire_lock)
100 vardb = fake_vartree.dbapi
101 installed_pkgs = list(vardb)
102
103 @@ -1629,13 +1629,14 @@
104
105 blocker_atoms = [atom[1:] for atom in atoms \
106 if atom.startswith("!")]
107 - blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
108 - for inst_pkg in installed_pkgs:
109 - try:
110 - blocker_atoms.iterAtomsForPackage(inst_pkg).next()
111 - except (portage.exception.InvalidDependString, StopIteration):
112 - continue
113 - blocking_pkgs.add(inst_pkg)
114 + if blocker_atoms:
115 + blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
116 + for inst_pkg in installed_pkgs:
117 + try:
118 + blocker_atoms.iterAtomsForPackage(inst_pkg).next()
119 + except (portage.exception.InvalidDependString, StopIteration):
120 + continue
121 + blocking_pkgs.add(inst_pkg)
122
123 return blocking_pkgs
124
125 @@ -2964,9 +2965,11 @@
126 # Therefore, assume that such SLOT dependencies are already
127 # satisfied rather than forcing a rebuild.
128 if installed and not cpv_list and matched_packages \
129 - and vardb.cpv_exists(matched_packages[-1].cpv) and \
130 - portage.dep.dep_getslot(atom):
131 - cpv_list = [matched_packages[-1].cpv]
132 + and portage.dep.dep_getslot(atom):
133 + for pkg in matched_packages:
134 + if vardb.cpv_exists(pkg.cpv):
135 + cpv_list = [pkg.cpv]
136 + break
137
138 if not cpv_list:
139 continue
140 @@ -3639,7 +3642,8 @@
141 """
142 return [node for node in mygraph.leaf_nodes(**kwargs) \
143 if isinstance(node, Package) and \
144 - node.operation != "uninstall"]
145 + (node.operation != "uninstall" or \
146 + node in scheduled_uninstalls)]
147
148 # sys-apps/portage needs special treatment if ROOT="/"
149 running_root = "/"
150 @@ -3836,13 +3840,6 @@
151 selected_nodes = list(selected_nodes)
152 selected_nodes.sort(cmp_circular_bias)
153
154 - if not selected_nodes and scheduled_uninstalls:
155 - selected_nodes = set()
156 - for node in scheduled_uninstalls:
157 - if not mygraph.child_nodes(node):
158 - selected_nodes.add(node)
159 - scheduled_uninstalls.difference_update(selected_nodes)
160 -
161 if not selected_nodes and not myblocker_uninstalls.is_empty():
162 # An Uninstall task needs to be executed in order to
163 # avoid conflict if possible.
164 @@ -3925,6 +3922,7 @@
165 # when necessary, as long as the atom will be satisfied
166 # in the final state.
167 graph_db = self.mydbapi[task.root]
168 + skip = False
169 try:
170 for atom in root_config.sets[
171 "world"].iterAtomsForPackage(task):
172 @@ -4025,6 +4023,7 @@
173 "uninstall" == node.operation:
174 have_uninstall_task = True
175 uninst_task = node
176 + scheduled_uninstalls.remove(uninst_task)
177 else:
178 vardb = self.trees[node.root]["vartree"].dbapi
179 previous_cpv = vardb.match(node.slot_atom)
180 @@ -5531,6 +5530,11 @@
181
182 class MergeTask(object):
183
184 + _opts_ignore_blockers = \
185 + frozenset(["--buildpkgonly",
186 + "--fetchonly", "--fetch-all-uri",
187 + "--nodeps", "--pretend"])
188 +
189 def __init__(self, settings, trees, myopts):
190 self.settings = settings
191 self.target_root = settings["ROOT"]
192 @@ -5551,14 +5555,22 @@
193 self._spawned_pids = []
194
195 def _find_blockers(self, new_pkg):
196 - for opt in ("--buildpkgonly", "--nodeps",
197 - "--fetchonly", "--fetch-all-uri", "--pretend"):
198 - if opt in self.myopts:
199 - return None
200 + """
201 + Returns a callable which should be called only when
202 + the vdb lock has been acquired.
203 + """
204 + def get_blockers():
205 + return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
206 + return get_blockers
207
208 + def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
209 + if self._opts_ignore_blockers.intersection(self.myopts):
210 + return None
211 +
212 blocker_dblinks = []
213 for blocking_pkg in self._blocker_db[
214 - new_pkg.root].findInstalledBlockers(new_pkg):
215 + new_pkg.root].findInstalledBlockers(new_pkg,
216 + acquire_lock=acquire_lock):
217 if new_pkg.slot_atom == blocking_pkg.slot_atom:
218 continue
219 if new_pkg.cpv == blocking_pkg.cpv:
220 @@ -5736,10 +5748,15 @@
221 mergecount += 1
222 pkg = x
223 metadata = pkg.metadata
224 +
225 if pkg.installed:
226 if not (buildpkgonly or fetchonly or pretend):
227 - unmerge(root_config, self.myopts, "unmerge",
228 - [pkg.cpv], mtimedb["ldpath"], clean_world=0)
229 + try:
230 + unmerge(root_config, self.myopts, "unmerge",
231 + [pkg.cpv], mtimedb["ldpath"], clean_world=0,
232 + raise_on_error=1)
233 + except UninstallFailure, e:
234 + return e.status
235 continue
236
237 if x[0]=="blocks":
238 @@ -6061,8 +6078,20 @@
239 sys.exit(0)
240 return os.EX_OK
241
242 +class UninstallFailure(portage.exception.PortageException):
243 + """
244 + An instance of this class is raised by unmerge() when
245 + an uninstallation fails.
246 + """
247 + status = 1
248 + def __init__(self, *pargs):
249 + portage.exception.PortageException.__init__(self, pargs)
250 + if pargs:
251 + self.status = pargs[0]
252 +
253 def unmerge(root_config, myopts, unmerge_action,
254 - unmerge_files, ldpath_mtimes, autoclean=0, clean_world=1, ordered=0):
255 + unmerge_files, ldpath_mtimes, autoclean=0,
256 + clean_world=1, ordered=0, raise_on_error=0):
257 settings = root_config.settings
258 sets = root_config.sets
259 vartree = root_config.trees["vartree"]
260 @@ -6440,6 +6469,8 @@
261 vartree=vartree, ldpath_mtimes=ldpath_mtimes)
262 if retval != os.EX_OK:
263 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
264 + if raise_on_error:
265 + raise UninstallFailure(retval)
266 sys.exit(retval)
267 else:
268 if clean_world:
269
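The raise_on_error/UninstallFailure change in the hunks above can be
summarized outside of emerge as follows; run_unmerge and merge_step are
illustrative stand-ins for unmerge() and MergeTask.merge(), not their real
signatures.

# Standalone sketch of the raise-instead-of-exit pattern from r10262.
import os


class UninstallFailure(Exception):
    """Carries the non-zero exit status of a failed uninstall."""
    status = 1

    def __init__(self, *pargs):
        Exception.__init__(self, pargs)
        if pargs:
            self.status = pargs[0]


def run_unmerge(retval, raise_on_error=0):
    # Stand-in for unmerge(): raise instead of exiting when asked to.
    if retval != os.EX_OK:
        if raise_on_error:
            # Let the caller decide how to unwind instead of exiting here.
            raise UninstallFailure(retval)
        raise SystemExit(retval)
    return os.EX_OK


def merge_step(retval):
    # Mirrors how the merge loop consumes the exception and returns
    # its status instead of letting the process exit.
    try:
        return run_unmerge(retval, raise_on_error=1)
    except UninstallFailure as e:
        return e.status
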
270 Modified: main/branches/prefix/pym/portage/dbapi/vartree.py
271 ===================================================================
272 --- main/branches/prefix/pym/portage/dbapi/vartree.py 2008-05-09 08:46:04 UTC (rev 10267)
273 +++ main/branches/prefix/pym/portage/dbapi/vartree.py 2008-05-09 12:29:20 UTC (rev 10268)
274 @@ -1493,8 +1493,12 @@
275 vartree=self.vartree))
276 dest_root = normalize_path(self.vartree.root).rstrip(os.path.sep) + \
277 os.path.sep
278 - dest_root_len = len(dest_root)
279 + dest_root_len = len(dest_root) - 1
280
281 + conf_mem_file = os.path.join(dest_root, CONFIG_MEMORY_FILE)
282 + cfgfiledict = grabdict(conf_mem_file)
283 + stale_confmem = []
284 +
285 unmerge_orphans = "unmerge-orphans" in self.settings.features
286
287 if pkgfiles:
288 @@ -1558,6 +1562,9 @@
289 continue
290 if obj.startswith(dest_root):
291 relative_path = obj[dest_root_len:]
292 + if not others_in_slot and \
293 + relative_path in cfgfiledict:
294 + stale_confmem.append(relative_path)
295 is_owned = False
296 for dblnk in others_in_slot:
297 if dblnk.isowner(relative_path, dest_root):
298 @@ -1682,6 +1689,12 @@
299 show_unmerge("---", "!empty", "dir", obj)
300 del e
301
302 + # Remove stale entries from config memory.
303 + if stale_confmem:
304 + for filename in stale_confmem:
305 + del cfgfiledict[filename]
306 + writedict(cfgfiledict, conf_mem_file)
307 +
308 #remove self from vartree database so that our own virtual gets zapped if we're the last node
309 self.vartree.zap(self.mycpv)
310
311 @@ -2159,7 +2172,11 @@
312 self._preserve_libs(srcroot, destroot, myfilelist+mylinklist, counter, inforoot)
313
314 # check for package collisions
315 - blockers = self._blockers
316 + blockers = None
317 + if self._blockers is not None:
318 + # This is only supposed to be called when
319 + # the vdb is locked, like it is here.
320 + blockers = self._blockers()
321 if blockers is None:
322 blockers = []
323 collisions = self._collision_protect(srcroot, destroot,
324 @@ -2295,6 +2312,15 @@
325 else:
326 cfgfiledict["IGNORE"]=0
327
328 + # Always behave like --noconfmem is enabled for downgrades
329 + # so that people who don't know about this option are less
330 + # likely to get confused when doing upgrade/downgrade cycles.
331 + pv_split = catpkgsplit(self.mycpv)[1:]
332 + for other in others_in_slot:
333 + if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
334 + cfgfiledict["IGNORE"] = 1
335 + break
336 +
337 # Don't bump mtimes on merge since some application require
338 # preservation of timestamps. This means that the unmerge phase must
339 # check to see if file belongs to an installed instance in the same
340
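The downgrade check added above (always behave as if --noconfmem is enabled
for downgrades) can be exercised in isolation. This sketch assumes a working
portage installation for catpkgsplit/pkgcmp; the function name and the cpv
strings are made-up examples.

# Standalone illustration of the confmem downgrade check from r10250.
from portage.versions import catpkgsplit, pkgcmp


def should_ignore_confmem(new_cpv, installed_cpvs):
    # Return True when new_cpv is a downgrade relative to any installed
    # package in the same slot (same package name), mirroring the
    # cfgfiledict["IGNORE"] logic in dblink.mergeme's caller.
    pv_split = catpkgsplit(new_cpv)[1:]
    for other in installed_cpvs:
        if pkgcmp(pv_split, catpkgsplit(other)[1:]) < 0:
            return True
    return False


# Example: merging foo-1.0 while foo-1.1 is installed is a downgrade,
# so confmem should be ignored (same effect as --noconfmem).
print(should_ignore_confmem("app-misc/foo-1.0", ["app-misc/foo-1.1"]))
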
341 --
342 gentoo-commits@l.g.o mailing list