Gentoo Archives: gentoo-commits

From: Zac Medico <zmedico@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/portage:master commit in: pym/portage/dbapi/, pym/portage/util/_dyn_libs/, pym/portage/, pym/_emerge/
Date: Sun, 08 May 2011 04:54:18
Message-Id: dffb2901d349a66bdbba30423c358da7c9938e47.zmedico@gentoo
commit:     dffb2901d349a66bdbba30423c358da7c9938e47
Author:     David James <davidjames <AT> chromium <DOT> org>
AuthorDate: Sat May 7 04:53:31 2011 +0000
Commit:     Zac Medico <zmedico <AT> gentoo <DOT> org>
CommitDate: Sun May 8 04:24:57 2011 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/portage.git;a=commit;h=dffb2901

Use finer grained locks for install.

Narrow scope of merge locks to improve performance.

Instead of locking the DB for the entire package merge, just lock it
when we actually need to do so. Also add locks around conf_mem_file
updating and pkg_* phases.
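
As a rough sketch, the locking pattern the vartree.py hunks below apply
boils down to the following (lockdir/unlockdir and lockfile/unlockfile
are the portage.locks helpers the patch imports; the paths are
throwaway stand-ins, not the real vdb or config memory file):

    from portage.locks import lockdir, unlockdir, lockfile, unlockfile

    vdb_dir = "/tmp/example-vdb"            # stand-in for the vdb directory
    conf_mem_file = "/tmp/example-config"   # stand-in for CONFIG_MEMORY_FILE

    # Hold the vdb lock only while the database entry is actually updated.
    vdb_lock = lockdir(vdb_dir)
    try:
        pass  # e.g. move the temporary entry into place, update CONTENTS
    finally:
        unlockdir(vdb_lock)

    # Likewise, take a file lock just for the config memory update.
    conf_mem_lock = lockfile(conf_mem_file)
    try:
        pass  # read, update and rewrite the config-protect memory file
    finally:
        unlockfile(conf_mem_lock)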

Locking in pkg_* phases can be turned off with
FEATURES="no-ebuild-locks" if you use ebuilds that are careful not
to mess with each other during these phases. The default is to leave
this locking enabled.
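
Users opt out by adding no-ebuild-locks to FEATURES. Distilled from the
EbuildPhase.py hunk below, the guard amounts to the following (the
want_phase_lock helper is only for illustration, not a portage API):

    _locked_phases = ("setup", "preinst", "postinst", "prerm", "postrm")

    def want_phase_lock(phase, features):
        """features: the parsed FEATURES set, e.g. settings.features."""
        return phase in _locked_phases and "no-ebuild-locks" not in features

    print(want_phase_lock("preinst", {"parallel-fetch"}))   # True  -> take lock
    print(want_phase_lock("preinst", {"no-ebuild-locks"}))  # False -> skip lock
    print(want_phase_lock("compile", {"parallel-fetch"}))   # False -> never locked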

Given this new locking, I've improved the scheduler to run merge jobs
in parallel.
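
The actual change is the one-line Scheduler.py hunk below, which gives
the merge task queue the same max_jobs limit as the build queue.
Conceptually (this is not portage's real scheduler machinery) the
effect is a bounded pool of concurrent merges:

    import threading

    max_jobs = 4
    merge_slots = threading.BoundedSemaphore(max_jobs)

    def merge_one_package(pkg):
        with merge_slots:   # at most max_jobs merges run at the same time
            pass            # ... briefly lock the vdb, move files, update entry ...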

Time required for merging 348 packages with --usepkgonly:
- Before patch: 29m50s
- After patch: 10m2s
- After patch w/o locks: 7m9s

Change-Id: I63588c4cc59fa6fe2f8327ea1e4a9e71b241d4fe

Review URL: http://gerrit.chromium.org/gerrit/498

---
 pym/_emerge/EbuildPhase.py                  |  15 +++-
 pym/_emerge/Scheduler.py                    |   1 +
 pym/portage/const.py                        |   4 +-
 pym/portage/dbapi/vartree.py                | 168 ++++++++++++++++-----------
 pym/portage/util/_dyn_libs/LinkageMapELF.py |  23 +++--
 5 files changed, 132 insertions(+), 79 deletions(-)

diff --git a/pym/_emerge/EbuildPhase.py b/pym/_emerge/EbuildPhase.py
index 07fb69c..77b3a4d 100644
--- a/pym/_emerge/EbuildPhase.py
+++ b/pym/_emerge/EbuildPhase.py
@@ -10,6 +10,8 @@ from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
 from _emerge.EbuildProcess import EbuildProcess
 from _emerge.CompositeTask import CompositeTask
 from portage.util import writemsg
+from portage.locks import lockdir
+from portage.locks import unlockdir
 from portage.xml.metadata import MetaDataXML
 import portage
 portage.proxy.lazyimport.lazyimport(globals(),
@@ -28,7 +30,7 @@ from portage import _unicode_encode

 class EbuildPhase(CompositeTask):

-	__slots__ = ("actionmap", "phase", "settings")
+	__slots__ = ("actionmap", "ebuild_lock", "phase", "settings")

 	# FEATURES displayed prior to setup phase
 	_features_display = ("ccache", "distcc", "fakeroot",
@@ -37,6 +39,9 @@ class EbuildPhase(CompositeTask):
 		"splitdebug", "suidctl", "test", "userpriv",
 		"usersandbox")

+	# Locked phases
+	_locked_phases = ("setup", "preinst", "postinst", "prerm", "postrm")
+
 	def _start(self):

 		need_builddir = self.phase not in EbuildProcess._phases_without_builddir
@@ -138,10 +143,18 @@ class EbuildPhase(CompositeTask):
 			phase=self.phase, scheduler=self.scheduler,
 			settings=self.settings)

+		if (self.phase in self._locked_phases and
+			"no-ebuild-locks" not in self.settings.features):
+			root = self.settings["ROOT"]
+			lock_path = os.path.join(root, portage.VDB_PATH + "-ebuild")
+			self.ebuild_lock = lockdir(lock_path)
 		self._start_task(ebuild_process, self._ebuild_exit)

 	def _ebuild_exit(self, ebuild_process):

+		if self.ebuild_lock:
+			unlockdir(self.ebuild_lock)
+
 		fail = False
 		if self._default_exit(ebuild_process) != os.EX_OK:
 			if self.phase == "test" and \

diff --git a/pym/_emerge/Scheduler.py b/pym/_emerge/Scheduler.py
index df13b6b..fc69d48 100644
--- a/pym/_emerge/Scheduler.py
+++ b/pym/_emerge/Scheduler.py
@@ -387,6 +387,7 @@ class Scheduler(PollScheduler):
 	def _set_max_jobs(self, max_jobs):
 		self._max_jobs = max_jobs
 		self._task_queues.jobs.max_jobs = max_jobs
+		self._task_queues.merge.max_jobs = max_jobs

 	def _background_mode(self):
 		"""

diff --git a/pym/portage/const.py b/pym/portage/const.py
index db3f841..dbbaa3e 100644
--- a/pym/portage/const.py
+++ b/pym/portage/const.py
@@ -92,8 +92,8 @@ SUPPORTED_FEATURES = frozenset([
 	"fail-clean", "fixpackages", "force-mirror", "getbinpkg",
 	"installsources", "keeptemp", "keepwork", "fixlafiles", "lmirror",
 	"metadata-transfer", "mirror", "multilib-strict", "news",
-	"noauto", "noclean", "nodoc", "noinfo", "noman", "nostrip",
-	"notitles", "parallel-fetch", "parse-eapi-ebuild-head",
+	"no-ebuild-locks", "noauto", "noclean", "nodoc", "noinfo", "noman",
+	"nostrip", "notitles", "parallel-fetch", "parse-eapi-ebuild-head",
 	"prelink-checksums", "preserve-libs",
 	"protect-owned", "python-trace", "sandbox",
 	"selinux", "sesandbox", "severe", "sfperms",

diff --git a/pym/portage/dbapi/vartree.py b/pym/portage/dbapi/vartree.py
index d8fe7b5..7f9fb99 100644
--- a/pym/portage/dbapi/vartree.py
+++ b/pym/portage/dbapi/vartree.py
@@ -15,7 +15,7 @@ portage.proxy.lazyimport.lazyimport(globals(),
 		'use_reduce,_slot_re',
 	'portage.elog:collect_ebuild_messages,collect_messages,' + \
 		'elog_process,_merge_logentries',
-	'portage.locks:lockdir,unlockdir',
+	'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
 	'portage.output:bold,colorize',
 	'portage.package.ebuild.doebuild:doebuild_environment,' + \
 		'_spawn_phase',
@@ -1228,6 +1228,7 @@ class dblink(object):
 			self.dbdir = self.dbpkgdir

 		self._lock_vdb = None
+		self._lock_vdb_count = 0

 		self.settings = mysettings
 		self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
@@ -1268,25 +1269,19 @@ class dblink(object):
 		self._get_protect_obj().updateprotect()

 	def lockdb(self):
-		if self._lock_vdb:
-			raise AssertionError("Lock already held.")
-		# At least the parent needs to exist for the lock file.
-		ensure_dirs(self.dbroot)
-		if self._scheduler is None:
-			self._lock_vdb = lockdir(self.dbroot)
+		if self._lock_vdb_count:
+			self._lock_vdb_count += 1
 		else:
-			async_lock = AsynchronousLock(path=self.dbroot,
-				scheduler=self._scheduler)
-			async_lock.start()
-			async_lock.wait()
-			self._lock_vdb = async_lock
+			# At least the parent needs to exist for the lock file.
+			ensure_dirs(self.dbroot)
+			self._lock_vdb = lockdir(self.dbroot)

 	def unlockdb(self):
-		if self._lock_vdb is not None:
-			if isinstance(self._lock_vdb, AsynchronousLock):
-				self._lock_vdb.unlock()
-			else:
-				unlockdir(self._lock_vdb)
+		if self._lock_vdb_count > 1:
+			self._lock_vdb_count -= 1
+		else:
+			self._lock_vdb_count = 0
+			unlockdir(self._lock_vdb)
 		self._lock_vdb = None

 	def getpath(self):
@@ -1322,8 +1317,12 @@ class dblink(object):
 		"""
 		For a given db entry (self), erase the CONTENTS values.
 		"""
-		if os.path.exists(self.dbdir+"/CONTENTS"):
-			os.unlink(self.dbdir+"/CONTENTS")
+		self.lockdb()
+		try:
+			if os.path.exists(self.dbdir+"/CONTENTS"):
+				os.unlink(self.dbdir+"/CONTENTS")
+		finally:
+			self.unlockdb()

 	def _clear_contents_cache(self):
 		self.contentscache = None
@@ -1506,10 +1505,6 @@ class dblink(object):
 		@returns:
 		1. os.EX_OK if everything went well.
 		2. return code of the failed phase (for prerm, postrm, cleanrm)
-
-		Notes:
-		The caller must ensure that lockdb() and unlockdb() are called
-		before and after this method.
 		"""

 		if trimworld is not None:
@@ -1617,7 +1612,12 @@ class dblink(object):
 				showMessage(_("!!! FAILED prerm: %s\n") % retval,
 					level=logging.ERROR, noiselevel=-1)

-		self._unmerge_pkgfiles(pkgfiles, others_in_slot)
+		conf_mem_file = os.path.join(self._eroot, CONFIG_MEMORY_FILE)
+		conf_mem_lock = lockfile(conf_mem_file)
+		try:
+			self._unmerge_pkgfiles(pkgfiles, others_in_slot, conf_mem_file)
+		finally:
+			unlockfile(conf_mem_lock)
 		self._clear_contents_cache()

 		if myebuildpath:
@@ -1728,10 +1728,18 @@ class dblink(object):
 		else:
 			self.settings.pop("PORTAGE_LOG_FILE", None)

-		env_update(target_root=self.settings['ROOT'],
-			prev_mtimes=ldpath_mtimes,
-			contents=contents, env=self.settings.environ(),
-			writemsg_level=self._display_merge)
+		# Lock the config memory file to prevent symlink creation
+		# in merge_contents from overlapping with env-update.
+		conf_mem_file = os.path.join(self._eroot, CONFIG_MEMORY_FILE)
+		conf_mem_lock = lockfile(conf_mem_file)
+		try:
+			env_update(target_root=self.settings['ROOT'],
+				prev_mtimes=ldpath_mtimes,
+				contents=contents, env=self.settings.environ(),
+				writemsg_level=self._display_merge)
+		finally:
+			unlockfile(conf_mem_lock)
+
 		return os.EX_OK

 	def _display_merge(self, msg, level=0, noiselevel=0):
@@ -1753,7 +1761,7 @@ class dblink(object):
 			log_path=log_path, background=background,
 			level=level, noiselevel=noiselevel)

-	def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
+	def _unmerge_pkgfiles(self, pkgfiles, others_in_slot, conf_mem_file):
 		"""

 		Unmerges the contents of a package from the liveFS
@@ -1789,7 +1797,6 @@ class dblink(object):
 		dest_root = self._eroot
 		dest_root_len = len(dest_root) - 1

-		conf_mem_file = os.path.join(dest_root, CONFIG_MEMORY_FILE)
 		cfgfiledict = grabdict(conf_mem_file)
 		stale_confmem = []

@@ -3167,8 +3174,12 @@ class dblink(object):
 			# get_owners is slow for large numbers of files, so
 			# don't look them all up.
 			collisions = collisions[:20]
-			owners = self.vartree.dbapi._owners.get_owners(collisions)
-			self.vartree.dbapi.flush_cache()
+			self.lockdb()
+			try:
+				owners = self.vartree.dbapi._owners.get_owners(collisions)
+				self.vartree.dbapi.flush_cache()
+			finally:
+				self.unlockdb()

 			for pkg, owned_files in owners.items():
 				cpv = pkg.mycpv
@@ -3247,25 +3258,29 @@ class dblink(object):

 		#if we have a file containing previously-merged config file md5sums, grab it.
 		conf_mem_file = os.path.join(self._eroot, CONFIG_MEMORY_FILE)
-		cfgfiledict = grabdict(conf_mem_file)
-		if "NOCONFMEM" in self.settings:
-			cfgfiledict["IGNORE"]=1
-		else:
-			cfgfiledict["IGNORE"]=0
-
-		# Always behave like --noconfmem is enabled for downgrades
-		# so that people who don't know about this option are less
-		# likely to get confused when doing upgrade/downgrade cycles.
-		pv_split = catpkgsplit(self.mycpv)[1:]
-		for other in others_in_slot:
-			if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
-				cfgfiledict["IGNORE"] = 1
-				break
+		conf_mem_lock = lockfile(conf_mem_file)
+		try:
+			cfgfiledict = grabdict(conf_mem_file)
+			if "NOCONFMEM" in self.settings:
+				cfgfiledict["IGNORE"]=1
+			else:
+				cfgfiledict["IGNORE"]=0
+
+			# Always behave like --noconfmem is enabled for downgrades
+			# so that people who don't know about this option are less
+			# likely to get confused when doing upgrade/downgrade cycles.
+			pv_split = catpkgsplit(self.mycpv)[1:]
+			for other in others_in_slot:
+				if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
+					cfgfiledict["IGNORE"] = 1
+					break

-		rval = self._merge_contents(srcroot, destroot, cfgfiledict,
-			conf_mem_file)
-		if rval != os.EX_OK:
-			return rval
+			rval = self._merge_contents(srcroot, destroot, cfgfiledict,
+				conf_mem_file)
+			if rval != os.EX_OK:
+				return rval
+		finally:
+			unlockfile(conf_mem_lock)

 		# These caches are populated during collision-protect and the data
 		# they contain is now invalid. It's very important to invalidate
@@ -3337,8 +3352,12 @@ class dblink(object):
 				else:
 					emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))

-				# TODO: Check status and abort if necessary.
-				dblnk.delete()
+				self.lockdb()
+				try:
+					# TODO: Check status and abort if necessary.
+					dblnk.delete()
+				finally:
+					self.unlockdb()
 				showMessage(_(">>> Original instance of package unmerged safely.\n"))

 			if len(others_in_slot) > 1:
@@ -3349,8 +3368,12 @@ class dblink(object):

 		# We hold both directory locks.
 		self.dbdir = self.dbpkgdir
-		self.delete()
-		_movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+		self.lockdb()
+		try:
+			self.delete()
+			_movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+		finally:
+			self.unlockdb()

 		# Check for file collisions with blocking packages
 		# and remove any colliding files from their CONTENTS
@@ -3358,9 +3381,13 @@ class dblink(object):
 		self._clear_contents_cache()
 		contents = self.getcontents()
 		destroot_len = len(destroot) - 1
-		for blocker in blockers:
-			self.vartree.dbapi.removeFromContents(blocker, iter(contents),
-				relative_paths=False)
+		self.lockdb()
+		try:
+			for blocker in blockers:
+				self.vartree.dbapi.removeFromContents(blocker, iter(contents),
+					relative_paths=False)
+		finally:
+			self.unlockdb()

 		plib_registry = self.vartree.dbapi._plib_registry
 		if plib_registry:
@@ -3423,11 +3450,18 @@ class dblink(object):
 			if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
 				downgrade = True

-		#update environment settings, library paths. DO NOT change symlinks.
-		env_update(makelinks=(not downgrade),
-			target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
-			contents=contents, env=self.settings.environ(),
-			writemsg_level=self._display_merge)
+		# Lock the config memory file to prevent symlink creation
+		# in merge_contents from overlapping with env-update.
+		conf_mem_file = os.path.join(self._eroot, CONFIG_MEMORY_FILE)
+		conf_mem_lock = lockfile(conf_mem_file)
+		try:
+			#update environment settings, library paths. DO NOT change symlinks.
+			env_update(makelinks=(not downgrade),
+				target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
+				contents=contents, env=self.settings.environ(),
+				writemsg_level=self._display_merge)
+		finally:
+			unlockfile(conf_mem_lock)

 		# For gcc upgrades, preserved libs have to be removed after the
 		# the library path has been updated.
@@ -3850,7 +3884,6 @@ class dblink(object):
 		"""
 		myroot = None
 		retval = -1
-		self.lockdb()
 		self.vartree.dbapi._bump_mtime(self.mycpv)
 		try:
 			retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
@@ -3902,7 +3935,6 @@ class dblink(object):
 				pass
 			else:
 				self.vartree.dbapi._linkmap._clear_cache()
-			self.unlockdb()
 			self.vartree.dbapi._bump_mtime(self.mycpv)
 		return retval

@@ -4005,11 +4037,14 @@ def unmerge(cat, pkg, myroot=None, settings=None,
 			vartree=vartree, scheduler=scheduler)
 		vartree = mylink.vartree
 	try:
-		mylink.lockdb()
 		if mylink.exists():
 			retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
 			if retval == os.EX_OK:
-				mylink.delete()
+				mylink.lockdb()
+				try:
+					mylink.delete()
+				finally:
+					mylink.unlockdb()
 			return retval
 		return os.EX_OK
 	finally:
@@ -4018,7 +4053,6 @@ def unmerge(cat, pkg, myroot=None, settings=None,
 			pass
 		else:
 			vartree.dbapi._linkmap._clear_cache()
-		mylink.unlockdb()

def write_contents(contents, root, f):
	"""

diff --git a/pym/portage/util/_dyn_libs/LinkageMapELF.py b/pym/portage/util/_dyn_libs/LinkageMapELF.py
index 9e79bd8..ce77bb4 100644
--- a/pym/portage/util/_dyn_libs/LinkageMapELF.py
+++ b/pym/portage/util/_dyn_libs/LinkageMapELF.py
@@ -13,6 +13,8 @@ from portage import _unicode_encode
 from portage.cache.mappings import slot_dict_class
 from portage.exception import CommandNotFound
 from portage.localization import _
+from portage.locks import lockdir
+from portage.locks import unlockdir
 from portage.util import getlibpaths
 from portage.util import grabfile
 from portage.util import normalize_path
@@ -181,15 +183,18 @@ class LinkageMapELF(object):
 				lines.append((include_file, line))

 		aux_keys = [self._needed_aux_key]
-		for cpv in self._dbapi.cpv_all():
-			if exclude_pkgs is not None and cpv in exclude_pkgs:
-				continue
-			needed_file = self._dbapi.getpath(cpv,
-				filename=self._needed_aux_key)
-			for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
-				lines.append((needed_file, line))
-		# Cache NEEDED.* files avoid doing excessive IO for every rebuild.
-		self._dbapi.flush_cache()
+		vdb_path = os.path.join(self._root, portage.VDB_PATH)
+		vdb_lock = lockdir(vdb_path)
+		try:
+			for cpv in self._dbapi.cpv_all():
+				if exclude_pkgs is not None and cpv in exclude_pkgs:
+					continue
+				needed_file = self._dbapi.getpath(cpv,
+					filename=self._needed_aux_key)
+				for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
+					lines.append((needed_file, line))
+		finally:
+			unlockdir(vdb_lock)

 		# have to call scanelf for preserved libs here as they aren't
 		# registered in NEEDED.ELF.2 files