Gentoo Archives: gentoo-commits

From: "Jorge Manuel B. S. Vicetto" <jmbsvicetto@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/releng:master commit in: scripts/
Date: Mon, 09 Jul 2012 17:49:54
Message-Id: 1341856124.498f935c119b4b8093b2abeb1f29916aab67232f.jmbsvicetto@gentoo
1 commit: 498f935c119b4b8093b2abeb1f29916aab67232f
2 Author: Jorge Manuel B. S. Vicetto (jmbsvicetto) <jmbsvicetto <AT> gentoo <DOT> org>
3 AuthorDate: Mon Jul 9 17:47:42 2012 +0000
4 Commit: Jorge Manuel B. S. Vicetto <jmbsvicetto <AT> gentoo <DOT> org>
5 CommitDate: Mon Jul 9 17:48:44 2012 +0000
6 URL: http://git.overlays.gentoo.org/gitweb/?p=proj/releng.git;a=commit;h=498f935c
7
8 Add releng scripts to the repository.
9
10 ---
11 scripts/backup_snapshot_repo | 10 +
12 scripts/cache-tools.py | 700 ++++++++++++++++++++++++++++++++++++++++++
13 scripts/copy_buildsync.sh | 127 ++++++++
14 scripts/run_catalyst | 2 +
15 scripts/run_official | 39 +++
16 scripts/run_snapshot | 2 +
17 scripts/stage_build.sh | 162 ++++++++++
18 scripts/sudo_catalyst | 28 ++
19 scripts/sudo_official | 46 +++
20 scripts/sudo_snapshot | 20 ++
21 scripts/update_auto_tree | 2 +
22 scripts/update_official_tree | 2 +
23 scripts/update_snapshot_tree | 2 +
24 13 files changed, 1142 insertions(+), 0 deletions(-)
25
26 diff --git a/scripts/backup_snapshot_repo b/scripts/backup_snapshot_repo
27 new file mode 100755
28 index 0000000..94b2828
29 --- /dev/null
30 +++ b/scripts/backup_snapshot_repo
31 @@ -0,0 +1,10 @@
#!/bin/bash
# Mirror the snapshot svn repository from tmpfs to persistent disk storage,
# logging progress and failures to /var/log/snapshot-tree-backup.log.

# Start our rsyncs
RSYNC_OPTS="--archive --delete --sparse --whole-file"

# Only run when the repository looks fully provisioned (the post-commit
# hook is one of the last pieces put in place).
if [ -e /release/repos/snapshot-tree/hooks/post-commit ]
then
	echo "$(date): Starting rsync of trees from tmpfs to disk..." >> /var/log/snapshot-tree-backup.log
	# Bug fix: use ">> log 2>&1" so BOTH stdout and stderr land in the log.
	# The original "2>&1 >> log" duplicated stderr onto the terminal first,
	# so rsync errors never reached the log file.
	rsync ${RSYNC_OPTS} /release/repos/snapshot-tree/* /release/repos/snapshot-tree-disk >> /var/log/snapshot-tree-backup.log 2>&1 || echo "$(date): rsync failed!" >> /var/log/snapshot-tree-backup.log
fi
42
43 diff --git a/scripts/cache-tools.py b/scripts/cache-tools.py
44 new file mode 100755
45 index 0000000..0364450
46 --- /dev/null
47 +++ b/scripts/cache-tools.py
48 @@ -0,0 +1,700 @@
49 +#!/usr/bin/env python
50 +# Copyright 1999-2006 Gentoo Foundation
51 +# Distributed under the terms of the GNU General Public License v2
52 +# $Header: $
53 +#
54 +# Zac Medico <zmedico@g.o>
55 +#
56 +
57 +import errno, fpformat, os, sys, time
58 +
59 +if not hasattr(__builtins__, "set"):
60 + from sets import Set as set
61 +from itertools import chain
62 +
def create_syncronized_func(myfunc, mylock):
	"""Return a wrapper around myfunc that serializes calls via mylock.

	Bug fix: the original returned the unwrapped myfunc (so the lock was
	never actually used) and the wrapper discarded myfunc's return value.
	The wrapper itself is now returned and the wrapped result propagated.
	"""
	def newfunc(*pargs, **kwargs):
		mylock.acquire()
		try:
			return myfunc(*pargs, **kwargs)
		finally:
			mylock.release()
	return newfunc
71 +
class ConsoleUpdate(object):
	"""Write single-line, in-place status updates to a terminal stream.

	The methods listed in _synchronized_methods are rebound at
	construction time to wrappers produced by create_syncronized_func()
	so that concurrent callers serialize on an internal re-entrant lock.
	"""

	# Instance methods to wrap with the lock in __init__.
	_synchronized_methods = ["append", "carriageReturn",
		"newLine", "reset", "update"]

	def __init__(self):
		# Characters written on the current line since the last return.
		self.offset = 0
		import sys
		self.stream = sys.stdout
		# When True, all output methods become no-ops.
		self.quiet = False
		import threading
		self._lock = threading.RLock()
		# Rebind each listed method to a lock-aware wrapper
		# (see create_syncronized_func).
		for method_name in self._synchronized_methods:
			myfunc = create_syncronized_func(
				getattr(self, method_name), self._lock)
			setattr(self, method_name, myfunc)
		# ANSI code that clears from the cursor to the end of the line
		self._CLEAR_EOL = None
		try:
			import curses
			try:
				# Query the terminfo database for the "el" capability.
				curses.setupterm()
				self._CLEAR_EOL = curses.tigetstr('el')
			except curses.error:
				pass
		except ImportError:
			pass
		if not self._CLEAR_EOL:
			# Fall back to the common VT100 "erase to end of line" code.
			self._CLEAR_EOL = '\x1b[K'

	def acquire(self, **kwargs):
		# Expose the internal lock so callers can group several updates.
		return self._lock.acquire(**kwargs)

	def release(self):
		self._lock.release()

	def reset(self):
		# Forget how much has been written on the current line.
		self.offset = 0

	def carriageReturn(self):
		# Return to column 0 and erase the current line.
		if not self.quiet:
			self.stream.write("\r")
			self.stream.write(self._CLEAR_EOL)
			self.offset = 0

	def newLine(self):
		# Commit the current line and start a fresh one.
		if not self.quiet:
			self.stream.write("\n")
			self.stream.flush()
			self.reset()

	def update(self, msg):
		# Replace the current line's contents with msg.
		if not self.quiet:
			self.carriageReturn()
			self.append(msg)

	def append(self, msg):
		# Write msg at the current position, tracking its width.
		if not self.quiet:
			self.offset += len(msg)
			self.stream.write(msg)
			self.stream.flush()
133 +
class ProgressCounter(object):
	"""Minimal progress state: a running count and an expected total."""

	def __init__(self):
		# Both counters start at zero; callers update them directly.
		self.current = 0
		self.total = 0
138 +
class ProgressAnalyzer(ProgressCounter):
	"""Track progress over time and estimate completion times.

	Callers update self.current, self.total and self.currentTime, then
	query percentage(), elapsed_time(), remaining_time() and totalTime().
	"""
	def __init__(self):
		# Bug fix: initialize the inherited counters.  The original
		# overrode __init__ without calling ProgressCounter.__init__,
		# leaving self.total / self.current undefined until a caller
		# happened to assign them (percentage() would raise
		# AttributeError before that).
		ProgressCounter.__init__(self)
		self.start_time = time.time()
		self.currentTime = self.start_time
		# Sliding window of (time, count) samples for rate estimation.
		self._samples = []
		self.sampleCount = 20
	def percentage(self, digs=0):
		"""Return the completion percentage as a string with digs decimals."""
		if self.total > 0:
			float_percent = 100 * float(self.current) / float(self.total)
		else:
			float_percent = 0.0
		# "%.*f" produces the same fixed-point string as the deprecated
		# fpformat.fix(float_percent, digs).
		return "%.*f" % (digs, float_percent)
	def totalTime(self):
		"""Estimate total run time from the recent sample window."""
		self._samples.append((self.currentTime, self.current))
		while len(self._samples) > self.sampleCount:
			self._samples.pop(0)
		prev_time, prev_count = self._samples[0]
		time_delta = self.currentTime - prev_time
		if time_delta > 0:
			rate = (self.current - prev_count) / time_delta
			if rate > 0:
				return self.total / rate
		# No measurable progress yet.
		return 0
	def remaining_time(self):
		"""Estimated seconds left (totalTime minus elapsed)."""
		return self.totalTime() - self.elapsed_time()
	def elapsed_time(self):
		"""Seconds between construction and self.currentTime."""
		return self.currentTime - self.start_time
166 +
class ConsoleProgress(object):
	"""Render ProgressAnalyzer state as a one-line console status."""

	def __init__(self, name="Progress", console=None):
		self.name = name
		self.analyzer = ProgressAnalyzer()
		# Reuse the caller's console when given, otherwise create our own.
		self.console = ConsoleUpdate() if console is None else console
		self.time_format = "%H:%M:%S"
		self.quiet = False
		self.lastUpdate = 0
		# Minimum seconds between console refreshes.
		self.latency = 0.5

	def formatTime(self, t):
		"""Format a duration in seconds as HH:MM:SS."""
		return time.strftime(self.time_format, time.gmtime(t))

	def displayProgress(self, current, total):
		"""Feed new counts to the analyzer and refresh the status line."""
		if self.quiet:
			return
		now = time.time()
		self.analyzer.currentTime = now
		# Throttle so a busy loop does not flood the terminal.
		if now - self.lastUpdate < self.latency:
			return
		self.lastUpdate = now
		self.analyzer.current = current
		self.analyzer.total = total
		fields = [self.name + ": " + self.analyzer.percentage(1).rjust(4) + "%"]
		fields.append("Elapsed: " + self.formatTime(self.analyzer.elapsed_time()))
		fields.append("Remaining: " + self.formatTime(self.analyzer.remaining_time()))
		fields.append("Total: " + self.formatTime(self.analyzer.totalTime()))
		self.console.update(" ".join(fields))
199 +
class ProgressHandler(object):
	"""Rate-limit progress callbacks; users supply the display() method."""

	def __init__(self):
		self.curval = 0
		self.maxval = 0
		self.last_update = 0
		# Seconds that must pass between consecutive display() calls.
		self.min_display_latency = 0.2

	def onProgress(self, maxval, curval):
		"""Record the latest counts and redraw when enough time has passed."""
		self.maxval = maxval
		self.curval = curval
		now = time.time()
		if now - self.last_update >= self.min_display_latency:
			self.last_update = now
			self.display()

	def display(self):
		# Subclasses (or externally assigned callables) do the rendering.
		raise NotImplementedError(self)
217 +
def open_file(filename=None):
	"""Return a writable stream for status/report output.

	None selects sys.stderr, "-" selects sys.stdout; any other value is
	treated as a path (with ~ expansion) and opened in append mode.
	Exits the process with the error's errno if the file cannot be opened.
	"""
	if filename is None:
		f = sys.stderr
	elif filename == "-":
		f = sys.stdout
	else:
		try:
			filename = os.path.expanduser(filename)
			f = open(filename, "a")
		# "except X as e" is equivalent to the old "except X, e" form but
		# is also valid on Python 3 (and Python >= 2.6).
		except (IOError, OSError) as e:
			sys.stderr.write("%s\n" % e)
			sys.exit(e.errno)
	return f
231 +
def create_log(name="", logfile=None, loglevel=0):
	"""Build a logging.Logger that emits "LEVEL message" lines.

	The destination stream is resolved by open_file(), so logfile may be
	None (stderr), "-" (stdout), or a path opened in append mode.
	"""
	import logging
	logger = logging.getLogger(name)
	logger.setLevel(loglevel)
	stream_handler = logging.StreamHandler(open_file(logfile))
	stream_handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
	logger.addHandler(stream_handler)
	return logger
240 +
def is_interrupt(e):
	"""Return True when exception e represents an interrupt.

	SystemExit and KeyboardInterrupt always count, as does any error
	object whose errno equals EINTR (an interrupted system call).
	"""
	if not isinstance(e, (SystemExit, KeyboardInterrupt)):
		return getattr(e, "errno", None) == errno.EINTR
	return True
245 +
def mirror_cache(valid_nodes_iterable, src_cache, trg_cache, log,
	eclass_cache, cleanse_on_transfer_failure):
	"""Copy the cache entries named by valid_nodes_iterable from src_cache
	into trg_cache, skipping entries that are already identical.

	Returns (update_count, cleanse_candidates): the number of entries
	written and the set of trg_cache keys the caller should delete.
	When cleanse_on_transfer_failure is true, keys whose transfer fails
	remain cleanse candidates; otherwise every visited key is kept.
	"""

	# Start by assuming every existing target key is stale; keys are
	# removed from this set as they are visited/confirmed below.
	cleanse_candidates = set(trg_cache.iterkeys())
	update_count = 0

	# Since the loop below is mission critical, we continue after *any*
	# exception that is not an interrupt.

	for x in valid_nodes_iterable:
		log.debug("%s mirroring" % x)
		if not cleanse_on_transfer_failure:
			cleanse_candidates.discard(x)

		try:
			entry = copy_dict(src_cache[x])
		except KeyError, e:
			log.error("%s missing source: %s" % (x, str(e)))
			del e
			continue
		except Exception, e:
			if is_interrupt(e):
				raise
			log.error("%s reading source: %s" % (x, str(e)))
			del e
			continue

		write_it = True
		trg = None

		try:
			trg = copy_dict(trg_cache[x])
			# Fast path: same mtime, valid eclasses, same eclass set
			# means the target entry is very likely current already.
			if long(trg["_mtime_"]) == long(entry["_mtime_"]) and \
				eclass_cache.is_eclass_data_valid(trg["_eclasses_"]) and \
				set(trg["_eclasses_"]) == set(entry["_eclasses_"]):
				write_it = False
		except KeyError:
			pass
		except Exception, e:
			if is_interrupt(e):
				raise
			log.error("%s reading target: %s" % (x, str(e)))
			del e

		if trg and not write_it:
			""" We don't want to skip the write unless we're really sure that
			the existing cache is identical, so don't trust _mtime_ and
			_eclasses_ alone."""
			# Normalize the default EAPI before comparing field by field.
			for d in (entry, trg):
				if "EAPI" in d and d["EAPI"] in ("", "0"):
					del d["EAPI"]
			for k in set(chain(entry, trg)).difference(
				("_mtime_", "_eclasses_")):
				if trg.get(k, "") != entry.get(k, ""):
					write_it = True
					break

		if write_it:
			update_count += 1
			log.info("%s transferring" % x)
			inherited = entry.get("INHERITED", None)
			if inherited:
				if src_cache.complete_eclass_entries:
					# Source cache carries eclass data; require it valid.
					if not "_eclasses_" in entry:
						log.error("%s missing _eclasses_" % x)
						continue
					if not eclass_cache.is_eclass_data_valid(entry["_eclasses_"]):
						log.error("%s stale _eclasses_" % x)
						continue
				else:
					# Reconstruct eclass data from INHERITED instead.
					entry["_eclasses_"] = eclass_cache.get_eclass_data(entry["INHERITED"].split(), \
						from_master_only=True)
					if not entry["_eclasses_"]:
						log.error("%s stale _eclasses_" % x)
						continue
			try:
				trg_cache[x] = entry
				cleanse_candidates.discard(x)
			except Exception, e:
				if is_interrupt(e):
					raise
				log.error("%s writing target: %s" % (x, str(e)))
				del e
		else:
			# Entry verified identical; keep it in the target.
			cleanse_candidates.discard(x)

	if not trg_cache.autocommits:
		try:
			trg_cache.commit()
		except Exception, e:
			if is_interrupt(e):
				raise
			log.error("committing target: %s" % str(e))
			del e

	return update_count, cleanse_candidates
342 +
def copy_dict(src, dest=None):
	"""Shallow-copy every key/value pair from src into dest and return dest.

	Some cache implementations throw cache errors when accessing the values.
	We grab all the values at once here so that we don't have to be concerned
	about exceptions later.  A new dict is created when dest is None.
	"""
	if dest is None:
		dest = {}
	# Explicit iteration forces each value to be read (and validated) now.
	for k, v in src.iteritems():
		dest[k] = v
	return dest
352 +
class ListPackages(object):
	"""Collect every package version (cpv) in the repository.

	run() fills self.cpv_all with portdb.cp_list() results for each
	category/package, visited in shuffled or sorted order.
	"""
	def __init__(self, portdb, log, shuffle=False):
		self._portdb = portdb
		self._log = log
		# Random order keeps parallel instances from competing to
		# generate metadata for the same packages.
		self._shuffle = shuffle

	def run(self):
		"""Build self.cpv_all, logging and skipping per-package errors."""
		log = self._log
		cp_list = self._portdb.cp_list
		cp_all = self._portdb.cp_all()
		if self._shuffle:
			from random import shuffle
			shuffle(cp_all)
		else:
			cp_all.sort()
		cpv_all = []
		# Since the loop below is mission critical, we continue after *any*
		# exception that is not an interrupt.
		for cp in cp_all:
			log.debug("%s cp_list" % cp)
			try:
				cpv_all.extend(cp_list(cp))
			# "as" form is equivalent to the old comma form but is also
			# valid on Python 3 (and Python >= 2.6).
			except Exception as e:
				if is_interrupt(e):
					raise
				self._log.error("%s cp_list: %s" % (cp, str(e)))

		self.cpv_all = cpv_all
381 +
class MetadataGenerate(object):
	"""Generate metadata cache for a list of package versions.

	When cache generation fails for some reason, cleanse the stale cache
	entry if it exists. This prevents the master mirror from distributing
	stale cache, and will allow clients to safely assume that all cache is
	valid. The mtime requirement is especially annoying due to bug #139134
	(timestamps of cache entries don't change when an eclass changes) and the
	interaction of timestamps with rsync.
	"""
	def __init__(self, portdb, cpv_all, log):
		self._portdb = portdb
		self._cpv_all = cpv_all
		self._log = log

	def run(self, onProgress=None):
		"""Generate cache for every cpv; record failures for cleansing.

		onProgress, when given, is invoked as onProgress(maxval, curval)
		after each package.  Afterwards self.target_cache holds the auxdb
		and self.dead_nodes the keys whose generation failed.
		"""
		log = self._log
		portdb = self._portdb
		cpv_all = self._cpv_all
		auxdb = portdb.auxdb[portdb.porttree_root]
		cleanse_candidates = set(auxdb.iterkeys())

		# Since the loop below is mission critical, we continue after *any*
		# exception that is not an interrupt.
		maxval = len(cpv_all)
		curval = 0
		if onProgress:
			onProgress(maxval, curval)
		while cpv_all:
			cpv = cpv_all.pop(0)
			log.debug("%s generating" % cpv)
			try:
				# aux_get() populates the cache as a side effect.
				portdb.aux_get(cpv, ["EAPI"])
				# Cleanse if the above doesn't succeed (prevent clients from
				# receiving stale cache, and let them assume it is valid).
				cleanse_candidates.discard(cpv)
			# "as" form is equivalent to the old comma form but is also
			# valid on Python 3 (and Python >= 2.6).
			except Exception as e:
				if is_interrupt(e):
					raise
				log.error("%s generating: %s" % (cpv, str(e)))
				del e
			curval += 1
			if onProgress:
				onProgress(maxval, curval)

		self.target_cache = auxdb
		self.dead_nodes = cleanse_candidates
426 +
class MetadataTransfer(object):
	"""Copy metadata between the tree's metadata/cache and portage's auxdb.

	The direction is controlled by forward: when true, entries flow from
	the portdir cache into the auxdb; when false, the other way around.
	The actual copying is delegated to mirror_cache().
	"""
	def __init__(self, portdb, cpv_all, forward, cleanse_on_transfer_failure,
		log):
		self._portdb = portdb
		self._cpv_all = cpv_all
		self._log = log
		self._forward = forward
		self._cleanse_on_transfer_failure = cleanse_on_transfer_failure

	def run(self, onProgress=None):
		# Results land in self.target_cache, self.update_count and
		# self.dead_nodes when the transfer completes.
		log = self._log
		portdb = self._portdb
		cpv_all = self._cpv_all
		aux_cache = portdb.auxdb[portdb.porttree_root]
		import portage
		auxdbkeys = portage.auxdbkeys[:]
		metadbmodule = portdb.mysettings.load_best_module("portdbapi.metadbmodule")
		portdir_cache = metadbmodule(portdb.porttree_root, "metadata/cache",
			auxdbkeys)

		maxval = len(cpv_all)
		curval = 0
		if onProgress:
			onProgress(maxval, curval)
		# Iterator that drains pkg_list and reports progress as it goes.
		class pkg_iter(object):
			def __init__(self, pkg_list, onProgress=None):
				self.pkg_list = pkg_list
				self.maxval = len(pkg_list)
				self.curval = 0
				self.onProgress = onProgress
			def __iter__(self):
				while self.pkg_list:
					yield self.pkg_list.pop()
					self.curval += 1
					if self.onProgress:
						self.onProgress(self.maxval, self.curval)

		# Select source and target caches based on the transfer direction.
		if self._forward:
			src_cache = portdir_cache
			trg_cache = aux_cache
		else:
			src_cache = aux_cache
			trg_cache = portdir_cache

		""" This encapsulates validation of eclass timestamps and also fills in
		missing data (mtimes and/or paths) as necessary for the given cache
		format."""
		eclass_cache = portage.eclass_cache.cache(portdb.porttree_root)

		if not trg_cache.autocommits:
			trg_cache.sync(100)

		self.target_cache = trg_cache
		self.update_count, self.dead_nodes = mirror_cache(
			pkg_iter(cpv_all, onProgress=onProgress),
			src_cache, trg_cache, log, eclass_cache,
			self._cleanse_on_transfer_failure)
484 +
class CacheCleanse(object):
	"""Delete a collection of stale entries (dead_nodes) from a cache."""
	def __init__(self, auxdb, dead_nodes, log):
		self._auxdb = auxdb
		self._dead_nodes = dead_nodes
		self._log = log
	def run(self):
		"""Remove each dead node, logging and continuing on failure."""
		auxdb = self._auxdb
		log = self._log
		for cpv in self._dead_nodes:
			try:
				log.info("%s cleansing" % cpv)
				del auxdb[cpv]
			# "as" form is equivalent to the old comma form but is also
			# valid on Python 3 (and Python >= 2.6).
			except Exception as e:
				if is_interrupt(e):
					raise
				log.error("%s cleansing: %s" % (cpv, str(e)))
				del e
502 +
def import_portage():
	"""Import and return the portage module with legacy globals disabled.

	Before importing, adjust portage's notion of group permissions: when
	the process is neither in group 0 nor in the portage group, pretend
	our own gid is the portage gid so the import does not fail on
	permission checks.
	"""
	try:
		from portage import data as portage_data
	except ImportError:
		# Older portage layouts expose portage_data as a top-level module.
		import portage_data
	# If we're not already root or in the portage group, we make the gid of the
	# current process become portage_gid.
	if os.getgid() != 0 and portage_data.portage_gid not in os.getgroups():
		portage_data.portage_gid = os.getgid()
		portage_data.secpass = 1

	# Suppress portage's legacy module-level globals for the import only.
	os.environ["PORTAGE_LEGACY_GLOBALS"] = "false"
	import portage
	del os.environ["PORTAGE_LEGACY_GLOBALS"]
	return portage
518 +
def create_portdb(portdir=None, cachedir=None, config_root=None,
	target_root=None, profile=None, **kwargs):
	"""Build a portage.portdbapi for the main repository only.

	Extra keyword arguments (e.g. the full parsed-option dict) are
	accepted and ignored via **kwargs.  Environment variables supply
	defaults for config_root (PORTAGE_CONFIGROOT) and target_root (ROOT).
	"""

	if cachedir is not None:
		os.environ["PORTAGE_DEPCACHEDIR"] = cachedir
	if config_root is None:
		config_root = os.environ.get("PORTAGE_CONFIGROOT", "/")
	if target_root is None:
		target_root = os.environ.get("ROOT", "/")
	if profile is None:
		profile = ""

	portage = import_portage()
	try:
		from portage import const as portage_const
	except ImportError:
		import portage_const

	# Disable overlays because we only generate metadata for the main repo.
	os.environ["PORTDIR_OVERLAY"] = ""
	conf = portage.config(config_profile_path=profile,
		config_incrementals=portage_const.INCREMENTALS,
		target_root=target_root,
		config_root=config_root)

	if portdir is None:
		portdir = conf["PORTDIR"]

	# The canonical path is the key for portdb.auxdb.
	portdir = os.path.realpath(portdir)
	conf["PORTDIR"] = portdir
	conf.backup_changes("PORTDIR")

	portdb = portage.portdbapi(portdir,
		mysettings=conf)

	return portdb
556 +
def parse_args(myargv):
	"""Parse command-line options into a plain dict.

	A dict (rather than an optparse.Values instance) is returned so the
	result can be passed straight to functions such as create_portdb()
	via **opts.  Typos in the user-visible help strings are fixed here
	("defauls" -> "defaults", "ouput" -> "output").
	"""
	description = "This program will ensure that the metadata cache is up to date for the entire portage tree."
	usage = "usage: cache-tools [options] --generate || --transfer"
	from optparse import OptionParser
	parser = OptionParser(description=description, usage=usage)
	parser.add_option("--portdir",
		help="location of the portage tree",
		dest="portdir")
	parser.add_option("--cachedir",
		help="location of the metadata cache",
		dest="cachedir")
	parser.add_option("--profile",
		help="location of the profile",
		dest="profile")
	parser.add_option("--generate",
		help="generate metadata as necessary to ensure that the cache is fully populated",
		action="store_true", dest="generate", default=False)
	parser.add_option("--shuffle",
		help="generate cache in random rather than sorted order (useful to prevent two separate instances from competing to generate metadata for the same packages simultaneously)",
		action="store_true", dest="shuffle", default=False)
	parser.add_option("--transfer",
		help="transfer metadata from portdir to cachedir or vice versa",
		action="store_true", dest="transfer", default=False)
	parser.add_option("--cleanse-on-transfer-failure",
		help="cleanse target cache when transfer fails for any reason (such as the source being unavailable)",
		action="store_true", dest="cleanse_on_transfer_failure", default=False)
	parser.add_option("--forward",
		help="forward metadata transfer flows from portdir to cachedir (default)",
		action="store_true", dest="forward", default=True)
	parser.add_option("--reverse",
		help="reverse metadata transfer flows from cachedir to portdir",
		action="store_false", dest="forward", default=True)
	parser.add_option("--logfile",
		help="send status messages to a file (default is stderr)",
		dest="logfile", default=None)
	parser.add_option("--loglevel",
		help="numeric log level (defaults to 0 and may range from 0 to 50 corresponding to the default levels of the python logging module)",
		dest="loglevel", default="0")
	parser.add_option("--reportfile",
		help="send a report to a file",
		dest="reportfile", default=None)
	parser.add_option("--spawn-outfile",
		help="redirect output of spawned processes to a file instead of stdout/stderr",
		dest="spawn_outfile", default=None)
	parser.add_option("--no-progress",
		action="store_false", dest="progress", default=True,
		help="disable progress output to tty")
	options, args = parser.parse_args(args=myargv)

	# Conversion to dict allows us to use **opts as function args later on.
	opts = {}
	all_options = ("portdir", "cachedir", "profile", "progress", "logfile",
		"loglevel", "generate", "transfer", "forward", "shuffle",
		"spawn_outfile", "reportfile", "cleanse_on_transfer_failure")
	for opt_name in all_options:
		opts[opt_name] = getattr(options, opt_name)
	return opts
615 +
def run_command(args):
	"""Top-level driver: parse options, set up console and signal handling,
	then run the selected job (--generate or --transfer) plus cleansing.

	NOTE: the args parameter is ignored; options are re-parsed from
	sys.argv[1:] directly.
	"""
	opts = parse_args(sys.argv[1:])

	if opts["spawn_outfile"]:
		# Keep our own stdout/stderr on duplicated descriptors, then
		# point fds 1 and 2 at the requested file so spawned children
		# write there instead of the terminal.
		fd = os.dup(1)
		sys.stdout = os.fdopen(fd, 'w')
		fd = os.dup(2)
		sys.stderr = os.fdopen(fd, 'w')
		f = open_file(opts["spawn_outfile"])
		os.dup2(f.fileno(), 1)
		os.dup2(f.fileno(), 2)
		del fd, f

	console = ConsoleUpdate()
	# Progress output only makes sense on an interactive terminal.
	if not opts["progress"] or not sys.stdout.isatty():
		console.quiet = True
	job = None
	import signal, thread, threading
	shutdown_initiated = threading.Event()
	shutdown_complete = threading.Event()
	def shutdown_console():
		# Runs in a helper thread so the main thread can finish any
		# console interaction before the process is torn down.
		console.acquire()
		try:
			console.update("Interrupted.")
			console.newLine()
			console.quiet = True
			shutdown_complete.set()
			# Kill the main thread if necessary.
			# This causes the SIGINT signal handler to be invoked in the
			# main thread. The signal handler needs to be an actual
			# callable object (rather than something like signal.SIG_DFL)
			# in order to avoid TypeError: 'int' object is not callable.
			thread.interrupt_main()
			thread.exit()
		finally:
			console.release()

	def handle_interrupt(*args):
		if shutdown_complete.isSet():
			sys.exit(1)
		# Lock the console from a new thread so that the main thread is allowed
		# to cleanly complete any console interaction that may have been in
		# progress when this interrupt arrived.
		if not shutdown_initiated.isSet():
			thread.start_new_thread(shutdown_console, ())
			shutdown_initiated.set()

	signal.signal(signal.SIGINT, handle_interrupt)
	signal.signal(signal.SIGTERM, handle_interrupt)

	try:
		import datetime
		datestamp = str(datetime.datetime.now())
		time_begin = time.time()
		log = create_log(name="MetadataGenerate",
			logfile=opts["logfile"], loglevel=int(opts["loglevel"]))
		if opts["reportfile"]:
			reportfile = open_file(opts["reportfile"])
		portdb = create_portdb(**opts)
		try:
			# Lower priority per the tree's PORTAGE_NICENESS setting.
			os.nice(int(portdb.mysettings.get("PORTAGE_NICENESS", "0")))
		except (OSError, ValueError), e:
			log.error("PORTAGE_NICENESS failed: '%s'" % str(e))
			del e

		# Enumerate every package version before starting the real work.
		job = ListPackages(portdb, log, shuffle=opts["shuffle"])
		console.update("Listing packages in repository...")
		job.run()
		cpv_all = job.cpv_all
		total_count = len(cpv_all)
		# Select the requested job type.
		if opts["generate"]:
			job = MetadataGenerate(portdb, cpv_all, log)
			name = "Cache generation"
			complete_msg = "Metadata generation is complete."
		elif opts["transfer"]:
			job = MetadataTransfer(portdb, cpv_all, opts["forward"],
				opts["cleanse_on_transfer_failure"], log)
			if opts["forward"]:
				name = "Forward transfer"
				complete_msg = "Forward metadata transfer is complete."
			else:
				name = "Reverse transfer"
				complete_msg = "Reverse metadata transfer is complete."
		else:
			sys.stderr.write("required options: --generate || --transfer\n")
			sys.exit(os.EX_USAGE)
		job.opts = opts

		# Wire a rate-limited progress display onto the job, if wanted.
		onProgress = None
		if not console.quiet:
			ui = ConsoleProgress(name=name, console=console)
			progressHandler = ProgressHandler()
			onProgress = progressHandler.onProgress
			def display():
				ui.displayProgress(progressHandler.curval, progressHandler.maxval)
			progressHandler.display = display

		job.run(onProgress=onProgress)

		if not console.quiet:
			# make sure the final progress is displayed
			progressHandler.display()

		update_count = None
		if opts["transfer"]:
			update_count = job.update_count
		# Remove whatever the job flagged as stale.
		target_cache = job.target_cache
		dead_nodes = job.dead_nodes
		cleanse_count = len(dead_nodes)
		console.update("Cleansing cache...")
		job = CacheCleanse(target_cache, dead_nodes, log)
		job.run()
		console.update(complete_msg)
		console.newLine()
		time_end = time.time()
		if opts["reportfile"]:
			# Append a fixed-width summary block to the report file.
			width = 20
			reportfile.write(name.ljust(width) + "%s\n" % datestamp)
			reportfile.write("Elapsed seconds".ljust(width) + "%f\n" % (time_end - time_begin))
			reportfile.write("Total packages".ljust(width) + "%i\n" % total_count)
			if update_count is not None:
				reportfile.write("Updated packages".ljust(width) + "%i\n" % update_count)
			reportfile.write("Cleansed packages".ljust(width) + "%i\n" % cleanse_count)
			reportfile.write(("-"*50)+"\n")
	except Exception, e:
		# Interrupts funnel through the shutdown path; anything else is a bug.
		if not is_interrupt(e):
			raise
		del e
		handle_interrupt()
	sys.exit(0)
746 +
if __name__ == "__main__":
	# NOTE: run_command ignores its argument and re-parses sys.argv itself.
	run_command(sys.argv[1:])
749
750 diff --git a/scripts/copy_buildsync.sh b/scripts/copy_buildsync.sh
751 new file mode 100755
752 index 0000000..729ff38
753 --- /dev/null
754 +++ b/scripts/copy_buildsync.sh
755 @@ -0,0 +1,127 @@
#!/bin/bash
# Publish freshly synced autobuild artifacts (stage3 tarballs, ISOs) from the
# buildsync drop directory to the public weekly release tree, regenerate the
# "latest-*" pointer files and "current-*" symlinks, then prune old builds.

ARCHES="alpha amd64 arm hppa ia64 ppc sparc x86 sh s390"
 #alpha amd64 arm hppa ia64 mips ppc s390 sh sparc x86
#ARCHES="s390"
RSYNC_OPTS="-aO --delay-updates"
DEBUG=
VERBOSE=

# Names of the generated pointer files.
OUT_STAGE3="latest-stage3.txt"
OUT_ISO="latest-iso.txt"

# Nothing to edit beyond this point

# DEBUG mode prefixes destructive commands with "echo" and makes rsync dry-run.
DEBUGP=
VERBOSEP=
[ -n "$DEBUG" ] && DEBUGP=echo
[ -n "$DEBUG" ] && RSYNC_OPTS="${RSYNC_OPTS} -n"
[ -n "$VERBOSE" ] && RSYNC_OPTS="${RSYNC_OPTS} -v"
[ -n "$VERBOSEP" ] && VERBOSEP="-v"

for ARCH in $ARCHES; do
	rc=0
	fail=0

	indir=/home/buildsync/builds/${ARCH}
	outdir=/release/weekly/${ARCH}
	tmpdir=/release/tmp/buildsync/partial/${ARCH}

	mkdir -p ${tmpdir} 2>/dev/null
	# Copying
	if [ -d "${indir}" ]; then
		# Each 20YYMMDD... timestamp found in incoming filenames gets its
		# own ${outdir}/<timestamp> directory.
		for i in $(find ${indir} -type f | grep -- '-20[0123][0-9]\{5\}' | sed -e 's:^.*-\(20[^.]\+\).*$:\1:' | sort -ur); do
			#echo "Doing $i"
			t="${outdir}/${i}"
			mkdir -p ${t} 2>/dev/null
			rsync ${RSYNC_OPTS} --temp-dir=${tmpdir} --partial-dir=${tmpdir} ${indir}/ --filter "S *${i}*" --filter 'S **/' --filter 'H *' ${t}
			rc=$?
			if [ $rc -eq 0 ]; then
				# Source files are removed only after a clean rsync.
				find ${indir} -type f -name "*${i}*" -print0 | xargs -0 --no-run-if-empty $DEBUGP rm $VERBOSEP -f
			else
				echo "Not deleting ${indir}/*${i}*, rsync failed!" 1>&2
				fail=1
			fi
		done
		# Drop now-empty output directories (keeping "current" symlink dirs).
		find ${outdir} -mindepth 1 -type d \
		| egrep -v current \
		| sort -r \
		| tr '\n' '\0' \
		|xargs -0 --no-run-if-empty rmdir --ignore-fail-on-non-empty
	fi

	# ================================================================
	# Build data for revealing latest:
	# *.iso
	# stage3*bz2
	cd "${outdir}"
	# %T@

	iso_list="$(find 20* -name '*.iso' -printf '%h %f %h/%f\n' |grep -v hardened | sort -n)"
	stage3_list="$(find 20* -name 'stage3*bz2' -printf '%h %f %h/%f\n' |grep -v hardened | sort -n)"
	latest_iso_date="$(echo -e "${iso_list}" |awk '{print $1}' |cut -d/ -f1 | tail -n1)"
	latest_stage3_date="$(echo -e "${stage3_list}" |awk '{print $1}' |cut -d/ -f1 | tail -n1)"
	header="$(echo -e "# Latest as of $(date -uR)\n# ts=$(date -u +%s)")"

	# Do not remove this
	[ -z "${latest_iso_date}" ] && latest_iso_date="NONE-FOUND"
	[ -z "${latest_stage3_date}" ] && latest_stage3_date="NONE-FOUND"

	if [ -n "${iso_list}" ]; then
		echo -e "${header}" >"${OUT_ISO}"
		echo -e "${iso_list}" |awk '{print $3}' | grep "$latest_iso_date" >>${OUT_ISO}
		rm -f current-iso
		ln -sf "$latest_iso_date" current-iso
	fi
	if [ -n "${stage3_list}" ]; then
		echo -e "${header}" >"${OUT_STAGE3}"
		echo -e "${stage3_list}" |awk '{print $3}' |grep "$latest_stage3_date" >>${OUT_STAGE3}
		rm -f current-stage3
		# The "latest stage3" concept doesn't apply to the arm variants
		# that are pushed on different days of the week.
		if [[ ! $(echo ${outdir} | grep arm) ]]; then
			ln -sf "$latest_stage3_date" current-stage3
		fi
	fi

	# new variant preserve code
	# NOTE(review): this sed mixes "-r" (extended regexes) with the
	# basic-regex escape "\{5\}" -- under ERE that matches literal
	# braces; verify the pattern actually strips the date suffix.
	variants="$(find 20* \( -iname '*.iso' -o -iname '*.tar.bz2' \) -printf '%f\n' |sed -e 's,-20[012][0-9]\{5\}.*,,g' -r | sort | uniq)"
	echo -n '' >"${tmpdir}"/.keep.${ARCH}.txt
	for v in $variants ; do
		#date_variant=$(find 20* -iname "${v}*" \( -name '*.tar.bz2' -o -iname '*.iso' \) -printf '%h\n' | sed -e "s,.*/$a/autobuilds/,,g" -e 's,/.*,,g' |sort -n | tail -n1 )
		# NOTE(review): "$a" below is never assigned anywhere in this
		# script; "${ARCH}" was presumably intended -- confirm before
		# relying on that sed substitution.
		variant_path=$(find 20* -iname "${v}*" \( -name '*.tar.bz2' -o -iname '*.iso' \) -print | sed -e "s,.*/$a/autobuilds/,,g" | sort -k1,1 -t/ | tail -n1 )
		f="latest-${v}.txt"
		echo -e "${header}" >"${f}"
		echo -e "${variant_path}" >>${f}
		rm -f "current-$v"
		ln -sf "${variant_path%/*}" "current-$v"
		# Record the dated directory so the cleanup pass preserves it.
		echo "${variant_path}" | sed -e 's,/.*,,g' -e 's,^,/,g' -e 's,$,$,g' >>"${tmpdir}"/.keep.${ARCH}.txt
	done
	#echo "$date_variant" \
	#| sort | uniq | sed -e 's,^,/,g' -e 's,$,$,g' >"${tmpdir}"/.keep.${ARCH}.txt

	# ================================================================
	# Cleanup
	if [ $fail -eq 0 ]; then
		# Clean up all but latest 4 from mirror dir
		cd "${outdir}"
		#echo regex "/${latest_iso_date}\$|/${latest_stage3_date}\$"
		for i in $(find -regextype posix-basic -mindepth 1 -maxdepth 1 -type d -regex '.*20[012][0-9]\{5\}.*' \
			| sed -e 's:^.*-\(20[^.]\+\).*$:\1:' \
			| sort -ur \
			| egrep -v "/${latest_iso_date}\$|/${latest_stage3_date}\$" \
			| egrep -v -f "${tmpdir}"/.keep.${ARCH}.txt \
			| tail -n +5); do

			$DEBUGP rm $VERBOSEP -rf $(pwd)/${i}
		done

		$DEBUGP rm $VERBOSEP -rf ${tmpdir}

	else
		echo "There was some failure for $ARCH during the weekly sync. Not doing cleanup for fear of dataloss." 1>&2
	fi

done

# vim:ts=2 sw=2 noet ft=sh:
883
884 diff --git a/scripts/run_catalyst b/scripts/run_catalyst
885 new file mode 100755
886 index 0000000..997f652
887 --- /dev/null
888 +++ b/scripts/run_catalyst
889 @@ -0,0 +1,2 @@
#!/bin/bash
# Thin wrapper: run the privileged catalyst helper via sudo, forwarding
# all arguments unchanged.
sudo /release/bin/sudo_catalyst "$@"
892
893 diff --git a/scripts/run_official b/scripts/run_official
894 new file mode 100755
895 index 0000000..dfb29f2
896 --- /dev/null
897 +++ b/scripts/run_official
898 @@ -0,0 +1,39 @@
899 +#!/bin/bash
900 +
901 +email_from="auto"
902 +email_to="releng@g.o"
903 +url="https://poseidon.amd64.dev.gentoo.org/snapshots"
904 +snapshot_uri="/release/webroot/snapshots"
905 +svn_repo="/release/repos/snapshot-tree"
906 +
907 +send_email() {
908 + subject="[Snapshot] ${1}"
909 +
910 + echo -e "From: ${email_from}\r\nTo: ${email_to}\r\nSubject: ${subject}\r\n\r\nA new snapshot has been built from revision `svnlook history ${svn_repo} | head -n 3 | tail -n 1 | sed -e 's:^ *::' -e 's: .*$::'` of ${svn_repo}. You can find it at ${url}.\r\n\r\n$(cat /release/snapshots/portage-${1}.tar.bz2.DIGESTS)\r\n" | /usr/sbin/sendmail -f ${email_from} ${email_to}
911 +}
912 +
913 +if [ "${email_from}" == "auto" ]
914 +then
915 + username="$(whoami)"
916 + if [ "${username}" == "root" ]
917 + then
918 + email_from="catalyst@poseidon.amd64.dev.gentoo.org"
919 + else
920 + email_from="${username}@gentoo.org"
921 + fi
922 +fi
923 +
924 +sudo /release/bin/sudo_official "$@" && \
925 +echo "Starting rsync from /release/snapshots/portage-${1}.tar.bz2* to ${snapshot_uri}" && \
926 +rsync --archive --stats --progress /release/snapshots/portage-${1}.tar.bz2* \
927 + ${snapshot_uri}
928 +ret=$?
929 +
930 +if [ "${email_from}" == "none" ]
931 +then
932 + echo "Skipping email step as configured..."
933 +else
934 + [ $ret -eq 0 ] && send_email ${1}
935 +fi
936 +
937 +exit $ret
938
939 diff --git a/scripts/run_snapshot b/scripts/run_snapshot
940 new file mode 100755
941 index 0000000..20cc460
942 --- /dev/null
943 +++ b/scripts/run_snapshot
944 @@ -0,0 +1,2 @@
945 +#!/bin/bash
946 +sudo /release/bin/sudo_snapshot "$@"
947
948 diff --git a/scripts/stage_build.sh b/scripts/stage_build.sh
949 new file mode 100755
950 index 0000000..0dd89a9
951 --- /dev/null
952 +++ b/scripts/stage_build.sh
953 @@ -0,0 +1,162 @@
954 +#!/bin/bash
955 +
956 +PID=$$
957 +
958 +profile=
959 +version_stamp=
960 +subarch=
961 +stage1_seed=
962 +snapshot=
963 +config=/etc/catalyst/catalyst.conf
964 +email_from="catalyst@localhost"
965 +email_to="root@localhost"
966 +verbose=0
967 +
968 +usage() {
969 + msg=$1
970 +
971 + if [ -n "${msg}" ]; then
972 + echo -e "${msg}\n";
973 + fi
974 +
975 + cat <<EOH
976 +Usage:
977 + stage_build [-p|--profile <profile>] [-v|--version-stamp <stamp>]
978 + [-a|--arch <arch>] [-s|--stage1-seed <seed>] [--verbose]
979 + [-f|--email-from <from>] [-t|--email-to <to>] [-h|--help]
980 +
981 +Options:
982 + -p|--profile Sets the portage profile (required)
983 + -v|--version-stamp Sets the version stamp (required)
984 + -a|--arch Sets the 'subarch' in the spec (required)
985 + -s|--stage1-seed Sets the seed for the stage1 (required)
986 + -S|--snapshot Sets the snapshot name (if not given defaults to today's
987 + date)
988 + -c|--config catalyst config to use, defaults to catalyst default
989 + --verbose Send output of commands to console as well as log
990 + -f|--email-from Sets the 'From' on emails sent from this script (defaults
991 + to catalyst@localhost)
992 + -t|--email-to Sets the 'To' on emails sent from this script (defaults
993 + to root@localhost)
994 + -h|--help Show this message and quit
995 +
996 +Example:
997 + stage_build -p default-linux/x86/2006.1 -v 2007.0_pre -a i686 -s default/stage3-i686-2006.1
998 +EOH
999 +}
1000 +
1001 +send_email() {
1002 + subject="[${subarch}] $1"
1003 + body=$2
1004 +
1005 + echo -e "From: ${email_from}\r\nTo: ${email_to}\r\nSubject: ${subject}\r\n\r\nArch: ${subarch}\r\nProfile: ${profile}\r\nVersion stamp: ${version_stamp}\r\nStage1 seed: ${stage1_seed}\r\nSnapshot: ${snapshot}\r\n\r\n${body}\r\n" | /usr/sbin/sendmail -f ${email_from} ${email_to}
1006 +}
1007 +
1008 +run_cmd() {
1009 + cmd=$1
1010 + logfile=$2
1011 +
1012 + if [ $verbose = 1 ]; then
1013 + ${cmd} 2>&1 | tee ${logfile}
1014 + else
1015 + ${cmd} &> ${logfile}
1016 + fi
1017 +}
1018 +
1019 +# Parse args
1020 +params=${#}
1021 +while [ ${#} -gt 0 ]
1022 +do
1023 + a=${1}
1024 + shift
1025 + case "${a}" in
1026 + -h|--help)
1027 + usage
1028 + exit 0
1029 + ;;
1030 + -p|--profile)
1031 + profile=$1
1032 + shift
1033 + ;;
1034 + -v|--version-stamp)
1035 + version_stamp=$1
1036 + shift
1037 + ;;
1038 + -a|--arch)
1039 + subarch=$1
1040 + shift
1041 + ;;
1042 + -f|--email-from)
1043 + email_from=$1
1044 + shift
1045 + ;;
1046 + -t|--email-to)
1047 + email_to=$1
1048 + shift
1049 + ;;
1050 + -s|--stage1-seed)
1051 + stage1_seed=$1
1052 + shift
1053 + ;;
1054 + -S|--snapshot)
1055 + snapshot=$1
1056 + shift
1057 + ;;
1058 + -c|--config)
1059 + config=$1
1060 + shift
1061 + ;;
1062 + --verbose)
1063 + verbose=1
1064 + ;;
1065 + -*)
1066 + echo "You have specified an invalid option: ${a}"
1067 + usage
1068 + exit 1
1069 + ;;
1070 + esac
1071 +done
1072 +
1073 +# Make sure all required values were specified
1074 +if [ -z "${profile}" ]; then
1075 + usage "You must specify a profile."
1076 + exit 1
1077 +fi
1078 +if [ -z "${version_stamp}" ]; then
1079 + usage "You must specify a version stamp."
1080 + exit 1
1081 +fi
1082 +if [ -z "${subarch}" ]; then
1083 + usage "You must specify an arch."
1084 + exit 1
1085 +fi
1086 +if [ -z "${stage1_seed}" ]; then
1087 + usage "You must specify a stage1 seed."
1088 + exit 1
1089 +fi
1090 +cd /tmp
1091 +
1092 +if [ -z "${snapshot}" ]; then
1093 + snapshot=`date +%Y%m%d`
1094 + run_cmd "catalyst -c ${config} -s '${snapshot}'" "/tmp/catalyst_build_snapshot.${PID}.log"
1095 + if [ $? != 0 ]; then
1096 + send_email "Catalyst build error - snapshot" "$(</tmp/catalyst_build_snapshot.${PID}.log)"
1097 + exit 1
1098 + fi
1099 +fi
1100 +
1101 +for i in 1 2 3; do
1102 + echo -e "subarch: ${subarch}\ntarget: stage${i}\nversion_stamp: ${version_stamp}\nrel_type: default\nprofile: ${profile}\nsnapshot: ${snapshot}" > stage${i}.spec
1103 + if [ ${i} = 1 ]; then
1104 + echo "source_subpath: ${stage1_seed}" >> stage${i}.spec
1105 + else
1106 + echo "source_subpath: default/stage$(expr ${i} - 1)-${subarch}-${version_stamp}" >> stage${i}.spec
1107 + fi
1108 + run_cmd "catalyst -a -p -c ${config} -f stage${i}.spec" "/tmp/catalyst_build_stage${i}.${PID}.log"
1109 + if [ $? != 0 ]; then
1110 + send_email "Catalyst build error - stage${i}" "$(tail -n 200 /tmp/catalyst_build_stage${i}.${PID}.log)\r\n\r\nFull build log at /tmp/catalyst_build_stage${i}.${PID}.log"
1111 + exit 1
1112 + fi
1113 +done
1114 +
1115 +send_email "Catalyst build success" "Everything finished successfully."
1116
1117 diff --git a/scripts/sudo_catalyst b/scripts/sudo_catalyst
1118 new file mode 100755
1119 index 0000000..19ecc90
1120 --- /dev/null
1121 +++ b/scripts/sudo_catalyst
1122 @@ -0,0 +1,28 @@
1123 +#!/bin/bash
1124 +
1125 +usage() {
1126 + echo "Usage: $(basename ${0}) <arch> <target> <spec>"
1127 + echo "Where arch is either amd64 or x86, target is default, dev, hardened,"
1128 + echo "or uclibc, and spec is your spec file."
1129 + echo
1130 +}
1131 +
1132 +if [ -z "$1" -o -z "$2" -o -z "$3" ]
1133 +then
1134 + usage
1135 +else
1136 + target="$(grep target ${3} | cut -d' ' -f2)"
1137 + /usr/bin/catalyst -c /etc/catalyst/${1}-${2}.conf -f ${3} ${4} ${5}
1138 +# && \
1139 +# case ${target} in
1140 +# stage*|grp*|livecd-stage2)
1141 +# echo "Cleaning out ${target} temp files"
1142 +# rel_type="$(grep rel_type ${3} | cut -d' ' -f2)"
1143 +# subarch="$(grep subarch ${3} | cut -d' ' -f2)"
1144 +# version="$(grep version ${3} | cut -d' ' -f2)"
1145 +# storedir="$(grep storedir /etc/catalyst/${1}-${2}.conf | cut -d\" -f2)"
1146 +# echo "Removing ${storedir}/tmp/${rel_type}/${target}-${subarch}-${version}"
1147 +# rm -rf ${storedir}/tmp/${rel_type}/${target}-${subarch}-${version}
1148 +# ;;
1149 +# esac
1150 +fi
1151
1152 diff --git a/scripts/sudo_official b/scripts/sudo_official
1153 new file mode 100755
1154 index 0000000..80e7ca0
1155 --- /dev/null
1156 +++ b/scripts/sudo_official
1157 @@ -0,0 +1,46 @@
1158 +#!/bin/bash
1159 +
1160 +tree="/release/trees/snapshot-tree"
1161 +portdir="${tree}/${1/_beta2/}/portage"
1162 +cache_args="--portdir=${portdir} --cachedir=/release/tmp/depcache"
1163 +
1164 +usage() {
1165 + echo "Usage: $(basename ${0}) <version>"
1166 +}
1167 +
1168 +if [ -z "${1}" ]
1169 +then
1170 + usage
1171 +else
1172 + cd ${tree}
1173 + echo "Clearing out old metadata cache"
1174 + rm -rf ${portdir}/metadata/cache
1175 + echo "Performing a svn up on ${tree}"
1176 + svn up || exit 1
1177 + mkdir -p ${portdir}/metadata/cache
1178 + echo "Recreating portage metadata cache"
1179 + cache-tools.py ${cache_args} --generate || exit 1
1180 + cache-tools.py ${cache_args} --transfer --reverse \
1181 + --cleanse-on-transfer-failure || exit 1
1182 + if [ ! -d ${portdir}/metadata/cache/sys-kernel ]
1183 + then
1184 + echo "Metadata update failed! Bailing out!"
1185 + exit 1
1186 + fi
1187 + catalyst -c /etc/catalyst/snapshot-official.conf -s ${1} \
1188 + -C portdir="${portdir}" || exit 1
1189 + for i in amd64 x86
1190 + do
1191 + for j in default dev hardened uclibc
1192 + do
1193 + cd /release/buildroot/${i}-${j}/snapshots
1194 + rm -f portage-official.tar.bz2 portage-${1}.tar.bz2*
1195 + ln -sf /release/snapshots/portage-${1}.tar.bz2 \
1196 + portage-official.tar.bz2
1197 + ln -sf /release/snapshots/portage-${1}.tar.bz2 \
1198 + portage-${1}.tar.bz2
1199 + ln -sf /release/snapshots/portage-${1}.tar.bz2.DIGESTS \
1200 + portage-${1}.tar.bz2.DIGESTS
1201 + done
1202 + done
1203 +fi
1204
1205 diff --git a/scripts/sudo_snapshot b/scripts/sudo_snapshot
1206 new file mode 100755
1207 index 0000000..1ba6485
1208 --- /dev/null
1209 +++ b/scripts/sudo_snapshot
1210 @@ -0,0 +1,20 @@
1211 +#!/bin/bash
1212 +usage() {
1213 + echo "Usage: $(basename ${0}) <version>"
1214 +}
1215 +if [ -z "${1}" ]
1216 +then
1217 + usage
1218 +else
1219 + catalyst -c /etc/catalyst/snapshot.conf -s ${1}
1220 + for i in amd64 x86
1221 + do
1222 + for j in default dev hardened uclibc
1223 + do
1224 + cd /release/buildroot/${i}-${j}/snapshots
1225 + rm -f portage-${1}.tar.bz2
1226 + ln -sf /release/snapshots/portage-${1}.tar.bz2 \
1227 + portage-${1}.tar.bz2
1228 + done
1229 + done
1230 +fi
1231
1232 diff --git a/scripts/update_auto_tree b/scripts/update_auto_tree
1233 new file mode 100755
1234 index 0000000..08909e7
1235 --- /dev/null
1236 +++ b/scripts/update_auto_tree
1237 @@ -0,0 +1,2 @@
1238 +#!/bin/bash
1239 +PORTDIR="/release/trees/portage-auto/" FEATURES="$FEATURES -news" emerge --sync -q
1240
1241 diff --git a/scripts/update_official_tree b/scripts/update_official_tree
1242 new file mode 100755
1243 index 0000000..250e905
1244 --- /dev/null
1245 +++ b/scripts/update_official_tree
1246 @@ -0,0 +1,2 @@
1247 +#!/bin/bash
1248 +PORTDIR="/release/trees/portage-official/" emerge --sync
1249
1250 diff --git a/scripts/update_snapshot_tree b/scripts/update_snapshot_tree
1251 new file mode 100755
1252 index 0000000..f64742d
1253 --- /dev/null
1254 +++ b/scripts/update_snapshot_tree
1255 @@ -0,0 +1,2 @@
1256 +#!/bin/bash
1257 +PORTDIR="/release/trees/portage-snapshot/" emerge --sync