1 |
--- |
2 |
catalyst/arch/alpha.py | 6 +- |
3 |
catalyst/arch/amd64.py | 2 +- |
4 |
catalyst/arch/arm.py | 6 +- |
5 |
catalyst/arch/hppa.py | 6 +- |
6 |
catalyst/arch/ia64.py | 6 +- |
7 |
catalyst/arch/mips.py | 6 +- |
8 |
catalyst/arch/powerpc.py | 6 +- |
9 |
catalyst/arch/s390.py | 6 +- |
10 |
catalyst/arch/sh.py | 6 +- |
11 |
catalyst/arch/sparc.py | 6 +- |
12 |
catalyst/arch/x86.py | 6 +- |
13 |
catalyst/builder.py | 20 + |
14 |
catalyst/config.py | 3 +- |
15 |
catalyst/lock.py | 468 ++++++++++++++++++++ |
16 |
catalyst/main.py | 6 +- |
17 |
catalyst/modules/builder.py | 20 - |
18 |
catalyst/modules/catalyst_lock.py | 468 -------------------- |
19 |
catalyst/modules/catalyst_support.py | 718 ------------------------------- |
20 |
catalyst/modules/embedded_target.py | 2 +- |
21 |
catalyst/modules/generic_stage_target.py | 8 +- |
22 |
catalyst/modules/generic_target.py | 2 +- |
23 |
catalyst/modules/grp_target.py | 2 +- |
24 |
catalyst/modules/livecd_stage1_target.py | 2 +- |
25 |
catalyst/modules/livecd_stage2_target.py | 2 +- |
26 |
catalyst/modules/netboot2_target.py | 2 +- |
27 |
catalyst/modules/netboot_target.py | 2 +- |
28 |
catalyst/modules/snapshot_target.py | 2 +- |
29 |
catalyst/modules/stage1_target.py | 2 +- |
30 |
catalyst/modules/stage2_target.py | 2 +- |
31 |
catalyst/modules/stage3_target.py | 2 +- |
32 |
catalyst/modules/stage4_target.py | 2 +- |
33 |
catalyst/modules/tinderbox_target.py | 2 +- |
34 |
catalyst/support.py | 718 +++++++++++++++++++++++++++++++ |
35 |
33 files changed, 1269 insertions(+), 1248 deletions(-) |
36 |
create mode 100644 catalyst/builder.py |
37 |
create mode 100644 catalyst/lock.py |
38 |
delete mode 100644 catalyst/modules/builder.py |
39 |
delete mode 100644 catalyst/modules/catalyst_lock.py |
40 |
delete mode 100644 catalyst/modules/catalyst_support.py |
41 |
create mode 100644 catalyst/support.py |
42 |
|
43 |
diff --git a/catalyst/arch/alpha.py b/catalyst/arch/alpha.py |
44 |
index f0fc95a..7248020 100644 |
45 |
--- a/catalyst/arch/alpha.py |
46 |
+++ b/catalyst/arch/alpha.py |
47 |
@@ -1,6 +1,8 @@ |
48 |
|
49 |
-import builder,os |
50 |
-from catalyst_support import * |
51 |
+import os |
52 |
+ |
53 |
+from catalyst import builder |
54 |
+from catalyst.support import * |
55 |
|
56 |
class generic_alpha(builder.generic): |
57 |
"abstract base class for all alpha builders" |
58 |
diff --git a/catalyst/arch/amd64.py b/catalyst/arch/amd64.py |
59 |
index 262b55a..13e7563 100644 |
60 |
--- a/catalyst/arch/amd64.py |
61 |
+++ b/catalyst/arch/amd64.py |
62 |
@@ -1,5 +1,5 @@ |
63 |
|
64 |
-import builder |
65 |
+from catalyst import builder |
66 |
|
67 |
class generic_amd64(builder.generic): |
68 |
"abstract base class for all amd64 builders" |
69 |
diff --git a/catalyst/arch/arm.py b/catalyst/arch/arm.py |
70 |
index 2de3942..8f207ff 100644 |
71 |
--- a/catalyst/arch/arm.py |
72 |
+++ b/catalyst/arch/arm.py |
73 |
@@ -1,6 +1,8 @@ |
74 |
|
75 |
-import builder,os |
76 |
-from catalyst_support import * |
77 |
+import os |
78 |
+ |
79 |
+from catalyst import builder |
80 |
+from catalyst.support import * |
81 |
|
82 |
class generic_arm(builder.generic): |
83 |
"Abstract base class for all arm (little endian) builders" |
84 |
diff --git a/catalyst/arch/hppa.py b/catalyst/arch/hppa.py |
85 |
index f804398..3aac9b6 100644 |
86 |
--- a/catalyst/arch/hppa.py |
87 |
+++ b/catalyst/arch/hppa.py |
88 |
@@ -1,6 +1,8 @@ |
89 |
|
90 |
-import builder,os |
91 |
-from catalyst_support import * |
92 |
+import os |
93 |
+ |
94 |
+from catalyst import builder |
95 |
+from catalyst.support import * |
96 |
|
97 |
class generic_hppa(builder.generic): |
98 |
"Abstract base class for all hppa builders" |
99 |
diff --git a/catalyst/arch/ia64.py b/catalyst/arch/ia64.py |
100 |
index 825af70..4003085 100644 |
101 |
--- a/catalyst/arch/ia64.py |
102 |
+++ b/catalyst/arch/ia64.py |
103 |
@@ -1,6 +1,8 @@ |
104 |
|
105 |
-import builder,os |
106 |
-from catalyst_support import * |
107 |
+import os |
108 |
+ |
109 |
+from catalyst import builder |
110 |
+from catalyst.support import * |
111 |
|
112 |
class arch_ia64(builder.generic): |
113 |
"builder class for ia64" |
114 |
diff --git a/catalyst/arch/mips.py b/catalyst/arch/mips.py |
115 |
index b3730fa..7cce392 100644 |
116 |
--- a/catalyst/arch/mips.py |
117 |
+++ b/catalyst/arch/mips.py |
118 |
@@ -1,6 +1,8 @@ |
119 |
|
120 |
-import builder,os |
121 |
-from catalyst_support import * |
122 |
+import os |
123 |
+ |
124 |
+from catalyst import builder |
125 |
+from catalyst.support import * |
126 |
|
127 |
class generic_mips(builder.generic): |
128 |
"Abstract base class for all mips builders [Big-endian]" |
129 |
diff --git a/catalyst/arch/powerpc.py b/catalyst/arch/powerpc.py |
130 |
index e9f611b..6cec580 100644 |
131 |
--- a/catalyst/arch/powerpc.py |
132 |
+++ b/catalyst/arch/powerpc.py |
133 |
@@ -1,6 +1,8 @@ |
134 |
|
135 |
-import os,builder |
136 |
-from catalyst_support import * |
137 |
+import os |
138 |
+ |
139 |
+from catalyst import builder |
140 |
+from catalyst.support import * |
141 |
|
142 |
class generic_ppc(builder.generic): |
143 |
"abstract base class for all 32-bit powerpc builders" |
144 |
diff --git a/catalyst/arch/s390.py b/catalyst/arch/s390.py |
145 |
index bf22f66..c49e0b7 100644 |
146 |
--- a/catalyst/arch/s390.py |
147 |
+++ b/catalyst/arch/s390.py |
148 |
@@ -1,6 +1,8 @@ |
149 |
|
150 |
-import builder,os |
151 |
-from catalyst_support import * |
152 |
+import os |
153 |
+ |
154 |
+from catalyst import builder |
155 |
+from catalyst.support import * |
156 |
|
157 |
class generic_s390(builder.generic): |
158 |
"abstract base class for all s390 builders" |
159 |
diff --git a/catalyst/arch/sh.py b/catalyst/arch/sh.py |
160 |
index 2fc9531..1fa1b0b 100644 |
161 |
--- a/catalyst/arch/sh.py |
162 |
+++ b/catalyst/arch/sh.py |
163 |
@@ -1,6 +1,8 @@ |
164 |
|
165 |
-import builder,os |
166 |
-from catalyst_support import * |
167 |
+import os |
168 |
+ |
169 |
+from catalyst import builder |
170 |
+from catalyst.support import * |
171 |
|
172 |
class generic_sh(builder.generic): |
173 |
"Abstract base class for all sh builders [Little-endian]" |
174 |
diff --git a/catalyst/arch/sparc.py b/catalyst/arch/sparc.py |
175 |
index 5eb5344..2889528 100644 |
176 |
--- a/catalyst/arch/sparc.py |
177 |
+++ b/catalyst/arch/sparc.py |
178 |
@@ -1,6 +1,8 @@ |
179 |
|
180 |
-import builder,os |
181 |
-from catalyst_support import * |
182 |
+import os |
183 |
+ |
184 |
+from catalyst import builder |
185 |
+from catalyst.support import * |
186 |
|
187 |
class generic_sparc(builder.generic): |
188 |
"abstract base class for all sparc builders" |
189 |
diff --git a/catalyst/arch/x86.py b/catalyst/arch/x86.py |
190 |
index 0391b79..c8d1911 100644 |
191 |
--- a/catalyst/arch/x86.py |
192 |
+++ b/catalyst/arch/x86.py |
193 |
@@ -1,6 +1,8 @@ |
194 |
|
195 |
-import builder,os |
196 |
-from catalyst_support import * |
197 |
+import os |
198 |
+ |
199 |
+from catalyst import builder |
200 |
+from catalyst.support import * |
201 |
|
202 |
class generic_x86(builder.generic): |
203 |
"abstract base class for all x86 builders" |
204 |
diff --git a/catalyst/builder.py b/catalyst/builder.py |
205 |
new file mode 100644 |
206 |
index 0000000..ad27d78 |
207 |
--- /dev/null |
208 |
+++ b/catalyst/builder.py |
209 |
@@ -0,0 +1,20 @@ |
210 |
+ |
211 |
+class generic: |
212 |
+ def __init__(self,myspec): |
213 |
+ self.settings=myspec |
214 |
+ |
215 |
+ def mount_safety_check(self): |
216 |
+ """ |
217 |
+ Make sure that no bind mounts exist in chrootdir (to use before |
218 |
+ cleaning the directory, to make sure we don't wipe the contents of |
219 |
+ a bind mount |
220 |
+ """ |
221 |
+ pass |
222 |
+ |
223 |
+ def mount_all(self): |
224 |
+ """do all bind mounts""" |
225 |
+ pass |
226 |
+ |
227 |
+ def umount_all(self): |
228 |
+ """unmount all bind mounts""" |
229 |
+ pass |
230 |
diff --git a/catalyst/config.py b/catalyst/config.py |
231 |
index 726bf74..460bbd5 100644 |
232 |
--- a/catalyst/config.py |
233 |
+++ b/catalyst/config.py |
234 |
@@ -1,5 +1,6 @@ |
235 |
+ |
236 |
import re |
237 |
-from modules.catalyst_support import * |
238 |
+from catalyst.support import * |
239 |
|
240 |
class ParserBase: |
241 |
|
242 |
diff --git a/catalyst/lock.py b/catalyst/lock.py |
243 |
new file mode 100644 |
244 |
index 0000000..2d10d2f |
245 |
--- /dev/null |
246 |
+++ b/catalyst/lock.py |
247 |
@@ -0,0 +1,468 @@ |
248 |
+#!/usr/bin/python |
249 |
+import os |
250 |
+import fcntl |
251 |
+import errno |
252 |
+import sys |
253 |
+import string |
254 |
+import time |
255 |
+from catalyst.support import * |
256 |
+ |
257 |
+def writemsg(mystr): |
258 |
+ sys.stderr.write(mystr) |
259 |
+ sys.stderr.flush() |
260 |
+ |
261 |
+class LockDir: |
262 |
+ locking_method=fcntl.flock |
263 |
+ lock_dirs_in_use=[] |
264 |
+ die_on_failed_lock=True |
265 |
+ def __del__(self): |
266 |
+ self.clean_my_hardlocks() |
267 |
+ self.delete_lock_from_path_list() |
268 |
+ if self.islocked(): |
269 |
+ self.fcntl_unlock() |
270 |
+ |
271 |
+ def __init__(self,lockdir): |
272 |
+ self.locked=False |
273 |
+ self.myfd=None |
274 |
+ self.set_gid(250) |
275 |
+ self.locking_method=LockDir.locking_method |
276 |
+ self.set_lockdir(lockdir) |
277 |
+ self.set_lockfilename(".catalyst_lock") |
278 |
+ self.set_lockfile() |
279 |
+ |
280 |
+ if LockDir.lock_dirs_in_use.count(lockdir)>0: |
281 |
+ raise "This directory already associated with a lock object" |
282 |
+ else: |
283 |
+ LockDir.lock_dirs_in_use.append(lockdir) |
284 |
+ |
285 |
+ self.hardlock_paths={} |
286 |
+ |
287 |
+ def delete_lock_from_path_list(self): |
288 |
+ i=0 |
289 |
+ try: |
290 |
+ if LockDir.lock_dirs_in_use: |
291 |
+ for x in LockDir.lock_dirs_in_use: |
292 |
+ if LockDir.lock_dirs_in_use[i] == self.lockdir: |
293 |
+ del LockDir.lock_dirs_in_use[i] |
294 |
+ break |
295 |
+ i=i+1 |
296 |
+ except AttributeError: |
297 |
+ pass |
298 |
+ |
299 |
+ def islocked(self): |
300 |
+ if self.locked: |
301 |
+ return True |
302 |
+ else: |
303 |
+ return False |
304 |
+ |
305 |
+ def set_gid(self,gid): |
306 |
+ if not self.islocked(): |
307 |
+# if "DEBUG" in self.settings: |
308 |
+# print "setting gid to", gid |
309 |
+ self.gid=gid |
310 |
+ |
311 |
+ def set_lockdir(self,lockdir): |
312 |
+ if not os.path.exists(lockdir): |
313 |
+ os.makedirs(lockdir) |
314 |
+ if os.path.isdir(lockdir): |
315 |
+ if not self.islocked(): |
316 |
+ if lockdir[-1] == "/": |
317 |
+ lockdir=lockdir[:-1] |
318 |
+ self.lockdir=normpath(lockdir) |
319 |
+# if "DEBUG" in self.settings: |
320 |
+# print "setting lockdir to", self.lockdir |
321 |
+ else: |
322 |
+ raise "the lock object needs a path to a dir" |
323 |
+ |
324 |
+ def set_lockfilename(self,lockfilename): |
325 |
+ if not self.islocked(): |
326 |
+ self.lockfilename=lockfilename |
327 |
+# if "DEBUG" in self.settings: |
328 |
+# print "setting lockfilename to", self.lockfilename |
329 |
+ |
330 |
+ def set_lockfile(self): |
331 |
+ if not self.islocked(): |
332 |
+ self.lockfile=normpath(self.lockdir+'/'+self.lockfilename) |
333 |
+# if "DEBUG" in self.settings: |
334 |
+# print "setting lockfile to", self.lockfile |
335 |
+ |
336 |
+ def read_lock(self): |
337 |
+ if not self.locking_method == "HARDLOCK": |
338 |
+ self.fcntl_lock("read") |
339 |
+ else: |
340 |
+ print "HARDLOCKING doesnt support shared-read locks" |
341 |
+ print "using exclusive write locks" |
342 |
+ self.hard_lock() |
343 |
+ |
344 |
+ def write_lock(self): |
345 |
+ if not self.locking_method == "HARDLOCK": |
346 |
+ self.fcntl_lock("write") |
347 |
+ else: |
348 |
+ self.hard_lock() |
349 |
+ |
350 |
+ def unlock(self): |
351 |
+ if not self.locking_method == "HARDLOCK": |
352 |
+ self.fcntl_unlock() |
353 |
+ else: |
354 |
+ self.hard_unlock() |
355 |
+ |
356 |
+ def fcntl_lock(self,locktype): |
357 |
+ if self.myfd==None: |
358 |
+ if not os.path.exists(os.path.dirname(self.lockdir)): |
359 |
+ raise DirectoryNotFound, os.path.dirname(self.lockdir) |
360 |
+ if not os.path.exists(self.lockfile): |
361 |
+ old_mask=os.umask(000) |
362 |
+ self.myfd = os.open(self.lockfile, os.O_CREAT|os.O_RDWR,0660) |
363 |
+ try: |
364 |
+ if os.stat(self.lockfile).st_gid != self.gid: |
365 |
+ os.chown(self.lockfile,os.getuid(),self.gid) |
366 |
+ except SystemExit, e: |
367 |
+ raise |
368 |
+ except OSError, e: |
369 |
+ if e[0] == 2: #XXX: No such file or directory |
370 |
+ return self.fcntl_locking(locktype) |
371 |
+ else: |
372 |
+ writemsg("Cannot chown a lockfile. This could cause inconvenience later.\n") |
373 |
+ |
374 |
+ os.umask(old_mask) |
375 |
+ else: |
376 |
+ self.myfd = os.open(self.lockfile, os.O_CREAT|os.O_RDWR,0660) |
377 |
+ |
378 |
+ try: |
379 |
+ if locktype == "read": |
380 |
+ self.locking_method(self.myfd,fcntl.LOCK_SH|fcntl.LOCK_NB) |
381 |
+ else: |
382 |
+ self.locking_method(self.myfd,fcntl.LOCK_EX|fcntl.LOCK_NB) |
383 |
+ except IOError, e: |
384 |
+ if "errno" not in dir(e): |
385 |
+ raise |
386 |
+ if e.errno == errno.EAGAIN: |
387 |
+ if not LockDir.die_on_failed_lock: |
388 |
+ # Resource temp unavailable; eg, someone beat us to the lock. |
389 |
+ writemsg("waiting for lock on %s\n" % self.lockfile) |
390 |
+ |
391 |
+ # Try for the exclusive or shared lock again. |
392 |
+ if locktype == "read": |
393 |
+ self.locking_method(self.myfd,fcntl.LOCK_SH) |
394 |
+ else: |
395 |
+ self.locking_method(self.myfd,fcntl.LOCK_EX) |
396 |
+ else: |
397 |
+ raise LockInUse,self.lockfile |
398 |
+ elif e.errno == errno.ENOLCK: |
399 |
+ pass |
400 |
+ else: |
401 |
+ raise |
402 |
+ if not os.path.exists(self.lockfile): |
403 |
+ os.close(self.myfd) |
404 |
+ self.myfd=None |
405 |
+ #writemsg("lockfile recurse\n") |
406 |
+ self.fcntl_lock(locktype) |
407 |
+ else: |
408 |
+ self.locked=True |
409 |
+ #writemsg("Lockfile obtained\n") |
410 |
+ |
411 |
+ def fcntl_unlock(self): |
412 |
+ import fcntl |
413 |
+ unlinkfile = 1 |
414 |
+ if not os.path.exists(self.lockfile): |
415 |
+ print "lockfile does not exist '%s'" % self.lockfile |
416 |
+ if (self.myfd != None): |
417 |
+ try: |
418 |
+ os.close(myfd) |
419 |
+ self.myfd=None |
420 |
+ except: |
421 |
+ pass |
422 |
+ return False |
423 |
+ |
424 |
+ try: |
425 |
+ if self.myfd == None: |
426 |
+ self.myfd = os.open(self.lockfile, os.O_WRONLY,0660) |
427 |
+ unlinkfile = 1 |
428 |
+ self.locking_method(self.myfd,fcntl.LOCK_UN) |
429 |
+ except SystemExit, e: |
430 |
+ raise |
431 |
+ except Exception, e: |
432 |
+ os.close(self.myfd) |
433 |
+ self.myfd=None |
434 |
+ raise IOError, "Failed to unlock file '%s'\n" % self.lockfile |
435 |
+ try: |
436 |
+ # This sleep call was added to allow other processes that are |
437 |
+ # waiting for a lock to be able to grab it before it is deleted. |
438 |
+ # lockfile() already accounts for this situation, however, and |
439 |
+ # the sleep here adds more time than is saved overall, so am |
440 |
+ # commenting until it is proved necessary. |
441 |
+ #time.sleep(0.0001) |
442 |
+ if unlinkfile: |
443 |
+ InUse=False |
444 |
+ try: |
445 |
+ self.locking_method(self.myfd,fcntl.LOCK_EX|fcntl.LOCK_NB) |
446 |
+ except: |
447 |
+ print "Read lock may be in effect. skipping lockfile delete..." |
448 |
+ InUse=True |
449 |
+ # We won the lock, so there isn't competition for it. |
450 |
+ # We can safely delete the file. |
451 |
+ #writemsg("Got the lockfile...\n") |
452 |
+ #writemsg("Unlinking...\n") |
453 |
+ self.locking_method(self.myfd,fcntl.LOCK_UN) |
454 |
+ if not InUse: |
455 |
+ os.unlink(self.lockfile) |
456 |
+ os.close(self.myfd) |
457 |
+ self.myfd=None |
458 |
+# if "DEBUG" in self.settings: |
459 |
+# print "Unlinked lockfile..." |
460 |
+ except SystemExit, e: |
461 |
+ raise |
462 |
+ except Exception, e: |
463 |
+ # We really don't care... Someone else has the lock. |
464 |
+ # So it is their problem now. |
465 |
+ print "Failed to get lock... someone took it." |
466 |
+ print str(e) |
467 |
+ |
468 |
+ # Why test lockfilename? Because we may have been handed an |
469 |
+ # fd originally, and the caller might not like having their |
470 |
+ # open fd closed automatically on them. |
471 |
+ #if type(lockfilename) == types.StringType: |
472 |
+ # os.close(myfd) |
473 |
+ |
474 |
+ if (self.myfd != None): |
475 |
+ os.close(self.myfd) |
476 |
+ self.myfd=None |
477 |
+ self.locked=False |
478 |
+ time.sleep(.0001) |
479 |
+ |
480 |
+ def hard_lock(self,max_wait=14400): |
481 |
+ """Does the NFS, hardlink shuffle to ensure locking on the disk. |
482 |
+ We create a PRIVATE lockfile, that is just a placeholder on the disk. |
483 |
+ Then we HARDLINK the real lockfile to that private file. |
484 |
+ If our file can 2 references, then we have the lock. :) |
485 |
+ Otherwise we lather, rise, and repeat. |
486 |
+ We default to a 4 hour timeout. |
487 |
+ """ |
488 |
+ |
489 |
+ self.myhardlock = self.hardlock_name(self.lockdir) |
490 |
+ |
491 |
+ start_time = time.time() |
492 |
+ reported_waiting = False |
493 |
+ |
494 |
+ while(time.time() < (start_time + max_wait)): |
495 |
+ # We only need it to exist. |
496 |
+ self.myfd = os.open(self.myhardlock, os.O_CREAT|os.O_RDWR,0660) |
497 |
+ os.close(self.myfd) |
498 |
+ |
499 |
+ self.add_hardlock_file_to_cleanup() |
500 |
+ if not os.path.exists(self.myhardlock): |
501 |
+ raise FileNotFound, "Created lockfile is missing: %(filename)s" % {"filename":self.myhardlock} |
502 |
+ try: |
503 |
+ res = os.link(self.myhardlock, self.lockfile) |
504 |
+ except SystemExit, e: |
505 |
+ raise |
506 |
+ except Exception, e: |
507 |
+# if "DEBUG" in self.settings: |
508 |
+# print "lockfile(): Hardlink: Link failed." |
509 |
+# print "Exception: ",e |
510 |
+ pass |
511 |
+ |
512 |
+ if self.hardlink_is_mine(self.myhardlock, self.lockfile): |
513 |
+ # We have the lock. |
514 |
+ if reported_waiting: |
515 |
+ print |
516 |
+ return True |
517 |
+ |
518 |
+ if reported_waiting: |
519 |
+ writemsg(".") |
520 |
+ else: |
521 |
+ reported_waiting = True |
522 |
+ print |
523 |
+ print "Waiting on (hardlink) lockfile: (one '.' per 3 seconds)" |
524 |
+ print "Lockfile: " + self.lockfile |
525 |
+ time.sleep(3) |
526 |
+ |
527 |
+ os.unlink(self.myhardlock) |
528 |
+ return False |
529 |
+ |
530 |
+ def hard_unlock(self): |
531 |
+ try: |
532 |
+ if os.path.exists(self.myhardlock): |
533 |
+ os.unlink(self.myhardlock) |
534 |
+ if os.path.exists(self.lockfile): |
535 |
+ os.unlink(self.lockfile) |
536 |
+ except SystemExit, e: |
537 |
+ raise |
538 |
+ except: |
539 |
+ writemsg("Something strange happened to our hardlink locks.\n") |
540 |
+ |
541 |
+ def add_hardlock_file_to_cleanup(self): |
542 |
+ #mypath = self.normpath(path) |
543 |
+ if os.path.isdir(self.lockdir) and os.path.isfile(self.myhardlock): |
544 |
+ self.hardlock_paths[self.lockdir]=self.myhardlock |
545 |
+ |
546 |
+ def remove_hardlock_file_from_cleanup(self): |
547 |
+ if self.lockdir in self.hardlock_paths: |
548 |
+ del self.hardlock_paths[self.lockdir] |
549 |
+ print self.hardlock_paths |
550 |
+ |
551 |
+ def hardlock_name(self, path): |
552 |
+ mypath=path+"/.hardlock-"+os.uname()[1]+"-"+str(os.getpid()) |
553 |
+ newpath = os.path.normpath(mypath) |
554 |
+ if len(newpath) > 1: |
555 |
+ if newpath[1] == "/": |
556 |
+ newpath = "/"+newpath.lstrip("/") |
557 |
+ return newpath |
558 |
+ |
559 |
+ def hardlink_is_mine(self,link,lock): |
560 |
+ import stat |
561 |
+ try: |
562 |
+ myhls = os.stat(link) |
563 |
+ mylfs = os.stat(lock) |
564 |
+ except SystemExit, e: |
565 |
+ raise |
566 |
+ except: |
567 |
+ myhls = None |
568 |
+ mylfs = None |
569 |
+ |
570 |
+ if myhls: |
571 |
+ if myhls[stat.ST_NLINK] == 2: |
572 |
+ return True |
573 |
+ if mylfs: |
574 |
+ if mylfs[stat.ST_INO] == myhls[stat.ST_INO]: |
575 |
+ return True |
576 |
+ return False |
577 |
+ |
578 |
+ def hardlink_active(lock): |
579 |
+ if not os.path.exists(lock): |
580 |
+ return False |
581 |
+ |
582 |
+ def clean_my_hardlocks(self): |
583 |
+ try: |
584 |
+ for x in self.hardlock_paths.keys(): |
585 |
+ self.hardlock_cleanup(x) |
586 |
+ except AttributeError: |
587 |
+ pass |
588 |
+ |
589 |
+ def hardlock_cleanup(self,path): |
590 |
+ mypid = str(os.getpid()) |
591 |
+ myhost = os.uname()[1] |
592 |
+ mydl = os.listdir(path) |
593 |
+ results = [] |
594 |
+ mycount = 0 |
595 |
+ |
596 |
+ mylist = {} |
597 |
+ for x in mydl: |
598 |
+ filepath=path+"/"+x |
599 |
+ if os.path.isfile(filepath): |
600 |
+ parts = filepath.split(".hardlock-") |
601 |
+ if len(parts) == 2: |
602 |
+ filename = parts[0] |
603 |
+ hostpid = parts[1].split("-") |
604 |
+ host = "-".join(hostpid[:-1]) |
605 |
+ pid = hostpid[-1] |
606 |
+ if filename not in mylist: |
607 |
+ mylist[filename] = {} |
608 |
+ |
609 |
+ if host not in mylist[filename]: |
610 |
+ mylist[filename][host] = [] |
611 |
+ mylist[filename][host].append(pid) |
612 |
+ mycount += 1 |
613 |
+ else: |
614 |
+ mylist[filename][host].append(pid) |
615 |
+ mycount += 1 |
616 |
+ |
617 |
+ |
618 |
+ results.append("Found %(count)s locks" % {"count":mycount}) |
619 |
+ for x in mylist.keys(): |
620 |
+ if myhost in mylist[x]: |
621 |
+ mylockname = self.hardlock_name(x) |
622 |
+ if self.hardlink_is_mine(mylockname, self.lockfile) or \ |
623 |
+ not os.path.exists(self.lockfile): |
624 |
+ for y in mylist[x].keys(): |
625 |
+ for z in mylist[x][y]: |
626 |
+ filename = x+".hardlock-"+y+"-"+z |
627 |
+ if filename == mylockname: |
628 |
+ self.hard_unlock() |
629 |
+ continue |
630 |
+ try: |
631 |
+ # We're sweeping through, unlinking everyone's locks. |
632 |
+ os.unlink(filename) |
633 |
+ results.append("Unlinked: " + filename) |
634 |
+ except SystemExit, e: |
635 |
+ raise |
636 |
+ except Exception,e: |
637 |
+ pass |
638 |
+ try: |
639 |
+ os.unlink(x) |
640 |
+ results.append("Unlinked: " + x) |
641 |
+ os.unlink(mylockname) |
642 |
+ results.append("Unlinked: " + mylockname) |
643 |
+ except SystemExit, e: |
644 |
+ raise |
645 |
+ except Exception,e: |
646 |
+ pass |
647 |
+ else: |
648 |
+ try: |
649 |
+ os.unlink(mylockname) |
650 |
+ results.append("Unlinked: " + mylockname) |
651 |
+ except SystemExit, e: |
652 |
+ raise |
653 |
+ except Exception,e: |
654 |
+ pass |
655 |
+ return results |
656 |
+ |
657 |
+if __name__ == "__main__": |
658 |
+ |
659 |
+ def lock_work(): |
660 |
+ print |
661 |
+ for i in range(1,6): |
662 |
+ print i,time.time() |
663 |
+ time.sleep(1) |
664 |
+ print |
665 |
+ def normpath(mypath): |
666 |
+ newpath = os.path.normpath(mypath) |
667 |
+ if len(newpath) > 1: |
668 |
+ if newpath[1] == "/": |
669 |
+ newpath = "/"+newpath.lstrip("/") |
670 |
+ return newpath |
671 |
+ |
672 |
+ print "Lock 5 starting" |
673 |
+ import time |
674 |
+ Lock1=LockDir("/tmp/lock_path") |
675 |
+ Lock1.write_lock() |
676 |
+ print "Lock1 write lock" |
677 |
+ |
678 |
+ lock_work() |
679 |
+ |
680 |
+ Lock1.unlock() |
681 |
+ print "Lock1 unlock" |
682 |
+ |
683 |
+ Lock1.read_lock() |
684 |
+ print "Lock1 read lock" |
685 |
+ |
686 |
+ lock_work() |
687 |
+ |
688 |
+ Lock1.unlock() |
689 |
+ print "Lock1 unlock" |
690 |
+ |
691 |
+ Lock1.read_lock() |
692 |
+ print "Lock1 read lock" |
693 |
+ |
694 |
+ Lock1.write_lock() |
695 |
+ print "Lock1 write lock" |
696 |
+ |
697 |
+ lock_work() |
698 |
+ |
699 |
+ Lock1.unlock() |
700 |
+ print "Lock1 unlock" |
701 |
+ |
702 |
+ Lock1.read_lock() |
703 |
+ print "Lock1 read lock" |
704 |
+ |
705 |
+ lock_work() |
706 |
+ |
707 |
+ Lock1.unlock() |
708 |
+ print "Lock1 unlock" |
709 |
+ |
710 |
+#Lock1.write_lock() |
711 |
+#time.sleep(2) |
712 |
+#Lock1.unlock() |
713 |
+ ##Lock1.write_lock() |
714 |
+ #time.sleep(2) |
715 |
+ #Lock1.unlock() |
716 |
diff --git a/catalyst/main.py b/catalyst/main.py |
717 |
index d972b97..90ee722 100644 |
718 |
--- a/catalyst/main.py |
719 |
+++ b/catalyst/main.py |
720 |
@@ -21,7 +21,7 @@ sys.path.append(__selfpath__ + "/modules") |
721 |
|
722 |
import catalyst.config |
723 |
import catalyst.util |
724 |
-from catalyst.modules.catalyst_support import (required_build_targets, |
725 |
+from catalyst.support import (required_build_targets, |
726 |
valid_build_targets, CatalystError, hash_map, find_binary, LockInUse) |
727 |
|
728 |
__maintainer__="Catalyst <catalyst@g.o>" |
729 |
@@ -197,7 +197,7 @@ def parse_config(myconfig): |
730 |
|
731 |
def import_modules(): |
732 |
# import catalyst's own modules |
733 |
- # (i.e. catalyst_support and the arch modules) |
734 |
+ # (i.e. stage and the arch modules) |
735 |
targetmap={} |
736 |
|
737 |
try: |
738 |
@@ -354,7 +354,7 @@ def main(): |
739 |
parse_config(myconfig) |
740 |
|
741 |
# Start checking that digests are valid now that the hash_map was imported |
742 |
- # from catalyst_support |
743 |
+ # from catalyst.support |
744 |
if "digests" in conf_values: |
745 |
for i in conf_values["digests"].split(): |
746 |
if i not in hash_map: |
747 |
diff --git a/catalyst/modules/builder.py b/catalyst/modules/builder.py |
748 |
deleted file mode 100644 |
749 |
index ad27d78..0000000 |
750 |
--- a/catalyst/modules/builder.py |
751 |
+++ /dev/null |
752 |
@@ -1,20 +0,0 @@ |
753 |
- |
754 |
-class generic: |
755 |
- def __init__(self,myspec): |
756 |
- self.settings=myspec |
757 |
- |
758 |
- def mount_safety_check(self): |
759 |
- """ |
760 |
- Make sure that no bind mounts exist in chrootdir (to use before |
761 |
- cleaning the directory, to make sure we don't wipe the contents of |
762 |
- a bind mount |
763 |
- """ |
764 |
- pass |
765 |
- |
766 |
- def mount_all(self): |
767 |
- """do all bind mounts""" |
768 |
- pass |
769 |
- |
770 |
- def umount_all(self): |
771 |
- """unmount all bind mounts""" |
772 |
- pass |
773 |
diff --git a/catalyst/modules/catalyst_lock.py b/catalyst/modules/catalyst_lock.py |
774 |
deleted file mode 100644 |
775 |
index 5311cf8..0000000 |
776 |
--- a/catalyst/modules/catalyst_lock.py |
777 |
+++ /dev/null |
778 |
@@ -1,468 +0,0 @@ |
779 |
-#!/usr/bin/python |
780 |
-import os |
781 |
-import fcntl |
782 |
-import errno |
783 |
-import sys |
784 |
-import string |
785 |
-import time |
786 |
-from catalyst_support import * |
787 |
- |
788 |
-def writemsg(mystr): |
789 |
- sys.stderr.write(mystr) |
790 |
- sys.stderr.flush() |
791 |
- |
792 |
-class LockDir: |
793 |
- locking_method=fcntl.flock |
794 |
- lock_dirs_in_use=[] |
795 |
- die_on_failed_lock=True |
796 |
- def __del__(self): |
797 |
- self.clean_my_hardlocks() |
798 |
- self.delete_lock_from_path_list() |
799 |
- if self.islocked(): |
800 |
- self.fcntl_unlock() |
801 |
- |
802 |
- def __init__(self,lockdir): |
803 |
- self.locked=False |
804 |
- self.myfd=None |
805 |
- self.set_gid(250) |
806 |
- self.locking_method=LockDir.locking_method |
807 |
- self.set_lockdir(lockdir) |
808 |
- self.set_lockfilename(".catalyst_lock") |
809 |
- self.set_lockfile() |
810 |
- |
811 |
- if LockDir.lock_dirs_in_use.count(lockdir)>0: |
812 |
- raise "This directory already associated with a lock object" |
813 |
- else: |
814 |
- LockDir.lock_dirs_in_use.append(lockdir) |
815 |
- |
816 |
- self.hardlock_paths={} |
817 |
- |
818 |
- def delete_lock_from_path_list(self): |
819 |
- i=0 |
820 |
- try: |
821 |
- if LockDir.lock_dirs_in_use: |
822 |
- for x in LockDir.lock_dirs_in_use: |
823 |
- if LockDir.lock_dirs_in_use[i] == self.lockdir: |
824 |
- del LockDir.lock_dirs_in_use[i] |
825 |
- break |
826 |
- i=i+1 |
827 |
- except AttributeError: |
828 |
- pass |
829 |
- |
830 |
- def islocked(self): |
831 |
- if self.locked: |
832 |
- return True |
833 |
- else: |
834 |
- return False |
835 |
- |
836 |
- def set_gid(self,gid): |
837 |
- if not self.islocked(): |
838 |
-# if "DEBUG" in self.settings: |
839 |
-# print "setting gid to", gid |
840 |
- self.gid=gid |
841 |
- |
842 |
- def set_lockdir(self,lockdir): |
843 |
- if not os.path.exists(lockdir): |
844 |
- os.makedirs(lockdir) |
845 |
- if os.path.isdir(lockdir): |
846 |
- if not self.islocked(): |
847 |
- if lockdir[-1] == "/": |
848 |
- lockdir=lockdir[:-1] |
849 |
- self.lockdir=normpath(lockdir) |
850 |
-# if "DEBUG" in self.settings: |
851 |
-# print "setting lockdir to", self.lockdir |
852 |
- else: |
853 |
- raise "the lock object needs a path to a dir" |
854 |
- |
855 |
- def set_lockfilename(self,lockfilename): |
856 |
- if not self.islocked(): |
857 |
- self.lockfilename=lockfilename |
858 |
-# if "DEBUG" in self.settings: |
859 |
-# print "setting lockfilename to", self.lockfilename |
860 |
- |
861 |
- def set_lockfile(self): |
862 |
- if not self.islocked(): |
863 |
- self.lockfile=normpath(self.lockdir+'/'+self.lockfilename) |
864 |
-# if "DEBUG" in self.settings: |
865 |
-# print "setting lockfile to", self.lockfile |
866 |
- |
867 |
- def read_lock(self): |
868 |
- if not self.locking_method == "HARDLOCK": |
869 |
- self.fcntl_lock("read") |
870 |
- else: |
871 |
- print "HARDLOCKING doesnt support shared-read locks" |
872 |
- print "using exclusive write locks" |
873 |
- self.hard_lock() |
874 |
- |
875 |
- def write_lock(self): |
876 |
- if not self.locking_method == "HARDLOCK": |
877 |
- self.fcntl_lock("write") |
878 |
- else: |
879 |
- self.hard_lock() |
880 |
- |
881 |
- def unlock(self): |
882 |
- if not self.locking_method == "HARDLOCK": |
883 |
- self.fcntl_unlock() |
884 |
- else: |
885 |
- self.hard_unlock() |
886 |
- |
887 |
- def fcntl_lock(self,locktype): |
888 |
- if self.myfd==None: |
889 |
- if not os.path.exists(os.path.dirname(self.lockdir)): |
890 |
- raise DirectoryNotFound, os.path.dirname(self.lockdir) |
891 |
- if not os.path.exists(self.lockfile): |
892 |
- old_mask=os.umask(000) |
893 |
- self.myfd = os.open(self.lockfile, os.O_CREAT|os.O_RDWR,0660) |
894 |
- try: |
895 |
- if os.stat(self.lockfile).st_gid != self.gid: |
896 |
- os.chown(self.lockfile,os.getuid(),self.gid) |
897 |
- except SystemExit, e: |
898 |
- raise |
899 |
- except OSError, e: |
900 |
- if e[0] == 2: #XXX: No such file or directory |
901 |
- return self.fcntl_locking(locktype) |
902 |
- else: |
903 |
- writemsg("Cannot chown a lockfile. This could cause inconvenience later.\n") |
904 |
- |
905 |
- os.umask(old_mask) |
906 |
- else: |
907 |
- self.myfd = os.open(self.lockfile, os.O_CREAT|os.O_RDWR,0660) |
908 |
- |
909 |
- try: |
910 |
- if locktype == "read": |
911 |
- self.locking_method(self.myfd,fcntl.LOCK_SH|fcntl.LOCK_NB) |
912 |
- else: |
913 |
- self.locking_method(self.myfd,fcntl.LOCK_EX|fcntl.LOCK_NB) |
914 |
- except IOError, e: |
915 |
- if "errno" not in dir(e): |
916 |
- raise |
917 |
- if e.errno == errno.EAGAIN: |
918 |
- if not LockDir.die_on_failed_lock: |
919 |
- # Resource temp unavailable; eg, someone beat us to the lock. |
920 |
- writemsg("waiting for lock on %s\n" % self.lockfile) |
921 |
- |
922 |
- # Try for the exclusive or shared lock again. |
923 |
- if locktype == "read": |
924 |
- self.locking_method(self.myfd,fcntl.LOCK_SH) |
925 |
- else: |
926 |
- self.locking_method(self.myfd,fcntl.LOCK_EX) |
927 |
- else: |
928 |
- raise LockInUse,self.lockfile |
929 |
- elif e.errno == errno.ENOLCK: |
930 |
- pass |
931 |
- else: |
932 |
- raise |
933 |
- if not os.path.exists(self.lockfile): |
934 |
- os.close(self.myfd) |
935 |
- self.myfd=None |
936 |
- #writemsg("lockfile recurse\n") |
937 |
- self.fcntl_lock(locktype) |
938 |
- else: |
939 |
- self.locked=True |
940 |
- #writemsg("Lockfile obtained\n") |
941 |
- |
942 |
- def fcntl_unlock(self): |
943 |
- import fcntl |
944 |
- unlinkfile = 1 |
945 |
- if not os.path.exists(self.lockfile): |
946 |
- print "lockfile does not exist '%s'" % self.lockfile |
947 |
- if (self.myfd != None): |
948 |
- try: |
949 |
- os.close(myfd) |
950 |
- self.myfd=None |
951 |
- except: |
952 |
- pass |
953 |
- return False |
954 |
- |
955 |
- try: |
956 |
- if self.myfd == None: |
957 |
- self.myfd = os.open(self.lockfile, os.O_WRONLY,0660) |
958 |
- unlinkfile = 1 |
959 |
- self.locking_method(self.myfd,fcntl.LOCK_UN) |
960 |
- except SystemExit, e: |
961 |
- raise |
962 |
- except Exception, e: |
963 |
- os.close(self.myfd) |
964 |
- self.myfd=None |
965 |
- raise IOError, "Failed to unlock file '%s'\n" % self.lockfile |
966 |
- try: |
967 |
- # This sleep call was added to allow other processes that are |
968 |
- # waiting for a lock to be able to grab it before it is deleted. |
969 |
- # lockfile() already accounts for this situation, however, and |
970 |
- # the sleep here adds more time than is saved overall, so am |
971 |
- # commenting until it is proved necessary. |
972 |
- #time.sleep(0.0001) |
973 |
- if unlinkfile: |
974 |
- InUse=False |
975 |
- try: |
976 |
- self.locking_method(self.myfd,fcntl.LOCK_EX|fcntl.LOCK_NB) |
977 |
- except: |
978 |
- print "Read lock may be in effect. skipping lockfile delete..." |
979 |
- InUse=True |
980 |
- # We won the lock, so there isn't competition for it. |
981 |
- # We can safely delete the file. |
982 |
- #writemsg("Got the lockfile...\n") |
983 |
- #writemsg("Unlinking...\n") |
984 |
- self.locking_method(self.myfd,fcntl.LOCK_UN) |
985 |
- if not InUse: |
986 |
- os.unlink(self.lockfile) |
987 |
- os.close(self.myfd) |
988 |
- self.myfd=None |
989 |
-# if "DEBUG" in self.settings: |
990 |
-# print "Unlinked lockfile..." |
991 |
- except SystemExit, e: |
992 |
- raise |
993 |
- except Exception, e: |
994 |
- # We really don't care... Someone else has the lock. |
995 |
- # So it is their problem now. |
996 |
- print "Failed to get lock... someone took it." |
997 |
- print str(e) |
998 |
- |
999 |
- # Why test lockfilename? Because we may have been handed an |
1000 |
- # fd originally, and the caller might not like having their |
1001 |
- # open fd closed automatically on them. |
1002 |
- #if type(lockfilename) == types.StringType: |
1003 |
- # os.close(myfd) |
1004 |
- |
1005 |
- if (self.myfd != None): |
1006 |
- os.close(self.myfd) |
1007 |
- self.myfd=None |
1008 |
- self.locked=False |
1009 |
- time.sleep(.0001) |
1010 |
- |
1011 |
- def hard_lock(self,max_wait=14400): |
1012 |
- """Does the NFS, hardlink shuffle to ensure locking on the disk. |
1013 |
- We create a PRIVATE lockfile, that is just a placeholder on the disk. |
1014 |
- Then we HARDLINK the real lockfile to that private file. |
1015 |
- If our file can 2 references, then we have the lock. :) |
1016 |
- Otherwise we lather, rise, and repeat. |
1017 |
- We default to a 4 hour timeout. |
1018 |
- """ |
1019 |
- |
1020 |
- self.myhardlock = self.hardlock_name(self.lockdir) |
1021 |
- |
1022 |
- start_time = time.time() |
1023 |
- reported_waiting = False |
1024 |
- |
1025 |
- while(time.time() < (start_time + max_wait)): |
1026 |
- # We only need it to exist. |
1027 |
- self.myfd = os.open(self.myhardlock, os.O_CREAT|os.O_RDWR,0660) |
1028 |
- os.close(self.myfd) |
1029 |
- |
1030 |
- self.add_hardlock_file_to_cleanup() |
1031 |
- if not os.path.exists(self.myhardlock): |
1032 |
- raise FileNotFound, "Created lockfile is missing: %(filename)s" % {"filename":self.myhardlock} |
1033 |
- try: |
1034 |
- res = os.link(self.myhardlock, self.lockfile) |
1035 |
- except SystemExit, e: |
1036 |
- raise |
1037 |
- except Exception, e: |
1038 |
-# if "DEBUG" in self.settings: |
1039 |
-# print "lockfile(): Hardlink: Link failed." |
1040 |
-# print "Exception: ",e |
1041 |
- pass |
1042 |
- |
1043 |
- if self.hardlink_is_mine(self.myhardlock, self.lockfile): |
1044 |
- # We have the lock. |
1045 |
- if reported_waiting: |
1046 |
- print |
1047 |
- return True |
1048 |
- |
1049 |
- if reported_waiting: |
1050 |
- writemsg(".") |
1051 |
- else: |
1052 |
- reported_waiting = True |
1053 |
- print |
1054 |
- print "Waiting on (hardlink) lockfile: (one '.' per 3 seconds)" |
1055 |
- print "Lockfile: " + self.lockfile |
1056 |
- time.sleep(3) |
1057 |
- |
1058 |
- os.unlink(self.myhardlock) |
1059 |
- return False |
1060 |
- |
1061 |
- def hard_unlock(self): |
1062 |
- try: |
1063 |
- if os.path.exists(self.myhardlock): |
1064 |
- os.unlink(self.myhardlock) |
1065 |
- if os.path.exists(self.lockfile): |
1066 |
- os.unlink(self.lockfile) |
1067 |
- except SystemExit, e: |
1068 |
- raise |
1069 |
- except: |
1070 |
- writemsg("Something strange happened to our hardlink locks.\n") |
1071 |
- |
1072 |
- def add_hardlock_file_to_cleanup(self): |
1073 |
- #mypath = self.normpath(path) |
1074 |
- if os.path.isdir(self.lockdir) and os.path.isfile(self.myhardlock): |
1075 |
- self.hardlock_paths[self.lockdir]=self.myhardlock |
1076 |
- |
1077 |
- def remove_hardlock_file_from_cleanup(self): |
1078 |
- if self.lockdir in self.hardlock_paths: |
1079 |
- del self.hardlock_paths[self.lockdir] |
1080 |
- print self.hardlock_paths |
1081 |
- |
1082 |
- def hardlock_name(self, path): |
1083 |
- mypath=path+"/.hardlock-"+os.uname()[1]+"-"+str(os.getpid()) |
1084 |
- newpath = os.path.normpath(mypath) |
1085 |
- if len(newpath) > 1: |
1086 |
- if newpath[1] == "/": |
1087 |
- newpath = "/"+newpath.lstrip("/") |
1088 |
- return newpath |
1089 |
- |
1090 |
- def hardlink_is_mine(self,link,lock): |
1091 |
- import stat |
1092 |
- try: |
1093 |
- myhls = os.stat(link) |
1094 |
- mylfs = os.stat(lock) |
1095 |
- except SystemExit, e: |
1096 |
- raise |
1097 |
- except: |
1098 |
- myhls = None |
1099 |
- mylfs = None |
1100 |
- |
1101 |
- if myhls: |
1102 |
- if myhls[stat.ST_NLINK] == 2: |
1103 |
- return True |
1104 |
- if mylfs: |
1105 |
- if mylfs[stat.ST_INO] == myhls[stat.ST_INO]: |
1106 |
- return True |
1107 |
- return False |
1108 |
- |
1109 |
- def hardlink_active(lock): |
1110 |
- if not os.path.exists(lock): |
1111 |
- return False |
1112 |
- |
1113 |
- def clean_my_hardlocks(self): |
1114 |
- try: |
1115 |
- for x in self.hardlock_paths.keys(): |
1116 |
- self.hardlock_cleanup(x) |
1117 |
- except AttributeError: |
1118 |
- pass |
1119 |
- |
1120 |
- def hardlock_cleanup(self,path): |
1121 |
- mypid = str(os.getpid()) |
1122 |
- myhost = os.uname()[1] |
1123 |
- mydl = os.listdir(path) |
1124 |
- results = [] |
1125 |
- mycount = 0 |
1126 |
- |
1127 |
- mylist = {} |
1128 |
- for x in mydl: |
1129 |
- filepath=path+"/"+x |
1130 |
- if os.path.isfile(filepath): |
1131 |
- parts = filepath.split(".hardlock-") |
1132 |
- if len(parts) == 2: |
1133 |
- filename = parts[0] |
1134 |
- hostpid = parts[1].split("-") |
1135 |
- host = "-".join(hostpid[:-1]) |
1136 |
- pid = hostpid[-1] |
1137 |
- if filename not in mylist: |
1138 |
- mylist[filename] = {} |
1139 |
- |
1140 |
- if host not in mylist[filename]: |
1141 |
- mylist[filename][host] = [] |
1142 |
- mylist[filename][host].append(pid) |
1143 |
- mycount += 1 |
1144 |
- else: |
1145 |
- mylist[filename][host].append(pid) |
1146 |
- mycount += 1 |
1147 |
- |
1148 |
- |
1149 |
- results.append("Found %(count)s locks" % {"count":mycount}) |
1150 |
- for x in mylist.keys(): |
1151 |
- if myhost in mylist[x]: |
1152 |
- mylockname = self.hardlock_name(x) |
1153 |
- if self.hardlink_is_mine(mylockname, self.lockfile) or \ |
1154 |
- not os.path.exists(self.lockfile): |
1155 |
- for y in mylist[x].keys(): |
1156 |
- for z in mylist[x][y]: |
1157 |
- filename = x+".hardlock-"+y+"-"+z |
1158 |
- if filename == mylockname: |
1159 |
- self.hard_unlock() |
1160 |
- continue |
1161 |
- try: |
1162 |
- # We're sweeping through, unlinking everyone's locks. |
1163 |
- os.unlink(filename) |
1164 |
- results.append("Unlinked: " + filename) |
1165 |
- except SystemExit, e: |
1166 |
- raise |
1167 |
- except Exception,e: |
1168 |
- pass |
1169 |
- try: |
1170 |
- os.unlink(x) |
1171 |
- results.append("Unlinked: " + x) |
1172 |
- os.unlink(mylockname) |
1173 |
- results.append("Unlinked: " + mylockname) |
1174 |
- except SystemExit, e: |
1175 |
- raise |
1176 |
- except Exception,e: |
1177 |
- pass |
1178 |
- else: |
1179 |
- try: |
1180 |
- os.unlink(mylockname) |
1181 |
- results.append("Unlinked: " + mylockname) |
1182 |
- except SystemExit, e: |
1183 |
- raise |
1184 |
- except Exception,e: |
1185 |
- pass |
1186 |
- return results |
1187 |
- |
1188 |
-if __name__ == "__main__": |
1189 |
- |
1190 |
- def lock_work(): |
1191 |
- print |
1192 |
- for i in range(1,6): |
1193 |
- print i,time.time() |
1194 |
- time.sleep(1) |
1195 |
- print |
1196 |
- def normpath(mypath): |
1197 |
- newpath = os.path.normpath(mypath) |
1198 |
- if len(newpath) > 1: |
1199 |
- if newpath[1] == "/": |
1200 |
- newpath = "/"+newpath.lstrip("/") |
1201 |
- return newpath |
1202 |
- |
1203 |
- print "Lock 5 starting" |
1204 |
- import time |
1205 |
- Lock1=LockDir("/tmp/lock_path") |
1206 |
- Lock1.write_lock() |
1207 |
- print "Lock1 write lock" |
1208 |
- |
1209 |
- lock_work() |
1210 |
- |
1211 |
- Lock1.unlock() |
1212 |
- print "Lock1 unlock" |
1213 |
- |
1214 |
- Lock1.read_lock() |
1215 |
- print "Lock1 read lock" |
1216 |
- |
1217 |
- lock_work() |
1218 |
- |
1219 |
- Lock1.unlock() |
1220 |
- print "Lock1 unlock" |
1221 |
- |
1222 |
- Lock1.read_lock() |
1223 |
- print "Lock1 read lock" |
1224 |
- |
1225 |
- Lock1.write_lock() |
1226 |
- print "Lock1 write lock" |
1227 |
- |
1228 |
- lock_work() |
1229 |
- |
1230 |
- Lock1.unlock() |
1231 |
- print "Lock1 unlock" |
1232 |
- |
1233 |
- Lock1.read_lock() |
1234 |
- print "Lock1 read lock" |
1235 |
- |
1236 |
- lock_work() |
1237 |
- |
1238 |
- Lock1.unlock() |
1239 |
- print "Lock1 unlock" |
1240 |
- |
1241 |
-#Lock1.write_lock() |
1242 |
-#time.sleep(2) |
1243 |
-#Lock1.unlock() |
1244 |
- ##Lock1.write_lock() |
1245 |
- #time.sleep(2) |
1246 |
- #Lock1.unlock() |
1247 |
diff --git a/catalyst/modules/catalyst_support.py b/catalyst/modules/catalyst_support.py |
1248 |
deleted file mode 100644 |
1249 |
index 316dfa3..0000000 |
1250 |
--- a/catalyst/modules/catalyst_support.py |
1251 |
+++ /dev/null |
1252 |
@@ -1,718 +0,0 @@ |
1253 |
- |
1254 |
-import sys,string,os,types,re,signal,traceback,time |
1255 |
-#import md5,sha |
1256 |
-selinux_capable = False |
1257 |
-#userpriv_capable = (os.getuid() == 0) |
1258 |
-#fakeroot_capable = False |
1259 |
-BASH_BINARY = "/bin/bash" |
1260 |
- |
1261 |
-try: |
1262 |
- import resource |
1263 |
- max_fd_limit=resource.getrlimit(RLIMIT_NOFILE) |
1264 |
-except SystemExit, e: |
1265 |
- raise |
1266 |
-except: |
1267 |
- # hokay, no resource module. |
1268 |
- max_fd_limit=256 |
1269 |
- |
1270 |
-# pids this process knows of. |
1271 |
-spawned_pids = [] |
1272 |
- |
1273 |
-try: |
1274 |
- import urllib |
1275 |
-except SystemExit, e: |
1276 |
- raise |
1277 |
- |
1278 |
-def cleanup(pids,block_exceptions=True): |
1279 |
- """function to go through and reap the list of pids passed to it""" |
1280 |
- global spawned_pids |
1281 |
- if type(pids) == int: |
1282 |
- pids = [pids] |
1283 |
- for x in pids: |
1284 |
- try: |
1285 |
- os.kill(x,signal.SIGTERM) |
1286 |
- if os.waitpid(x,os.WNOHANG)[1] == 0: |
1287 |
- # feisty bugger, still alive. |
1288 |
- os.kill(x,signal.SIGKILL) |
1289 |
- os.waitpid(x,0) |
1290 |
- |
1291 |
- except OSError, oe: |
1292 |
- if block_exceptions: |
1293 |
- pass |
1294 |
- if oe.errno not in (10,3): |
1295 |
- raise oe |
1296 |
- except SystemExit: |
1297 |
- raise |
1298 |
- except Exception: |
1299 |
- if block_exceptions: |
1300 |
- pass |
1301 |
- try: spawned_pids.remove(x) |
1302 |
- except IndexError: pass |
1303 |
- |
1304 |
- |
1305 |
- |
1306 |
-# a function to turn a string of non-printable characters into a string of |
1307 |
-# hex characters |
1308 |
-def hexify(str): |
1309 |
- hexStr = string.hexdigits |
1310 |
- r = '' |
1311 |
- for ch in str: |
1312 |
- i = ord(ch) |
1313 |
- r = r + hexStr[(i >> 4) & 0xF] + hexStr[i & 0xF] |
1314 |
- return r |
1315 |
-# hexify() |
1316 |
- |
1317 |
-def generate_contents(file,contents_function="auto",verbose=False): |
1318 |
- try: |
1319 |
- _ = contents_function |
1320 |
- if _ == 'auto' and file.endswith('.iso'): |
1321 |
- _ = 'isoinfo-l' |
1322 |
- if (_ in ['tar-tv','auto']): |
1323 |
- if file.endswith('.tgz') or file.endswith('.tar.gz'): |
1324 |
- _ = 'tar-tvz' |
1325 |
- elif file.endswith('.tbz2') or file.endswith('.tar.bz2'): |
1326 |
- _ = 'tar-tvj' |
1327 |
- elif file.endswith('.tar'): |
1328 |
- _ = 'tar-tv' |
1329 |
- |
1330 |
- if _ == 'auto': |
1331 |
- warn('File %r has unknown type for automatic detection.' % (file, )) |
1332 |
- return None |
1333 |
- else: |
1334 |
- contents_function = _ |
1335 |
- _ = contents_map[contents_function] |
1336 |
- return _[0](file,_[1],verbose) |
1337 |
- except: |
1338 |
- raise CatalystError,\ |
1339 |
- "Error generating contents, is appropriate utility (%s) installed on your system?" \ |
1340 |
- % (contents_function, ) |
1341 |
- |
1342 |
-def calc_contents(file,cmd,verbose): |
1343 |
- args={ 'file': file } |
1344 |
- cmd=cmd % dict(args) |
1345 |
- a=os.popen(cmd) |
1346 |
- mylines=a.readlines() |
1347 |
- a.close() |
1348 |
- result="".join(mylines) |
1349 |
- if verbose: |
1350 |
- print result |
1351 |
- return result |
1352 |
- |
1353 |
-# This has map must be defined after the function calc_content |
1354 |
-# It is possible to call different functions from this but they must be defined |
1355 |
-# before hash_map |
1356 |
-# Key,function,cmd |
1357 |
-contents_map={ |
1358 |
- # 'find' is disabled because it requires the source path, which is not |
1359 |
- # always available |
1360 |
- #"find" :[calc_contents,"find %(path)s"], |
1361 |
- "tar-tv":[calc_contents,"tar tvf %(file)s"], |
1362 |
- "tar-tvz":[calc_contents,"tar tvzf %(file)s"], |
1363 |
- "tar-tvj":[calc_contents,"tar -I lbzip2 -tvf %(file)s"], |
1364 |
- "isoinfo-l":[calc_contents,"isoinfo -l -i %(file)s"], |
1365 |
- # isoinfo-f should be a last resort only |
1366 |
- "isoinfo-f":[calc_contents,"isoinfo -f -i %(file)s"], |
1367 |
-} |
1368 |
- |
1369 |
-def generate_hash(file,hash_function="crc32",verbose=False): |
1370 |
- try: |
1371 |
- return hash_map[hash_function][0](file,hash_map[hash_function][1],hash_map[hash_function][2],\ |
1372 |
- hash_map[hash_function][3],verbose) |
1373 |
- except: |
1374 |
- raise CatalystError,"Error generating hash, is appropriate utility installed on your system?" |
1375 |
- |
1376 |
-def calc_hash(file,cmd,cmd_args,id_string="MD5",verbose=False): |
1377 |
- a=os.popen(cmd+" "+cmd_args+" "+file) |
1378 |
- mylines=a.readlines() |
1379 |
- a.close() |
1380 |
- mylines=mylines[0].split() |
1381 |
- result=mylines[0] |
1382 |
- if verbose: |
1383 |
- print id_string+" (%s) = %s" % (file, result) |
1384 |
- return result |
1385 |
- |
1386 |
-def calc_hash2(file,cmd,cmd_args,id_string="MD5",verbose=False): |
1387 |
- a=os.popen(cmd+" "+cmd_args+" "+file) |
1388 |
- header=a.readline() |
1389 |
- mylines=a.readline().split() |
1390 |
- hash=mylines[0] |
1391 |
- short_file=os.path.split(mylines[1])[1] |
1392 |
- a.close() |
1393 |
- result=header+hash+" "+short_file+"\n" |
1394 |
- if verbose: |
1395 |
- print header+" (%s) = %s" % (short_file, result) |
1396 |
- return result |
1397 |
- |
1398 |
-# This has map must be defined after the function calc_hash |
1399 |
-# It is possible to call different functions from this but they must be defined |
1400 |
-# before hash_map |
1401 |
-# Key,function,cmd,cmd_args,Print string |
1402 |
-hash_map={ |
1403 |
- "adler32":[calc_hash2,"shash","-a ADLER32","ADLER32"],\ |
1404 |
- "crc32":[calc_hash2,"shash","-a CRC32","CRC32"],\ |
1405 |
- "crc32b":[calc_hash2,"shash","-a CRC32B","CRC32B"],\ |
1406 |
- "gost":[calc_hash2,"shash","-a GOST","GOST"],\ |
1407 |
- "haval128":[calc_hash2,"shash","-a HAVAL128","HAVAL128"],\ |
1408 |
- "haval160":[calc_hash2,"shash","-a HAVAL160","HAVAL160"],\ |
1409 |
- "haval192":[calc_hash2,"shash","-a HAVAL192","HAVAL192"],\ |
1410 |
- "haval224":[calc_hash2,"shash","-a HAVAL224","HAVAL224"],\ |
1411 |
- "haval256":[calc_hash2,"shash","-a HAVAL256","HAVAL256"],\ |
1412 |
- "md2":[calc_hash2,"shash","-a MD2","MD2"],\ |
1413 |
- "md4":[calc_hash2,"shash","-a MD4","MD4"],\ |
1414 |
- "md5":[calc_hash2,"shash","-a MD5","MD5"],\ |
1415 |
- "ripemd128":[calc_hash2,"shash","-a RIPEMD128","RIPEMD128"],\ |
1416 |
- "ripemd160":[calc_hash2,"shash","-a RIPEMD160","RIPEMD160"],\ |
1417 |
- "ripemd256":[calc_hash2,"shash","-a RIPEMD256","RIPEMD256"],\ |
1418 |
- "ripemd320":[calc_hash2,"shash","-a RIPEMD320","RIPEMD320"],\ |
1419 |
- "sha1":[calc_hash2,"shash","-a SHA1","SHA1"],\ |
1420 |
- "sha224":[calc_hash2,"shash","-a SHA224","SHA224"],\ |
1421 |
- "sha256":[calc_hash2,"shash","-a SHA256","SHA256"],\ |
1422 |
- "sha384":[calc_hash2,"shash","-a SHA384","SHA384"],\ |
1423 |
- "sha512":[calc_hash2,"shash","-a SHA512","SHA512"],\ |
1424 |
- "snefru128":[calc_hash2,"shash","-a SNEFRU128","SNEFRU128"],\ |
1425 |
- "snefru256":[calc_hash2,"shash","-a SNEFRU256","SNEFRU256"],\ |
1426 |
- "tiger":[calc_hash2,"shash","-a TIGER","TIGER"],\ |
1427 |
- "tiger128":[calc_hash2,"shash","-a TIGER128","TIGER128"],\ |
1428 |
- "tiger160":[calc_hash2,"shash","-a TIGER160","TIGER160"],\ |
1429 |
- "whirlpool":[calc_hash2,"shash","-a WHIRLPOOL","WHIRLPOOL"],\ |
1430 |
- } |
1431 |
- |
1432 |
-def read_from_clst(file): |
1433 |
- line = '' |
1434 |
- myline = '' |
1435 |
- try: |
1436 |
- myf=open(file,"r") |
1437 |
- except: |
1438 |
- return -1 |
1439 |
- #raise CatalystError, "Could not open file "+file |
1440 |
- for line in myf.readlines(): |
1441 |
- #line = string.replace(line, "\n", "") # drop newline |
1442 |
- myline = myline + line |
1443 |
- myf.close() |
1444 |
- return myline |
1445 |
-# read_from_clst |
1446 |
- |
1447 |
-# these should never be touched |
1448 |
-required_build_targets=["generic_target","generic_stage_target"] |
1449 |
- |
1450 |
-# new build types should be added here |
1451 |
-valid_build_targets=["stage1_target","stage2_target","stage3_target","stage4_target","grp_target", |
1452 |
- "livecd_stage1_target","livecd_stage2_target","embedded_target", |
1453 |
- "tinderbox_target","snapshot_target","netboot_target","netboot2_target"] |
1454 |
- |
1455 |
-required_config_file_values=["storedir","sharedir","distdir","portdir"] |
1456 |
-valid_config_file_values=required_config_file_values[:] |
1457 |
-valid_config_file_values.append("PKGCACHE") |
1458 |
-valid_config_file_values.append("KERNCACHE") |
1459 |
-valid_config_file_values.append("CCACHE") |
1460 |
-valid_config_file_values.append("DISTCC") |
1461 |
-valid_config_file_values.append("ICECREAM") |
1462 |
-valid_config_file_values.append("ENVSCRIPT") |
1463 |
-valid_config_file_values.append("AUTORESUME") |
1464 |
-valid_config_file_values.append("FETCH") |
1465 |
-valid_config_file_values.append("CLEAR_AUTORESUME") |
1466 |
-valid_config_file_values.append("options") |
1467 |
-valid_config_file_values.append("DEBUG") |
1468 |
-valid_config_file_values.append("VERBOSE") |
1469 |
-valid_config_file_values.append("PURGE") |
1470 |
-valid_config_file_values.append("PURGEONLY") |
1471 |
-valid_config_file_values.append("SNAPCACHE") |
1472 |
-valid_config_file_values.append("snapshot_cache") |
1473 |
-valid_config_file_values.append("hash_function") |
1474 |
-valid_config_file_values.append("digests") |
1475 |
-valid_config_file_values.append("contents") |
1476 |
-valid_config_file_values.append("SEEDCACHE") |
1477 |
- |
1478 |
-verbosity=1 |
1479 |
- |
1480 |
-def list_bashify(mylist): |
1481 |
- if type(mylist)==types.StringType: |
1482 |
- mypack=[mylist] |
1483 |
- else: |
1484 |
- mypack=mylist[:] |
1485 |
- for x in range(0,len(mypack)): |
1486 |
- # surround args with quotes for passing to bash, |
1487 |
- # allows things like "<" to remain intact |
1488 |
- mypack[x]="'"+mypack[x]+"'" |
1489 |
- mypack=string.join(mypack) |
1490 |
- return mypack |
1491 |
- |
1492 |
-def list_to_string(mylist): |
1493 |
- if type(mylist)==types.StringType: |
1494 |
- mypack=[mylist] |
1495 |
- else: |
1496 |
- mypack=mylist[:] |
1497 |
- for x in range(0,len(mypack)): |
1498 |
- # surround args with quotes for passing to bash, |
1499 |
- # allows things like "<" to remain intact |
1500 |
- mypack[x]=mypack[x] |
1501 |
- mypack=string.join(mypack) |
1502 |
- return mypack |
1503 |
- |
1504 |
-class CatalystError(Exception): |
1505 |
- def __init__(self, message): |
1506 |
- if message: |
1507 |
- (type,value)=sys.exc_info()[:2] |
1508 |
- if value!=None: |
1509 |
- print |
1510 |
- print traceback.print_exc(file=sys.stdout) |
1511 |
- print |
1512 |
- print "!!! catalyst: "+message |
1513 |
- print |
1514 |
- |
1515 |
-class LockInUse(Exception): |
1516 |
- def __init__(self, message): |
1517 |
- if message: |
1518 |
- #(type,value)=sys.exc_info()[:2] |
1519 |
- #if value!=None: |
1520 |
- #print |
1521 |
- #kprint traceback.print_exc(file=sys.stdout) |
1522 |
- print |
1523 |
- print "!!! catalyst lock file in use: "+message |
1524 |
- print |
1525 |
- |
1526 |
-def die(msg=None): |
1527 |
- warn(msg) |
1528 |
- sys.exit(1) |
1529 |
- |
1530 |
-def warn(msg): |
1531 |
- print "!!! catalyst: "+msg |
1532 |
- |
1533 |
-def find_binary(myc): |
1534 |
- """look through the environmental path for an executable file named whatever myc is""" |
1535 |
- # this sucks. badly. |
1536 |
- p=os.getenv("PATH") |
1537 |
- if p == None: |
1538 |
- return None |
1539 |
- for x in p.split(":"): |
1540 |
- #if it exists, and is executable |
1541 |
- if os.path.exists("%s/%s" % (x,myc)) and os.stat("%s/%s" % (x,myc))[0] & 0x0248: |
1542 |
- return "%s/%s" % (x,myc) |
1543 |
- return None |
1544 |
- |
1545 |
-def spawn_bash(mycommand,env={},debug=False,opt_name=None,**keywords): |
1546 |
- """spawn mycommand as an arguement to bash""" |
1547 |
- args=[BASH_BINARY] |
1548 |
- if not opt_name: |
1549 |
- opt_name=mycommand.split()[0] |
1550 |
- if "BASH_ENV" not in env: |
1551 |
- env["BASH_ENV"] = "/etc/spork/is/not/valid/profile.env" |
1552 |
- if debug: |
1553 |
- args.append("-x") |
1554 |
- args.append("-c") |
1555 |
- args.append(mycommand) |
1556 |
- return spawn(args,env=env,opt_name=opt_name,**keywords) |
1557 |
- |
1558 |
-#def spawn_get_output(mycommand,spawn_type=spawn,raw_exit_code=False,emulate_gso=True, \ |
1559 |
-# collect_fds=[1],fd_pipes=None,**keywords): |
1560 |
- |
1561 |
-def spawn_get_output(mycommand,raw_exit_code=False,emulate_gso=True, \ |
1562 |
- collect_fds=[1],fd_pipes=None,**keywords): |
1563 |
- """call spawn, collecting the output to fd's specified in collect_fds list |
1564 |
- emulate_gso is a compatability hack to emulate commands.getstatusoutput's return, minus the |
1565 |
- requirement it always be a bash call (spawn_type controls the actual spawn call), and minus the |
1566 |
- 'lets let log only stdin and let stderr slide by'. |
1567 |
- |
1568 |
- emulate_gso was deprecated from the day it was added, so convert your code over. |
1569 |
- spawn_type is the passed in function to call- typically spawn_bash, spawn, spawn_sandbox, or spawn_fakeroot""" |
1570 |
- global selinux_capable |
1571 |
- pr,pw=os.pipe() |
1572 |
- |
1573 |
- #if type(spawn_type) not in [types.FunctionType, types.MethodType]: |
1574 |
- # s="spawn_type must be passed a function, not",type(spawn_type),spawn_type |
1575 |
- # raise Exception,s |
1576 |
- |
1577 |
- if fd_pipes==None: |
1578 |
- fd_pipes={} |
1579 |
- fd_pipes[0] = 0 |
1580 |
- |
1581 |
- for x in collect_fds: |
1582 |
- fd_pipes[x] = pw |
1583 |
- keywords["returnpid"]=True |
1584 |
- |
1585 |
- mypid=spawn_bash(mycommand,fd_pipes=fd_pipes,**keywords) |
1586 |
- os.close(pw) |
1587 |
- if type(mypid) != types.ListType: |
1588 |
- os.close(pr) |
1589 |
- return [mypid, "%s: No such file or directory" % mycommand.split()[0]] |
1590 |
- |
1591 |
- fd=os.fdopen(pr,"r") |
1592 |
- mydata=fd.readlines() |
1593 |
- fd.close() |
1594 |
- if emulate_gso: |
1595 |
- mydata=string.join(mydata) |
1596 |
- if len(mydata) and mydata[-1] == "\n": |
1597 |
- mydata=mydata[:-1] |
1598 |
- retval=os.waitpid(mypid[0],0)[1] |
1599 |
- cleanup(mypid) |
1600 |
- if raw_exit_code: |
1601 |
- return [retval,mydata] |
1602 |
- retval=process_exit_code(retval) |
1603 |
- return [retval, mydata] |
1604 |
- |
1605 |
-# base spawn function |
1606 |
-def spawn(mycommand,env={},raw_exit_code=False,opt_name=None,fd_pipes=None,returnpid=False,\ |
1607 |
- uid=None,gid=None,groups=None,umask=None,logfile=None,path_lookup=True,\ |
1608 |
- selinux_context=None, raise_signals=False, func_call=False): |
1609 |
- """base fork/execve function. |
1610 |
- mycommand is the desired command- if you need a command to execute in a bash/sandbox/fakeroot |
1611 |
- environment, use the appropriate spawn call. This is a straight fork/exec code path. |
1612 |
- Can either have a tuple, or a string passed in. If uid/gid/groups/umask specified, it changes |
1613 |
- the forked process to said value. If path_lookup is on, a non-absolute command will be converted |
1614 |
- to an absolute command, otherwise it returns None. |
1615 |
- |
1616 |
- selinux_context is the desired context, dependant on selinux being available. |
1617 |
- opt_name controls the name the processor goes by. |
1618 |
- fd_pipes controls which file descriptor numbers are left open in the forked process- it's a dict of |
1619 |
- current fd's raw fd #, desired #. |
1620 |
- |
1621 |
- func_call is a boolean for specifying to execute a python function- use spawn_func instead. |
1622 |
- raise_signals is questionable. Basically throw an exception if signal'd. No exception is thrown |
1623 |
- if raw_input is on. |
1624 |
- |
1625 |
- logfile overloads the specified fd's to write to a tee process which logs to logfile |
1626 |
- returnpid returns the relevant pids (a list, including the logging process if logfile is on). |
1627 |
- |
1628 |
- non-returnpid calls to spawn will block till the process has exited, returning the exitcode/signal |
1629 |
- raw_exit_code controls whether the actual waitpid result is returned, or intrepretted.""" |
1630 |
- |
1631 |
- myc='' |
1632 |
- if not func_call: |
1633 |
- if type(mycommand)==types.StringType: |
1634 |
- mycommand=mycommand.split() |
1635 |
- myc = mycommand[0] |
1636 |
- if not os.access(myc, os.X_OK): |
1637 |
- if not path_lookup: |
1638 |
- return None |
1639 |
- myc = find_binary(myc) |
1640 |
- if myc == None: |
1641 |
- return None |
1642 |
- mypid=[] |
1643 |
- if logfile: |
1644 |
- pr,pw=os.pipe() |
1645 |
- mypid.extend(spawn(('tee','-i','-a',logfile),returnpid=True,fd_pipes={0:pr,1:1,2:2})) |
1646 |
- retval=os.waitpid(mypid[-1],os.WNOHANG)[1] |
1647 |
- if retval != 0: |
1648 |
- # he's dead jim. |
1649 |
- if raw_exit_code: |
1650 |
- return retval |
1651 |
- return process_exit_code(retval) |
1652 |
- |
1653 |
- if fd_pipes == None: |
1654 |
- fd_pipes={} |
1655 |
- fd_pipes[0] = 0 |
1656 |
- fd_pipes[1]=pw |
1657 |
- fd_pipes[2]=pw |
1658 |
- |
1659 |
- if not opt_name: |
1660 |
- opt_name = mycommand[0] |
1661 |
- myargs=[opt_name] |
1662 |
- myargs.extend(mycommand[1:]) |
1663 |
- global spawned_pids |
1664 |
- mypid.append(os.fork()) |
1665 |
- if mypid[-1] != 0: |
1666 |
- #log the bugger. |
1667 |
- spawned_pids.extend(mypid) |
1668 |
- |
1669 |
- if mypid[-1] == 0: |
1670 |
- if func_call: |
1671 |
- spawned_pids = [] |
1672 |
- |
1673 |
- # this may look ugly, but basically it moves file descriptors around to ensure no |
1674 |
- # handles that are needed are accidentally closed during the final dup2 calls. |
1675 |
- trg_fd=[] |
1676 |
- if type(fd_pipes)==types.DictType: |
1677 |
- src_fd=[] |
1678 |
- k=fd_pipes.keys() |
1679 |
- k.sort() |
1680 |
- |
1681 |
- #build list of which fds will be where, and where they are at currently |
1682 |
- for x in k: |
1683 |
- trg_fd.append(x) |
1684 |
- src_fd.append(fd_pipes[x]) |
1685 |
- |
1686 |
- # run through said list dup'ing descriptors so that they won't be waxed |
1687 |
- # by other dup calls. |
1688 |
- for x in range(0,len(trg_fd)): |
1689 |
- if trg_fd[x] == src_fd[x]: |
1690 |
- continue |
1691 |
- if trg_fd[x] in src_fd[x+1:]: |
1692 |
- new=os.dup2(trg_fd[x],max(src_fd) + 1) |
1693 |
- os.close(trg_fd[x]) |
1694 |
- try: |
1695 |
- while True: |
1696 |
- src_fd[s.index(trg_fd[x])]=new |
1697 |
- except SystemExit, e: |
1698 |
- raise |
1699 |
- except: |
1700 |
- pass |
1701 |
- |
1702 |
- # transfer the fds to their final pre-exec position. |
1703 |
- for x in range(0,len(trg_fd)): |
1704 |
- if trg_fd[x] != src_fd[x]: |
1705 |
- os.dup2(src_fd[x], trg_fd[x]) |
1706 |
- else: |
1707 |
- trg_fd=[0,1,2] |
1708 |
- |
1709 |
- # wax all open descriptors that weren't requested be left open. |
1710 |
- for x in range(0,max_fd_limit): |
1711 |
- if x not in trg_fd: |
1712 |
- try: |
1713 |
- os.close(x) |
1714 |
- except SystemExit, e: |
1715 |
- raise |
1716 |
- except: |
1717 |
- pass |
1718 |
- |
1719 |
- # note this order must be preserved- can't change gid/groups if you change uid first. |
1720 |
- if selinux_capable and selinux_context: |
1721 |
- import selinux |
1722 |
- selinux.setexec(selinux_context) |
1723 |
- if gid: |
1724 |
- os.setgid(gid) |
1725 |
- if groups: |
1726 |
- os.setgroups(groups) |
1727 |
- if uid: |
1728 |
- os.setuid(uid) |
1729 |
- if umask: |
1730 |
- os.umask(umask) |
1731 |
- else: |
1732 |
- os.umask(022) |
1733 |
- |
1734 |
- try: |
1735 |
- #print "execing", myc, myargs |
1736 |
- if func_call: |
1737 |
- # either use a passed in func for interpretting the results, or return if no exception. |
1738 |
- # note the passed in list, and dict are expanded. |
1739 |
- if len(mycommand) == 4: |
1740 |
- os._exit(mycommand[3](mycommand[0](*mycommand[1],**mycommand[2]))) |
1741 |
- try: |
1742 |
- mycommand[0](*mycommand[1],**mycommand[2]) |
1743 |
- except Exception,e: |
1744 |
- print "caught exception",e," in forked func",mycommand[0] |
1745 |
- sys.exit(0) |
1746 |
- |
1747 |
- #os.execvp(myc,myargs) |
1748 |
- os.execve(myc,myargs,env) |
1749 |
- except SystemExit, e: |
1750 |
- raise |
1751 |
- except Exception, e: |
1752 |
- if not func_call: |
1753 |
- raise str(e)+":\n "+myc+" "+string.join(myargs) |
1754 |
- print "func call failed" |
1755 |
- |
1756 |
- # If the execve fails, we need to report it, and exit |
1757 |
- # *carefully* --- report error here |
1758 |
- os._exit(1) |
1759 |
- sys.exit(1) |
1760 |
- return # should never get reached |
1761 |
- |
1762 |
- # if we were logging, kill the pipes. |
1763 |
- if logfile: |
1764 |
- os.close(pr) |
1765 |
- os.close(pw) |
1766 |
- |
1767 |
- if returnpid: |
1768 |
- return mypid |
1769 |
- |
1770 |
- # loop through pids (typically one, unless logging), either waiting on their death, or waxing them |
1771 |
- # if the main pid (mycommand) returned badly. |
1772 |
- while len(mypid): |
1773 |
- retval=os.waitpid(mypid[-1],0)[1] |
1774 |
- if retval != 0: |
1775 |
- cleanup(mypid[0:-1],block_exceptions=False) |
1776 |
- # at this point we've killed all other kid pids generated via this call. |
1777 |
- # return now. |
1778 |
- if raw_exit_code: |
1779 |
- return retval |
1780 |
- return process_exit_code(retval,throw_signals=raise_signals) |
1781 |
- else: |
1782 |
- mypid.pop(-1) |
1783 |
- cleanup(mypid) |
1784 |
- return 0 |
1785 |
- |
1786 |
-def cmd(mycmd,myexc="",env={}): |
1787 |
- try: |
1788 |
- sys.stdout.flush() |
1789 |
- retval=spawn_bash(mycmd,env) |
1790 |
- if retval != 0: |
1791 |
- raise CatalystError,myexc |
1792 |
- except: |
1793 |
- raise |
1794 |
- |
1795 |
-def process_exit_code(retval,throw_signals=False): |
1796 |
- """process a waitpid returned exit code, returning exit code if it exit'd, or the |
1797 |
- signal if it died from signalling |
1798 |
- if throw_signals is on, it raises a SystemExit if the process was signaled. |
1799 |
- This is intended for usage with threads, although at the moment you can't signal individual |
1800 |
- threads in python, only the master thread, so it's a questionable option.""" |
1801 |
- if (retval & 0xff)==0: |
1802 |
- return retval >> 8 # return exit code |
1803 |
- else: |
1804 |
- if throw_signals: |
1805 |
- #use systemexit, since portage is stupid about exception catching. |
1806 |
- raise SystemExit() |
1807 |
- return (retval & 0xff) << 8 # interrupted by signal |
1808 |
- |
1809 |
-def file_locate(settings,filelist,expand=1): |
1810 |
- #if expand=1, non-absolute paths will be accepted and |
1811 |
- # expanded to os.getcwd()+"/"+localpath if file exists |
1812 |
- for myfile in filelist: |
1813 |
- if myfile not in settings: |
1814 |
- #filenames such as cdtar are optional, so we don't assume the variable is defined. |
1815 |
- pass |
1816 |
- else: |
1817 |
- if len(settings[myfile])==0: |
1818 |
- raise CatalystError, "File variable \""+myfile+"\" has a length of zero (not specified.)" |
1819 |
- if settings[myfile][0]=="/": |
1820 |
- if not os.path.exists(settings[myfile]): |
1821 |
- raise CatalystError, "Cannot locate specified "+myfile+": "+settings[myfile] |
1822 |
- elif expand and os.path.exists(os.getcwd()+"/"+settings[myfile]): |
1823 |
- settings[myfile]=os.getcwd()+"/"+settings[myfile] |
1824 |
- else: |
1825 |
- raise CatalystError, "Cannot locate specified "+myfile+": "+settings[myfile]+" (2nd try)" |
1826 |
-""" |
1827 |
-Spec file format: |
1828 |
- |
1829 |
-The spec file format is a very simple and easy-to-use format for storing data. Here's an example |
1830 |
-file: |
1831 |
- |
1832 |
-item1: value1 |
1833 |
-item2: foo bar oni |
1834 |
-item3: |
1835 |
- meep |
1836 |
- bark |
1837 |
- gleep moop |
1838 |
- |
1839 |
-This file would be interpreted as defining three items: item1, item2 and item3. item1 would contain |
1840 |
-the string value "value1". Item2 would contain an ordered list [ "foo", "bar", "oni" ]. item3 |
1841 |
-would contain an ordered list as well: [ "meep", "bark", "gleep", "moop" ]. It's important to note |
1842 |
-that the order of multiple-value items is preserved, but the order that the items themselves are |
1843 |
-defined are not preserved. In other words, "foo", "bar", "oni" ordering is preserved but "item1" |
1844 |
-"item2" "item3" ordering is not, as the item strings are stored in a dictionary (hash). |
1845 |
-""" |
1846 |
- |
1847 |
-def parse_makeconf(mylines): |
1848 |
- mymakeconf={} |
1849 |
- pos=0 |
1850 |
- pat=re.compile("([0-9a-zA-Z_]*)=(.*)") |
1851 |
- while pos<len(mylines): |
1852 |
- if len(mylines[pos])<=1: |
1853 |
- #skip blanks |
1854 |
- pos += 1 |
1855 |
- continue |
1856 |
- if mylines[pos][0] in ["#"," ","\t"]: |
1857 |
- #skip indented lines, comments |
1858 |
- pos += 1 |
1859 |
- continue |
1860 |
- else: |
1861 |
- myline=mylines[pos] |
1862 |
- mobj=pat.match(myline) |
1863 |
- pos += 1 |
1864 |
- if mobj.group(2): |
1865 |
- clean_string = re.sub(r"\"",r"",mobj.group(2)) |
1866 |
- mymakeconf[mobj.group(1)]=clean_string |
1867 |
- return mymakeconf |
1868 |
- |
1869 |
-def read_makeconf(mymakeconffile): |
1870 |
- if os.path.exists(mymakeconffile): |
1871 |
- try: |
1872 |
- try: |
1873 |
- import snakeoil.fileutils |
1874 |
- return snakeoil.fileutils.read_bash_dict(mymakeconffile, sourcing_command="source") |
1875 |
- except ImportError: |
1876 |
- try: |
1877 |
- import portage.util |
1878 |
- return portage.util.getconfig(mymakeconffile, tolerant=1, allow_sourcing=True) |
1879 |
- except: |
1880 |
- try: |
1881 |
- import portage_util |
1882 |
- return portage_util.getconfig(mymakeconffile, tolerant=1, allow_sourcing=True) |
1883 |
- except ImportError: |
1884 |
- myf=open(mymakeconffile,"r") |
1885 |
- mylines=myf.readlines() |
1886 |
- myf.close() |
1887 |
- return parse_makeconf(mylines) |
1888 |
- except: |
1889 |
- raise CatalystError, "Could not parse make.conf file "+mymakeconffile |
1890 |
- else: |
1891 |
- makeconf={} |
1892 |
- return makeconf |
1893 |
- |
1894 |
-def msg(mymsg,verblevel=1): |
1895 |
- if verbosity>=verblevel: |
1896 |
- print mymsg |
1897 |
- |
1898 |
-def pathcompare(path1,path2): |
1899 |
- # Change double slashes to slash |
1900 |
- path1 = re.sub(r"//",r"/",path1) |
1901 |
- path2 = re.sub(r"//",r"/",path2) |
1902 |
- # Removing ending slash |
1903 |
- path1 = re.sub("/$","",path1) |
1904 |
- path2 = re.sub("/$","",path2) |
1905 |
- |
1906 |
- if path1 == path2: |
1907 |
- return 1 |
1908 |
- return 0 |
1909 |
- |
1910 |
-def ismount(path): |
1911 |
- "enhanced to handle bind mounts" |
1912 |
- if os.path.ismount(path): |
1913 |
- return 1 |
1914 |
- a=os.popen("mount") |
1915 |
- mylines=a.readlines() |
1916 |
- a.close() |
1917 |
- for line in mylines: |
1918 |
- mysplit=line.split() |
1919 |
- if pathcompare(path,mysplit[2]): |
1920 |
- return 1 |
1921 |
- return 0 |
1922 |
- |
1923 |
-def addl_arg_parse(myspec,addlargs,requiredspec,validspec): |
1924 |
- "helper function to help targets parse additional arguments" |
1925 |
- global valid_config_file_values |
1926 |
- |
1927 |
- messages = [] |
1928 |
- for x in addlargs.keys(): |
1929 |
- if x not in validspec and x not in valid_config_file_values and x not in requiredspec: |
1930 |
- messages.append("Argument \""+x+"\" not recognized.") |
1931 |
- else: |
1932 |
- myspec[x]=addlargs[x] |
1933 |
- |
1934 |
- for x in requiredspec: |
1935 |
- if x not in myspec: |
1936 |
- messages.append("Required argument \""+x+"\" not specified.") |
1937 |
- |
1938 |
- if messages: |
1939 |
- raise CatalystError, '\n\tAlso: '.join(messages) |
1940 |
- |
1941 |
-def touch(myfile): |
1942 |
- try: |
1943 |
- myf=open(myfile,"w") |
1944 |
- myf.close() |
1945 |
- except IOError: |
1946 |
- raise CatalystError, "Could not touch "+myfile+"." |
1947 |
- |
1948 |
-def countdown(secs=5, doing="Starting"): |
1949 |
- if secs: |
1950 |
- print ">>> Waiting",secs,"seconds before starting..." |
1951 |
- print ">>> (Control-C to abort)...\n"+doing+" in: ", |
1952 |
- ticks=range(secs) |
1953 |
- ticks.reverse() |
1954 |
- for sec in ticks: |
1955 |
- sys.stdout.write(str(sec+1)+" ") |
1956 |
- sys.stdout.flush() |
1957 |
- time.sleep(1) |
1958 |
- print |
1959 |
- |
1960 |
-def normpath(mypath): |
1961 |
- TrailingSlash=False |
1962 |
- if mypath[-1] == "/": |
1963 |
- TrailingSlash=True |
1964 |
- newpath = os.path.normpath(mypath) |
1965 |
- if len(newpath) > 1: |
1966 |
- if newpath[:2] == "//": |
1967 |
- newpath = newpath[1:] |
1968 |
- if TrailingSlash: |
1969 |
- newpath=newpath+'/' |
1970 |
- return newpath |
1971 |
diff --git a/catalyst/modules/embedded_target.py b/catalyst/modules/embedded_target.py |
1972 |
index f38ea00..7cee7a6 100644 |
1973 |
--- a/catalyst/modules/embedded_target.py |
1974 |
+++ b/catalyst/modules/embedded_target.py |
1975 |
@@ -11,7 +11,7 @@ ROOT=/tmp/submerge emerge --something foo bar . |
1976 |
# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation. |
1977 |
|
1978 |
import os,string,imp,types,shutil |
1979 |
-from catalyst_support import * |
1980 |
+from catalyst.support import * |
1981 |
from generic_stage_target import * |
1982 |
from stat import * |
1983 |
|
1984 |
diff --git a/catalyst/modules/generic_stage_target.py b/catalyst/modules/generic_stage_target.py |
1985 |
index e99e652..5200d8a 100644 |
1986 |
--- a/catalyst/modules/generic_stage_target.py |
1987 |
+++ b/catalyst/modules/generic_stage_target.py |
1988 |
@@ -1,8 +1,8 @@ |
1989 |
import os,string,imp,types,shutil |
1990 |
-from catalyst_support import * |
1991 |
+from catalyst.support import * |
1992 |
from generic_target import * |
1993 |
from stat import * |
1994 |
-import catalyst_lock |
1995 |
+from catalyst.lock import LockDir |
1996 |
|
1997 |
class generic_stage_target(generic_target): |
1998 |
""" |
1999 |
@@ -431,7 +431,7 @@ class generic_stage_target(generic_target): |
2000 |
normpath(self.settings["snapshot_cache"]+"/"+\ |
2001 |
self.settings["snapshot"]+"/") |
2002 |
self.snapcache_lock=\ |
2003 |
- catalyst_lock.LockDir(self.settings["snapshot_cache_path"]) |
2004 |
+ LockDir(self.settings["snapshot_cache_path"]) |
2005 |
print "Caching snapshot to "+self.settings["snapshot_cache_path"] |
2006 |
|
2007 |
def set_chroot_path(self): |
2008 |
@@ -441,7 +441,7 @@ class generic_stage_target(generic_target): |
2009 |
""" |
2010 |
self.settings["chroot_path"]=normpath(self.settings["storedir"]+\ |
2011 |
"/tmp/"+self.settings["target_subpath"]+"/") |
2012 |
- self.chroot_lock=catalyst_lock.LockDir(self.settings["chroot_path"]) |
2013 |
+ self.chroot_lock=LockDir(self.settings["chroot_path"]) |
2014 |
|
2015 |
def set_autoresume_path(self): |
2016 |
self.settings["autoresume_path"]=normpath(self.settings["storedir"]+\ |
2017 |
diff --git a/catalyst/modules/generic_target.py b/catalyst/modules/generic_target.py |
2018 |
index fe96bd7..de51994 100644 |
2019 |
--- a/catalyst/modules/generic_target.py |
2020 |
+++ b/catalyst/modules/generic_target.py |
2021 |
@@ -1,4 +1,4 @@ |
2022 |
-from catalyst_support import * |
2023 |
+from catalyst.support import * |
2024 |
|
2025 |
class generic_target: |
2026 |
""" |
2027 |
diff --git a/catalyst/modules/grp_target.py b/catalyst/modules/grp_target.py |
2028 |
index 6941522..8e70042 100644 |
2029 |
--- a/catalyst/modules/grp_target.py |
2030 |
+++ b/catalyst/modules/grp_target.py |
2031 |
@@ -4,7 +4,7 @@ Gentoo Reference Platform (GRP) target |
2032 |
# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation. |
2033 |
|
2034 |
import os,types,glob |
2035 |
-from catalyst_support import * |
2036 |
+from catalyst.support import * |
2037 |
from generic_stage_target import * |
2038 |
|
2039 |
class grp_target(generic_stage_target): |
2040 |
diff --git a/catalyst/modules/livecd_stage1_target.py b/catalyst/modules/livecd_stage1_target.py |
2041 |
index 59de9bb..ac846ec 100644 |
2042 |
--- a/catalyst/modules/livecd_stage1_target.py |
2043 |
+++ b/catalyst/modules/livecd_stage1_target.py |
2044 |
@@ -3,7 +3,7 @@ LiveCD stage1 target |
2045 |
""" |
2046 |
# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation. |
2047 |
|
2048 |
-from catalyst_support import * |
2049 |
+from catalyst.support import * |
2050 |
from generic_stage_target import * |
2051 |
|
2052 |
class livecd_stage1_target(generic_stage_target): |
2053 |
diff --git a/catalyst/modules/livecd_stage2_target.py b/catalyst/modules/livecd_stage2_target.py |
2054 |
index 5be8fd2..1bfd820 100644 |
2055 |
--- a/catalyst/modules/livecd_stage2_target.py |
2056 |
+++ b/catalyst/modules/livecd_stage2_target.py |
2057 |
@@ -4,7 +4,7 @@ LiveCD stage2 target, builds upon previous LiveCD stage1 tarball |
2058 |
# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation. |
2059 |
|
2060 |
import os,string,types,stat,shutil |
2061 |
-from catalyst_support import * |
2062 |
+from catalyst.support import * |
2063 |
from generic_stage_target import * |
2064 |
|
2065 |
class livecd_stage2_target(generic_stage_target): |
2066 |
diff --git a/catalyst/modules/netboot2_target.py b/catalyst/modules/netboot2_target.py |
2067 |
index 1ab7e7d..2b3cd20 100644 |
2068 |
--- a/catalyst/modules/netboot2_target.py |
2069 |
+++ b/catalyst/modules/netboot2_target.py |
2070 |
@@ -4,7 +4,7 @@ netboot target, version 2 |
2071 |
# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation. |
2072 |
|
2073 |
import os,string,types |
2074 |
-from catalyst_support import * |
2075 |
+from catalyst.support import * |
2076 |
from generic_stage_target import * |
2077 |
|
2078 |
class netboot2_target(generic_stage_target): |
2079 |
diff --git a/catalyst/modules/netboot_target.py b/catalyst/modules/netboot_target.py |
2080 |
index ff2c81f..9d01b7e 100644 |
2081 |
--- a/catalyst/modules/netboot_target.py |
2082 |
+++ b/catalyst/modules/netboot_target.py |
2083 |
@@ -4,7 +4,7 @@ netboot target, version 1 |
2084 |
# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation. |
2085 |
|
2086 |
import os,string,types |
2087 |
-from catalyst_support import * |
2088 |
+from catalyst.support import * |
2089 |
from generic_stage_target import * |
2090 |
|
2091 |
class netboot_target(generic_stage_target): |
2092 |
diff --git a/catalyst/modules/snapshot_target.py b/catalyst/modules/snapshot_target.py |
2093 |
index 29d6e87..e21bd1a 100644 |
2094 |
--- a/catalyst/modules/snapshot_target.py |
2095 |
+++ b/catalyst/modules/snapshot_target.py |
2096 |
@@ -3,7 +3,7 @@ Snapshot target |
2097 |
""" |
2098 |
|
2099 |
import os |
2100 |
-from catalyst_support import * |
2101 |
+from catalyst.support import * |
2102 |
from generic_stage_target import * |
2103 |
|
2104 |
class snapshot_target(generic_stage_target): |
2105 |
diff --git a/catalyst/modules/stage1_target.py b/catalyst/modules/stage1_target.py |
2106 |
index aa43926..25f7116 100644 |
2107 |
--- a/catalyst/modules/stage1_target.py |
2108 |
+++ b/catalyst/modules/stage1_target.py |
2109 |
@@ -3,7 +3,7 @@ stage1 target |
2110 |
""" |
2111 |
# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation. |
2112 |
|
2113 |
-from catalyst_support import * |
2114 |
+from catalyst.support import * |
2115 |
from generic_stage_target import * |
2116 |
|
2117 |
class stage1_target(generic_stage_target): |
2118 |
diff --git a/catalyst/modules/stage2_target.py b/catalyst/modules/stage2_target.py |
2119 |
index 6083e2b..15acdee 100644 |
2120 |
--- a/catalyst/modules/stage2_target.py |
2121 |
+++ b/catalyst/modules/stage2_target.py |
2122 |
@@ -3,7 +3,7 @@ stage2 target, builds upon previous stage1 tarball |
2123 |
""" |
2124 |
# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation. |
2125 |
|
2126 |
-from catalyst_support import * |
2127 |
+from catalyst.support import * |
2128 |
from generic_stage_target import * |
2129 |
|
2130 |
class stage2_target(generic_stage_target): |
2131 |
diff --git a/catalyst/modules/stage3_target.py b/catalyst/modules/stage3_target.py |
2132 |
index 4d3a008..89edd66 100644 |
2133 |
--- a/catalyst/modules/stage3_target.py |
2134 |
+++ b/catalyst/modules/stage3_target.py |
2135 |
@@ -3,7 +3,7 @@ stage3 target, builds upon previous stage2/stage3 tarball |
2136 |
""" |
2137 |
# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation. |
2138 |
|
2139 |
-from catalyst_support import * |
2140 |
+from catalyst.support import * |
2141 |
from generic_stage_target import * |
2142 |
|
2143 |
class stage3_target(generic_stage_target): |
2144 |
diff --git a/catalyst/modules/stage4_target.py b/catalyst/modules/stage4_target.py |
2145 |
index ce41b2d..9168f2e 100644 |
2146 |
--- a/catalyst/modules/stage4_target.py |
2147 |
+++ b/catalyst/modules/stage4_target.py |
2148 |
@@ -3,7 +3,7 @@ stage4 target, builds upon previous stage3/stage4 tarball |
2149 |
""" |
2150 |
# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation. |
2151 |
|
2152 |
-from catalyst_support import * |
2153 |
+from catalyst.support import * |
2154 |
from generic_stage_target import * |
2155 |
|
2156 |
class stage4_target(generic_stage_target): |
2157 |
diff --git a/catalyst/modules/tinderbox_target.py b/catalyst/modules/tinderbox_target.py |
2158 |
index d6d3ea3..5985c5b 100644 |
2159 |
--- a/catalyst/modules/tinderbox_target.py |
2160 |
+++ b/catalyst/modules/tinderbox_target.py |
2161 |
@@ -3,7 +3,7 @@ Tinderbox target |
2162 |
""" |
2163 |
# NOTE: That^^ docstring has influence catalyst-spec(5) man page generation. |
2164 |
|
2165 |
-from catalyst_support import * |
2166 |
+from catalyst.support import * |
2167 |
from generic_stage_target import * |
2168 |
|
2169 |
class tinderbox_target(generic_stage_target): |
2170 |
diff --git a/catalyst/support.py b/catalyst/support.py |
2171 |
new file mode 100644 |
2172 |
index 0000000..316dfa3 |
2173 |
--- /dev/null |
2174 |
+++ b/catalyst/support.py |
2175 |
@@ -0,0 +1,718 @@ |
2176 |
+ |
2177 |
+import sys,string,os,types,re,signal,traceback,time |
2178 |
+#import md5,sha |
2179 |
+selinux_capable = False |
2180 |
+#userpriv_capable = (os.getuid() == 0) |
2181 |
+#fakeroot_capable = False |
2182 |
+BASH_BINARY = "/bin/bash" |
2183 |
+ |
2184 |
+try: |
2185 |
+ import resource |
2186 |
+ max_fd_limit=resource.getrlimit(RLIMIT_NOFILE) |
2187 |
+except SystemExit, e: |
2188 |
+ raise |
2189 |
+except: |
2190 |
+ # hokay, no resource module. |
2191 |
+ max_fd_limit=256 |
2192 |
+ |
2193 |
+# pids this process knows of. |
2194 |
+spawned_pids = [] |
2195 |
+ |
2196 |
+try: |
2197 |
+ import urllib |
2198 |
+except SystemExit, e: |
2199 |
+ raise |
2200 |
+ |
2201 |
+def cleanup(pids,block_exceptions=True): |
2202 |
+ """function to go through and reap the list of pids passed to it""" |
2203 |
+ global spawned_pids |
2204 |
+ if type(pids) == int: |
2205 |
+ pids = [pids] |
2206 |
+ for x in pids: |
2207 |
+ try: |
2208 |
+ os.kill(x,signal.SIGTERM) |
2209 |
+ if os.waitpid(x,os.WNOHANG)[1] == 0: |
2210 |
+ # feisty bugger, still alive. |
2211 |
+ os.kill(x,signal.SIGKILL) |
2212 |
+ os.waitpid(x,0) |
2213 |
+ |
2214 |
+ except OSError, oe: |
2215 |
+ if block_exceptions: |
2216 |
+ pass |
2217 |
+ if oe.errno not in (10,3): |
2218 |
+ raise oe |
2219 |
+ except SystemExit: |
2220 |
+ raise |
2221 |
+ except Exception: |
2222 |
+ if block_exceptions: |
2223 |
+ pass |
2224 |
+ try: spawned_pids.remove(x) |
2225 |
+ except IndexError: pass |
2226 |
+ |
2227 |
+ |
2228 |
+ |
2229 |
+# a function to turn a string of non-printable characters into a string of |
2230 |
+# hex characters |
2231 |
+def hexify(str): |
2232 |
+ hexStr = string.hexdigits |
2233 |
+ r = '' |
2234 |
+ for ch in str: |
2235 |
+ i = ord(ch) |
2236 |
+ r = r + hexStr[(i >> 4) & 0xF] + hexStr[i & 0xF] |
2237 |
+ return r |
2238 |
+# hexify() |
2239 |
+ |
2240 |
+def generate_contents(file,contents_function="auto",verbose=False): |
2241 |
+ try: |
2242 |
+ _ = contents_function |
2243 |
+ if _ == 'auto' and file.endswith('.iso'): |
2244 |
+ _ = 'isoinfo-l' |
2245 |
+ if (_ in ['tar-tv','auto']): |
2246 |
+ if file.endswith('.tgz') or file.endswith('.tar.gz'): |
2247 |
+ _ = 'tar-tvz' |
2248 |
+ elif file.endswith('.tbz2') or file.endswith('.tar.bz2'): |
2249 |
+ _ = 'tar-tvj' |
2250 |
+ elif file.endswith('.tar'): |
2251 |
+ _ = 'tar-tv' |
2252 |
+ |
2253 |
+ if _ == 'auto': |
2254 |
+ warn('File %r has unknown type for automatic detection.' % (file, )) |
2255 |
+ return None |
2256 |
+ else: |
2257 |
+ contents_function = _ |
2258 |
+ _ = contents_map[contents_function] |
2259 |
+ return _[0](file,_[1],verbose) |
2260 |
+ except: |
2261 |
+ raise CatalystError,\ |
2262 |
+ "Error generating contents, is appropriate utility (%s) installed on your system?" \ |
2263 |
+ % (contents_function, ) |
2264 |
+ |
2265 |
+def calc_contents(file,cmd,verbose): |
2266 |
+ args={ 'file': file } |
2267 |
+ cmd=cmd % dict(args) |
2268 |
+ a=os.popen(cmd) |
2269 |
+ mylines=a.readlines() |
2270 |
+ a.close() |
2271 |
+ result="".join(mylines) |
2272 |
+ if verbose: |
2273 |
+ print result |
2274 |
+ return result |
2275 |
+ |
2276 |
+# This has map must be defined after the function calc_content |
2277 |
+# It is possible to call different functions from this but they must be defined |
2278 |
+# before hash_map |
2279 |
+# Key,function,cmd |
2280 |
+contents_map={ |
2281 |
+ # 'find' is disabled because it requires the source path, which is not |
2282 |
+ # always available |
2283 |
+ #"find" :[calc_contents,"find %(path)s"], |
2284 |
+ "tar-tv":[calc_contents,"tar tvf %(file)s"], |
2285 |
+ "tar-tvz":[calc_contents,"tar tvzf %(file)s"], |
2286 |
+ "tar-tvj":[calc_contents,"tar -I lbzip2 -tvf %(file)s"], |
2287 |
+ "isoinfo-l":[calc_contents,"isoinfo -l -i %(file)s"], |
2288 |
+ # isoinfo-f should be a last resort only |
2289 |
+ "isoinfo-f":[calc_contents,"isoinfo -f -i %(file)s"], |
2290 |
+} |
2291 |
+ |
2292 |
+def generate_hash(file,hash_function="crc32",verbose=False): |
2293 |
+ try: |
2294 |
+ return hash_map[hash_function][0](file,hash_map[hash_function][1],hash_map[hash_function][2],\ |
2295 |
+ hash_map[hash_function][3],verbose) |
2296 |
+ except: |
2297 |
+ raise CatalystError,"Error generating hash, is appropriate utility installed on your system?" |
2298 |
+ |
2299 |
+def calc_hash(file,cmd,cmd_args,id_string="MD5",verbose=False): |
2300 |
+ a=os.popen(cmd+" "+cmd_args+" "+file) |
2301 |
+ mylines=a.readlines() |
2302 |
+ a.close() |
2303 |
+ mylines=mylines[0].split() |
2304 |
+ result=mylines[0] |
2305 |
+ if verbose: |
2306 |
+ print id_string+" (%s) = %s" % (file, result) |
2307 |
+ return result |
2308 |
+ |
2309 |
+def calc_hash2(file,cmd,cmd_args,id_string="MD5",verbose=False): |
2310 |
+ a=os.popen(cmd+" "+cmd_args+" "+file) |
2311 |
+ header=a.readline() |
2312 |
+ mylines=a.readline().split() |
2313 |
+ hash=mylines[0] |
2314 |
+ short_file=os.path.split(mylines[1])[1] |
2315 |
+ a.close() |
2316 |
+ result=header+hash+" "+short_file+"\n" |
2317 |
+ if verbose: |
2318 |
+ print header+" (%s) = %s" % (short_file, result) |
2319 |
+ return result |
2320 |
+ |
2321 |
+# This has map must be defined after the function calc_hash |
2322 |
+# It is possible to call different functions from this but they must be defined |
2323 |
+# before hash_map |
2324 |
+# Key,function,cmd,cmd_args,Print string |
2325 |
+hash_map={ |
2326 |
+ "adler32":[calc_hash2,"shash","-a ADLER32","ADLER32"],\ |
2327 |
+ "crc32":[calc_hash2,"shash","-a CRC32","CRC32"],\ |
2328 |
+ "crc32b":[calc_hash2,"shash","-a CRC32B","CRC32B"],\ |
2329 |
+ "gost":[calc_hash2,"shash","-a GOST","GOST"],\ |
2330 |
+ "haval128":[calc_hash2,"shash","-a HAVAL128","HAVAL128"],\ |
2331 |
+ "haval160":[calc_hash2,"shash","-a HAVAL160","HAVAL160"],\ |
2332 |
+ "haval192":[calc_hash2,"shash","-a HAVAL192","HAVAL192"],\ |
2333 |
+ "haval224":[calc_hash2,"shash","-a HAVAL224","HAVAL224"],\ |
2334 |
+ "haval256":[calc_hash2,"shash","-a HAVAL256","HAVAL256"],\ |
2335 |
+ "md2":[calc_hash2,"shash","-a MD2","MD2"],\ |
2336 |
+ "md4":[calc_hash2,"shash","-a MD4","MD4"],\ |
2337 |
+ "md5":[calc_hash2,"shash","-a MD5","MD5"],\ |
2338 |
+ "ripemd128":[calc_hash2,"shash","-a RIPEMD128","RIPEMD128"],\ |
2339 |
+ "ripemd160":[calc_hash2,"shash","-a RIPEMD160","RIPEMD160"],\ |
2340 |
+ "ripemd256":[calc_hash2,"shash","-a RIPEMD256","RIPEMD256"],\ |
2341 |
+ "ripemd320":[calc_hash2,"shash","-a RIPEMD320","RIPEMD320"],\ |
2342 |
+ "sha1":[calc_hash2,"shash","-a SHA1","SHA1"],\ |
2343 |
+ "sha224":[calc_hash2,"shash","-a SHA224","SHA224"],\ |
2344 |
+ "sha256":[calc_hash2,"shash","-a SHA256","SHA256"],\ |
2345 |
+ "sha384":[calc_hash2,"shash","-a SHA384","SHA384"],\ |
2346 |
+ "sha512":[calc_hash2,"shash","-a SHA512","SHA512"],\ |
2347 |
+ "snefru128":[calc_hash2,"shash","-a SNEFRU128","SNEFRU128"],\ |
2348 |
+ "snefru256":[calc_hash2,"shash","-a SNEFRU256","SNEFRU256"],\ |
2349 |
+ "tiger":[calc_hash2,"shash","-a TIGER","TIGER"],\ |
2350 |
+ "tiger128":[calc_hash2,"shash","-a TIGER128","TIGER128"],\ |
2351 |
+ "tiger160":[calc_hash2,"shash","-a TIGER160","TIGER160"],\ |
2352 |
+ "whirlpool":[calc_hash2,"shash","-a WHIRLPOOL","WHIRLPOOL"],\ |
2353 |
+ } |
2354 |
+ |
2355 |
+def read_from_clst(file): |
2356 |
+ line = '' |
2357 |
+ myline = '' |
2358 |
+ try: |
2359 |
+ myf=open(file,"r") |
2360 |
+ except: |
2361 |
+ return -1 |
2362 |
+ #raise CatalystError, "Could not open file "+file |
2363 |
+ for line in myf.readlines(): |
2364 |
+ #line = string.replace(line, "\n", "") # drop newline |
2365 |
+ myline = myline + line |
2366 |
+ myf.close() |
2367 |
+ return myline |
2368 |
+# read_from_clst |
2369 |
+ |
2370 |
+# these should never be touched |
2371 |
+required_build_targets=["generic_target","generic_stage_target"] |
2372 |
+ |
2373 |
+# new build types should be added here |
2374 |
+valid_build_targets=["stage1_target","stage2_target","stage3_target","stage4_target","grp_target", |
2375 |
+ "livecd_stage1_target","livecd_stage2_target","embedded_target", |
2376 |
+ "tinderbox_target","snapshot_target","netboot_target","netboot2_target"] |
2377 |
+ |
2378 |
+required_config_file_values=["storedir","sharedir","distdir","portdir"] |
2379 |
+valid_config_file_values=required_config_file_values[:] |
2380 |
+valid_config_file_values.append("PKGCACHE") |
2381 |
+valid_config_file_values.append("KERNCACHE") |
2382 |
+valid_config_file_values.append("CCACHE") |
2383 |
+valid_config_file_values.append("DISTCC") |
2384 |
+valid_config_file_values.append("ICECREAM") |
2385 |
+valid_config_file_values.append("ENVSCRIPT") |
2386 |
+valid_config_file_values.append("AUTORESUME") |
2387 |
+valid_config_file_values.append("FETCH") |
2388 |
+valid_config_file_values.append("CLEAR_AUTORESUME") |
2389 |
+valid_config_file_values.append("options") |
2390 |
+valid_config_file_values.append("DEBUG") |
2391 |
+valid_config_file_values.append("VERBOSE") |
2392 |
+valid_config_file_values.append("PURGE") |
2393 |
+valid_config_file_values.append("PURGEONLY") |
2394 |
+valid_config_file_values.append("SNAPCACHE") |
2395 |
+valid_config_file_values.append("snapshot_cache") |
2396 |
+valid_config_file_values.append("hash_function") |
2397 |
+valid_config_file_values.append("digests") |
2398 |
+valid_config_file_values.append("contents") |
2399 |
+valid_config_file_values.append("SEEDCACHE") |
2400 |
+ |
2401 |
+verbosity=1 |
2402 |
+ |
2403 |
+def list_bashify(mylist): |
2404 |
+ if type(mylist)==types.StringType: |
2405 |
+ mypack=[mylist] |
2406 |
+ else: |
2407 |
+ mypack=mylist[:] |
2408 |
+ for x in range(0,len(mypack)): |
2409 |
+ # surround args with quotes for passing to bash, |
2410 |
+ # allows things like "<" to remain intact |
2411 |
+ mypack[x]="'"+mypack[x]+"'" |
2412 |
+ mypack=string.join(mypack) |
2413 |
+ return mypack |
2414 |
+ |
2415 |
+def list_to_string(mylist): |
2416 |
+ if type(mylist)==types.StringType: |
2417 |
+ mypack=[mylist] |
2418 |
+ else: |
2419 |
+ mypack=mylist[:] |
2420 |
+ for x in range(0,len(mypack)): |
2421 |
+ # surround args with quotes for passing to bash, |
2422 |
+ # allows things like "<" to remain intact |
2423 |
+ mypack[x]=mypack[x] |
2424 |
+ mypack=string.join(mypack) |
2425 |
+ return mypack |
2426 |
+ |
2427 |
+class CatalystError(Exception): |
2428 |
+ def __init__(self, message): |
2429 |
+ if message: |
2430 |
+ (type,value)=sys.exc_info()[:2] |
2431 |
+ if value!=None: |
2432 |
+ print |
2433 |
+ print traceback.print_exc(file=sys.stdout) |
2434 |
+ print |
2435 |
+ print "!!! catalyst: "+message |
2436 |
+ print |
2437 |
+ |
2438 |
+class LockInUse(Exception): |
2439 |
+ def __init__(self, message): |
2440 |
+ if message: |
2441 |
+ #(type,value)=sys.exc_info()[:2] |
2442 |
+ #if value!=None: |
2443 |
+ #print |
2444 |
+ #kprint traceback.print_exc(file=sys.stdout) |
2445 |
+ print |
2446 |
+ print "!!! catalyst lock file in use: "+message |
2447 |
+ print |
2448 |
+ |
2449 |
+def die(msg=None): |
2450 |
+ warn(msg) |
2451 |
+ sys.exit(1) |
2452 |
+ |
2453 |
+def warn(msg): |
2454 |
+ print "!!! catalyst: "+msg |
2455 |
+ |
2456 |
+def find_binary(myc): |
2457 |
+ """look through the environmental path for an executable file named whatever myc is""" |
2458 |
+ # this sucks. badly. |
2459 |
+ p=os.getenv("PATH") |
2460 |
+ if p == None: |
2461 |
+ return None |
2462 |
+ for x in p.split(":"): |
2463 |
+ #if it exists, and is executable |
2464 |
+ if os.path.exists("%s/%s" % (x,myc)) and os.stat("%s/%s" % (x,myc))[0] & 0x0248: |
2465 |
+ return "%s/%s" % (x,myc) |
2466 |
+ return None |
2467 |
+ |
2468 |
+def spawn_bash(mycommand,env={},debug=False,opt_name=None,**keywords): |
2469 |
+ """spawn mycommand as an arguement to bash""" |
2470 |
+ args=[BASH_BINARY] |
2471 |
+ if not opt_name: |
2472 |
+ opt_name=mycommand.split()[0] |
2473 |
+ if "BASH_ENV" not in env: |
2474 |
+ env["BASH_ENV"] = "/etc/spork/is/not/valid/profile.env" |
2475 |
+ if debug: |
2476 |
+ args.append("-x") |
2477 |
+ args.append("-c") |
2478 |
+ args.append(mycommand) |
2479 |
+ return spawn(args,env=env,opt_name=opt_name,**keywords) |
2480 |
+ |
2481 |
+#def spawn_get_output(mycommand,spawn_type=spawn,raw_exit_code=False,emulate_gso=True, \ |
2482 |
+# collect_fds=[1],fd_pipes=None,**keywords): |
2483 |
+ |
2484 |
+def spawn_get_output(mycommand,raw_exit_code=False,emulate_gso=True, \ |
2485 |
+ collect_fds=[1],fd_pipes=None,**keywords): |
2486 |
+ """call spawn, collecting the output to fd's specified in collect_fds list |
2487 |
+ emulate_gso is a compatability hack to emulate commands.getstatusoutput's return, minus the |
2488 |
+ requirement it always be a bash call (spawn_type controls the actual spawn call), and minus the |
2489 |
+ 'lets let log only stdin and let stderr slide by'. |
2490 |
+ |
2491 |
+ emulate_gso was deprecated from the day it was added, so convert your code over. |
2492 |
+ spawn_type is the passed in function to call- typically spawn_bash, spawn, spawn_sandbox, or spawn_fakeroot""" |
2493 |
+ global selinux_capable |
2494 |
+ pr,pw=os.pipe() |
2495 |
+ |
2496 |
+ #if type(spawn_type) not in [types.FunctionType, types.MethodType]: |
2497 |
+ # s="spawn_type must be passed a function, not",type(spawn_type),spawn_type |
2498 |
+ # raise Exception,s |
2499 |
+ |
2500 |
+ if fd_pipes==None: |
2501 |
+ fd_pipes={} |
2502 |
+ fd_pipes[0] = 0 |
2503 |
+ |
2504 |
+ for x in collect_fds: |
2505 |
+ fd_pipes[x] = pw |
2506 |
+ keywords["returnpid"]=True |
2507 |
+ |
2508 |
+ mypid=spawn_bash(mycommand,fd_pipes=fd_pipes,**keywords) |
2509 |
+ os.close(pw) |
2510 |
+ if type(mypid) != types.ListType: |
2511 |
+ os.close(pr) |
2512 |
+ return [mypid, "%s: No such file or directory" % mycommand.split()[0]] |
2513 |
+ |
2514 |
+ fd=os.fdopen(pr,"r") |
2515 |
+ mydata=fd.readlines() |
2516 |
+ fd.close() |
2517 |
+ if emulate_gso: |
2518 |
+ mydata=string.join(mydata) |
2519 |
+ if len(mydata) and mydata[-1] == "\n": |
2520 |
+ mydata=mydata[:-1] |
2521 |
+ retval=os.waitpid(mypid[0],0)[1] |
2522 |
+ cleanup(mypid) |
2523 |
+ if raw_exit_code: |
2524 |
+ return [retval,mydata] |
2525 |
+ retval=process_exit_code(retval) |
2526 |
+ return [retval, mydata] |
2527 |
+ |
2528 |
+# base spawn function |
2529 |
+def spawn(mycommand,env={},raw_exit_code=False,opt_name=None,fd_pipes=None,returnpid=False,\ |
2530 |
+ uid=None,gid=None,groups=None,umask=None,logfile=None,path_lookup=True,\ |
2531 |
+ selinux_context=None, raise_signals=False, func_call=False): |
2532 |
+ """base fork/execve function. |
2533 |
+ mycommand is the desired command- if you need a command to execute in a bash/sandbox/fakeroot |
2534 |
+ environment, use the appropriate spawn call. This is a straight fork/exec code path. |
2535 |
+ Can either have a tuple, or a string passed in. If uid/gid/groups/umask specified, it changes |
2536 |
+ the forked process to said value. If path_lookup is on, a non-absolute command will be converted |
2537 |
+ to an absolute command, otherwise it returns None. |
2538 |
+ |
2539 |
+ selinux_context is the desired context, dependant on selinux being available. |
2540 |
+ opt_name controls the name the processor goes by. |
2541 |
+ fd_pipes controls which file descriptor numbers are left open in the forked process- it's a dict of |
2542 |
+ current fd's raw fd #, desired #. |
2543 |
+ |
2544 |
+ func_call is a boolean for specifying to execute a python function- use spawn_func instead. |
2545 |
+ raise_signals is questionable. Basically throw an exception if signal'd. No exception is thrown |
2546 |
+ if raw_input is on. |
2547 |
+ |
2548 |
+ logfile overloads the specified fd's to write to a tee process which logs to logfile |
2549 |
+ returnpid returns the relevant pids (a list, including the logging process if logfile is on). |
2550 |
+ |
2551 |
+ non-returnpid calls to spawn will block till the process has exited, returning the exitcode/signal |
2552 |
+ raw_exit_code controls whether the actual waitpid result is returned, or intrepretted.""" |
2553 |
+ |
2554 |
+ myc='' |
2555 |
+ if not func_call: |
2556 |
+ if type(mycommand)==types.StringType: |
2557 |
+ mycommand=mycommand.split() |
2558 |
+ myc = mycommand[0] |
2559 |
+ if not os.access(myc, os.X_OK): |
2560 |
+ if not path_lookup: |
2561 |
+ return None |
2562 |
+ myc = find_binary(myc) |
2563 |
+ if myc == None: |
2564 |
+ return None |
2565 |
+ mypid=[] |
2566 |
+ if logfile: |
2567 |
+ pr,pw=os.pipe() |
2568 |
+ mypid.extend(spawn(('tee','-i','-a',logfile),returnpid=True,fd_pipes={0:pr,1:1,2:2})) |
2569 |
+ retval=os.waitpid(mypid[-1],os.WNOHANG)[1] |
2570 |
+ if retval != 0: |
2571 |
+ # he's dead jim. |
2572 |
+ if raw_exit_code: |
2573 |
+ return retval |
2574 |
+ return process_exit_code(retval) |
2575 |
+ |
2576 |
+ if fd_pipes == None: |
2577 |
+ fd_pipes={} |
2578 |
+ fd_pipes[0] = 0 |
2579 |
+ fd_pipes[1]=pw |
2580 |
+ fd_pipes[2]=pw |
2581 |
+ |
2582 |
+ if not opt_name: |
2583 |
+ opt_name = mycommand[0] |
2584 |
+ myargs=[opt_name] |
2585 |
+ myargs.extend(mycommand[1:]) |
2586 |
+ global spawned_pids |
2587 |
+ mypid.append(os.fork()) |
2588 |
+ if mypid[-1] != 0: |
2589 |
+ #log the bugger. |
2590 |
+ spawned_pids.extend(mypid) |
2591 |
+ |
2592 |
+ if mypid[-1] == 0: |
2593 |
+ if func_call: |
2594 |
+ spawned_pids = [] |
2595 |
+ |
2596 |
+ # this may look ugly, but basically it moves file descriptors around to ensure no |
2597 |
+ # handles that are needed are accidentally closed during the final dup2 calls. |
2598 |
+ trg_fd=[] |
2599 |
+ if type(fd_pipes)==types.DictType: |
2600 |
+ src_fd=[] |
2601 |
+ k=fd_pipes.keys() |
2602 |
+ k.sort() |
2603 |
+ |
2604 |
+ #build list of which fds will be where, and where they are at currently |
2605 |
+ for x in k: |
2606 |
+ trg_fd.append(x) |
2607 |
+ src_fd.append(fd_pipes[x]) |
2608 |
+ |
2609 |
+ # run through said list dup'ing descriptors so that they won't be waxed |
2610 |
+ # by other dup calls. |
2611 |
+ for x in range(0,len(trg_fd)): |
2612 |
+ if trg_fd[x] == src_fd[x]: |
2613 |
+ continue |
2614 |
+ if trg_fd[x] in src_fd[x+1:]: |
2615 |
+ new=os.dup2(trg_fd[x],max(src_fd) + 1) |
2616 |
+ os.close(trg_fd[x]) |
2617 |
+ try: |
2618 |
+ while True: |
2619 |
+ src_fd[s.index(trg_fd[x])]=new |
2620 |
+ except SystemExit, e: |
2621 |
+ raise |
2622 |
+ except: |
2623 |
+ pass |
2624 |
+ |
2625 |
+ # transfer the fds to their final pre-exec position. |
2626 |
+ for x in range(0,len(trg_fd)): |
2627 |
+ if trg_fd[x] != src_fd[x]: |
2628 |
+ os.dup2(src_fd[x], trg_fd[x]) |
2629 |
+ else: |
2630 |
+ trg_fd=[0,1,2] |
2631 |
+ |
2632 |
+ # wax all open descriptors that weren't requested be left open. |
2633 |
+ for x in range(0,max_fd_limit): |
2634 |
+ if x not in trg_fd: |
2635 |
+ try: |
2636 |
+ os.close(x) |
2637 |
+ except SystemExit, e: |
2638 |
+ raise |
2639 |
+ except: |
2640 |
+ pass |
2641 |
+ |
2642 |
+ # note this order must be preserved- can't change gid/groups if you change uid first. |
2643 |
+ if selinux_capable and selinux_context: |
2644 |
+ import selinux |
2645 |
+ selinux.setexec(selinux_context) |
2646 |
+ if gid: |
2647 |
+ os.setgid(gid) |
2648 |
+ if groups: |
2649 |
+ os.setgroups(groups) |
2650 |
+ if uid: |
2651 |
+ os.setuid(uid) |
2652 |
+ if umask: |
2653 |
+ os.umask(umask) |
2654 |
+ else: |
2655 |
+ os.umask(022) |
2656 |
+ |
2657 |
+ try: |
2658 |
+ #print "execing", myc, myargs |
2659 |
+ if func_call: |
2660 |
+ # either use a passed in func for interpretting the results, or return if no exception. |
2661 |
+ # note the passed in list, and dict are expanded. |
2662 |
+ if len(mycommand) == 4: |
2663 |
+ os._exit(mycommand[3](mycommand[0](*mycommand[1],**mycommand[2]))) |
2664 |
+ try: |
2665 |
+ mycommand[0](*mycommand[1],**mycommand[2]) |
2666 |
+ except Exception,e: |
2667 |
+ print "caught exception",e," in forked func",mycommand[0] |
2668 |
+ sys.exit(0) |
2669 |
+ |
2670 |
+ #os.execvp(myc,myargs) |
2671 |
+ os.execve(myc,myargs,env) |
2672 |
+ except SystemExit, e: |
2673 |
+ raise |
2674 |
+ except Exception, e: |
2675 |
+ if not func_call: |
2676 |
+ raise str(e)+":\n "+myc+" "+string.join(myargs) |
2677 |
+ print "func call failed" |
2678 |
+ |
2679 |
+ # If the execve fails, we need to report it, and exit |
2680 |
+ # *carefully* --- report error here |
2681 |
+ os._exit(1) |
2682 |
+ sys.exit(1) |
2683 |
+ return # should never get reached |
2684 |
+ |
2685 |
+ # if we were logging, kill the pipes. |
2686 |
+ if logfile: |
2687 |
+ os.close(pr) |
2688 |
+ os.close(pw) |
2689 |
+ |
2690 |
+ if returnpid: |
2691 |
+ return mypid |
2692 |
+ |
2693 |
+ # loop through pids (typically one, unless logging), either waiting on their death, or waxing them |
2694 |
+ # if the main pid (mycommand) returned badly. |
2695 |
+ while len(mypid): |
2696 |
+ retval=os.waitpid(mypid[-1],0)[1] |
2697 |
+ if retval != 0: |
2698 |
+ cleanup(mypid[0:-1],block_exceptions=False) |
2699 |
+ # at this point we've killed all other kid pids generated via this call. |
2700 |
+ # return now. |
2701 |
+ if raw_exit_code: |
2702 |
+ return retval |
2703 |
+ return process_exit_code(retval,throw_signals=raise_signals) |
2704 |
+ else: |
2705 |
+ mypid.pop(-1) |
2706 |
+ cleanup(mypid) |
2707 |
+ return 0 |
2708 |
+ |
2709 |
def cmd(mycmd,myexc="",env={}):
	"""Run a shell command via spawn_bash, raising on failure.

	mycmd -- command string handed straight to spawn_bash
	myexc -- message carried by the CatalystError raised on non-zero exit
	env   -- environment mapping forwarded to spawn_bash; the shared default
	         dict is never mutated here, so the mutable-default pitfall is
	         inert (kept as-is for interface compatibility)

	Fix: removed the enclosing try block whose only handler was a bare
	``except: raise`` -- it re-raised every exception unchanged and served
	no purpose.
	"""
	# Flush our own stdout first so the child's output is not interleaved
	# ahead of anything we already buffered.
	sys.stdout.flush()
	retval=spawn_bash(mycmd,env)
	if retval != 0:
		raise CatalystError(myexc)
2717 |
+ |
2718 |
def process_exit_code(retval,throw_signals=False):
	"""Decode a waitpid()-style status word.

	If the process exited normally, return its exit code (the high byte).
	If it died from a signal, either raise SystemExit (when throw_signals
	is set -- intended for threaded use, though per the original note only
	the master thread can be signalled in python, so it's a questionable
	option) or return the signal number shifted into the high byte.
	"""
	status_low = retval & 0xff
	if status_low == 0:
		# clean exit: the exit code lives in the high byte
		return retval >> 8
	if throw_signals:
		# use systemexit, since portage is stupid about exception catching.
		raise SystemExit()
	# interrupted by signal: report the signal number in the high byte
	return status_low << 8
2731 |
+ |
2732 |
def file_locate(settings,filelist,expand=1):
	"""Validate (and optionally expand) the file paths named in *filelist*.

	For each key of filelist present in settings: an empty value or an
	unlocatable path raises CatalystError; with expand on, a relative path
	that exists under os.getcwd() is rewritten in-place to its absolute
	form.  Keys absent from settings are skipped silently.
	"""
	for myfile in filelist:
		if myfile not in settings:
			# filenames such as cdtar are optional, so we don't assume
			# the variable is defined.
			continue
		if len(settings[myfile])==0:
			raise CatalystError("File variable \""+myfile+"\" has a length of zero (not specified.)")
		if settings[myfile][0]=="/":
			if not os.path.exists(settings[myfile]):
				raise CatalystError("Cannot locate specified "+myfile+": "+settings[myfile])
		elif expand and os.path.exists(os.getcwd()+"/"+settings[myfile]):
			# if expand=1, non-absolute paths are accepted and expanded
			# to os.getcwd()+"/"+localpath if the file exists there
			settings[myfile]=os.getcwd()+"/"+settings[myfile]
		else:
			raise CatalystError("Cannot locate specified "+myfile+": "+settings[myfile]+" (2nd try)")
2749 |
+""" |
2750 |
+Spec file format: |
2751 |
+ |
2752 |
+The spec file format is a very simple and easy-to-use format for storing data. Here's an example |
2753 |
+file: |
2754 |
+ |
2755 |
+item1: value1 |
2756 |
+item2: foo bar oni |
2757 |
+item3: |
2758 |
+ meep |
2759 |
+ bark |
2760 |
+ gleep moop |
2761 |
+ |
2762 |
+This file would be interpreted as defining three items: item1, item2 and item3. item1 would contain |
2763 |
+the string value "value1". Item2 would contain an ordered list [ "foo", "bar", "oni" ]. item3 |
2764 |
+would contain an ordered list as well: [ "meep", "bark", "gleep", "moop" ]. It's important to note |
2765 |
+that the order of multiple-value items is preserved, but the order that the items themselves are |
2766 |
+defined are not preserved. In other words, "foo", "bar", "oni" ordering is preserved but "item1" |
2767 |
+"item2" "item3" ordering is not, as the item strings are stored in a dictionary (hash). |
2768 |
+""" |
2769 |
+ |
2770 |
def parse_makeconf(mylines):
	"""Parse VAR=value lines from a make.conf-style file into a dict.

	mylines -- list of raw lines (e.g. from readlines()); blank lines,
	comments, and indented continuation lines are skipped.  Double quotes
	are stripped from values.  Returns {VAR: value}.

	Fix: the original dereferenced ``pat.match(myline)`` without a None
	check, so any non-comment, non-indented line lacking '=' crashed with
	AttributeError; such lines are now skipped.
	"""
	mymakeconf={}
	pat=re.compile("([0-9a-zA-Z_]*)=(.*)")
	for myline in mylines:
		if len(myline)<=1:
			# skip blanks (a bare newline has length 1)
			continue
		if myline[0] in ["#"," ","\t"]:
			# skip indented lines, comments
			continue
		mobj=pat.match(myline)
		# Fix: mobj is None for lines without '=' -- skip instead of crash.
		if mobj and mobj.group(2):
			clean_string = re.sub(r"\"",r"",mobj.group(2))
			mymakeconf[mobj.group(1)]=clean_string
	return mymakeconf
2791 |
+ |
2792 |
def read_makeconf(mymakeconffile):
	"""Read a make.conf-style file into a dict of variables.

	Tries progressively older parsers: snakeoil's read_bash_dict, then
	portage.util.getconfig, then the legacy portage_util module, and
	finally the in-house parse_makeconf fallback.  A missing file yields
	an empty dict; a parse failure raises CatalystError.
	"""
	if not os.path.exists(mymakeconffile):
		return {}
	try:
		try:
			import snakeoil.fileutils
			return snakeoil.fileutils.read_bash_dict(mymakeconffile, sourcing_command="source")
		except ImportError:
			try:
				import portage.util
				return portage.util.getconfig(mymakeconffile, tolerant=1, allow_sourcing=True)
			except:
				try:
					import portage_util
					return portage_util.getconfig(mymakeconffile, tolerant=1, allow_sourcing=True)
				except ImportError:
					# last resort: naive in-house parser
					with open(mymakeconffile,"r") as myf:
						mylines=myf.readlines()
					return parse_makeconf(mylines)
	except:
		raise CatalystError("Could not parse make.conf file "+mymakeconffile)
2816 |
+ |
2817 |
def msg(mymsg,verblevel=1):
	"""Print *mymsg* when the module-global ``verbosity`` (defined
	elsewhere in this module) is at least *verblevel*."""
	if verbosity>=verblevel:
		print(mymsg)
2820 |
+ |
2821 |
def pathcompare(path1,path2):
	"""Return 1 if the two paths are textually equivalent, else 0.

	Equivalence is tested after a single-pass collapse of "//" to "/" and
	removal of one trailing slash from each path.
	"""
	def _canon(p):
		# change double slashes to slash, then remove an ending slash
		p = re.sub(r"//", r"/", p)
		return re.sub("/$", "", p)
	return 1 if _canon(path1) == _canon(path2) else 0
2832 |
+ |
2833 |
def ismount(path):
	"""enhanced to handle bind mounts: falls back to scanning `mount`
	output when os.path.ismount says no."""
	if os.path.ismount(path):
		return 1
	# Canonicalise like pathcompare does (logic inlined as a local helper):
	# collapse "//" to "/" in one pass, drop one trailing slash.
	def _canon(p):
		return re.sub("/$", "", re.sub(r"//", r"/", p))
	wanted = _canon(path)
	pipe = os.popen("mount")
	entries = pipe.readlines()
	pipe.close()
	for entry in entries:
		fields = entry.split()
		# field 2 of `mount` output is the mount point
		if _canon(fields[2]) == wanted:
			return 1
	return 0
2845 |
+ |
2846 |
def addl_arg_parse(myspec,addlargs,requiredspec,validspec):
	"helper function to help targets parse additional arguments"
	# valid_config_file_values is a module-global whitelist (defined
	# elsewhere in this module); only consulted for keys not in validspec.
	global valid_config_file_values

	messages = []
	for key, value in addlargs.items():
		if key not in validspec and key not in valid_config_file_values and key not in requiredspec:
			messages.append("Argument \""+key+"\" not recognized.")
		else:
			myspec[key]=value

	for key in requiredspec:
		if key not in myspec:
			messages.append("Required argument \""+key+"\" not specified.")

	if messages:
		raise CatalystError('\n\tAlso: '.join(messages))
2863 |
+ |
2864 |
def touch(myfile):
	"""Create *myfile* (or truncate it to zero length if it already
	exists); raise CatalystError if it cannot be opened for writing."""
	try:
		open(myfile,"w").close()
	except IOError:
		raise CatalystError("Could not touch "+myfile+".")
2870 |
+ |
2871 |
def countdown(secs=5, doing="Starting"):
	"""Give the user *secs* seconds to Control-C out before proceeding,
	printing a tick per second.  A zero/false *secs* is a no-op."""
	if secs:
		print(">>> Waiting "+str(secs)+" seconds before starting...")
		sys.stdout.write(">>> (Control-C to abort)...\n"+doing+" in: ")
		for remaining in range(secs, 0, -1):
			sys.stdout.write(str(remaining)+" ")
			sys.stdout.flush()
			time.sleep(1)
		sys.stdout.write("\n")
2882 |
+ |
2883 |
def normpath(mypath):
	"""os.path.normpath with two catalyst-specific tweaks: a trailing
	slash on the input is preserved on the output, and a leading "//"
	(which POSIX normpath may leave intact) is collapsed to "/".

	Fix: return empty input unchanged -- the original indexed
	``mypath[-1]`` unconditionally and raised IndexError on "".
	"""
	if not mypath:
		return mypath
	trailing_slash = mypath.endswith("/")
	newpath = os.path.normpath(mypath)
	# os.path.normpath keeps an initial "//" (POSIX allows it to be
	# special); catalyst wants a single slash.
	if len(newpath) > 1 and newpath[:2] == "//":
		newpath = newpath[1:]
	if trailing_slash:
		# NOTE: normpath("/") therefore yields "//", matching the
		# original behaviour.
		newpath = newpath + '/'
	return newpath
2894 |
-- |
2895 |
1.8.3.2 |