1 |
commit: 833002b191944eeee586dc7b5e4e85408d9be621 |
2 |
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org> |
3 |
AuthorDate: Fri Apr 27 17:44:43 2012 +0000 |
4 |
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org> |
5 |
CommitDate: Fri Apr 27 17:44:43 2012 +0000 |
6 |
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=833002b1 |
7 |
|
8 |
major update |
9 |
|
10 |
--- |
11 |
ebuild/dev-python/gobs/gobs-9999.ebuild~ | 50 + |
12 |
gobs/bin/gobs_buildquerys | 12 +- |
13 |
gobs/bin/{gobs_buildquerys => gobs_buildquerys~} | 6 +- |
14 |
gobs/bin/gobs_portage_hooks~ | 79 + |
15 |
gobs/bin/gobs_setup_profile~ | 12 + |
16 |
gobs/bin/gobs_updatedb | 88 +- |
17 |
gobs/bin/{gobs_updatedb => gobs_updatedb~} | 87 +- |
18 |
gobs/doc/Setup.txt~ | 7 + |
19 |
gobs/doc/portage/all/bashrc~ | 7 + |
20 |
gobs/doc/portage/base/make.conf | 15 + |
21 |
gobs/pym/ConnectionManager.py~ | 56 + |
22 |
gobs/pym/Scheduler.py~ | 1994 ++++++++++++++++++++++ |
23 |
gobs/pym/arch.py~ | 25 + |
24 |
gobs/pym/build_log.py | 20 +- |
25 |
gobs/pym/{build_log.py => build_log.py~} | 0 |
26 |
gobs/pym/build_queru.py~ | 708 ++++++++ |
27 |
gobs/pym/categories.py~ | 30 + |
28 |
gobs/pym/check_setup.py | 25 +- |
29 |
gobs/pym/{check_setup.py => check_setup.py~} | 25 +- |
30 |
gobs/pym/depclean.py~ | 632 +++++++ |
31 |
gobs/pym/flags.py~ | 219 +++ |
32 |
gobs/pym/init_setup_profile.py~ | 86 + |
33 |
gobs/pym/manifest.py~ | 124 ++ |
34 |
gobs/pym/old_cpv.py~ | 89 + |
35 |
gobs/pym/package.py | 9 +- |
36 |
gobs/pym/{package.py => package.py~} | 9 +- |
37 |
gobs/pym/pgsql.py | 32 +- |
38 |
gobs/pym/{pgsql.py => pgsql.py~} | 32 +- |
39 |
gobs/pym/readconf.py~ | 46 + |
40 |
gobs/pym/repoman_gobs.py~ | 48 + |
41 |
gobs/pym/sync.py | 23 + |
42 |
gobs/pym/sync.py~ | 23 + |
43 |
gobs/pym/text.py~ | 48 + |
44 |
33 files changed, 4525 insertions(+), 141 deletions(-) |
45 |
|
46 |
diff --git a/ebuild/dev-python/gobs/gobs-9999.ebuild~ b/ebuild/dev-python/gobs/gobs-9999.ebuild~ |
47 |
new file mode 100644 |
48 |
index 0000000..22fa1b7 |
49 |
--- /dev/null |
50 |
+++ b/ebuild/dev-python/gobs/gobs-9999.ebuild~ |
51 |
@@ -0,0 +1,50 @@ |
52 |
+# Copyright 1999-2010 Gentoo Foundation |
53 |
+# Distributed under the terms of the GNU General Public License v2 |
54 |
+# $Header: $ |
55 |
+ |
56 |
+EAPI="2" |
57 |
+PYTHON_DEPEND="*:2.7" |
58 |
+SUPPORT_PYTHON_ABIS="1" |
59 |
+ |
60 |
+inherit distutils git-2 |
61 |
+ |
62 |
+DESCRIPTION="Gobs" |
63 |
+HOMEPAGE="http://git.overlays.gentoo.org/gitroot/dev/zorry.git" |
64 |
+SRC_URI="" |
65 |
+LICENSE="GPL-2" |
66 |
+KEYWORDS="~amd64" |
67 |
+SLOT="0" |
68 |
+IUSE="+postgresql" |
69 |
+ |
70 |
+RDEPEND="sys-apps/portage |
71 |
+ >=dev-python/git-python-0.3.2_rc1 |
72 |
+ postgresql? ( dev-python/psycopg )" |
73 |
+ |
74 |
+DEPEND="${RDEPEND} |
75 |
+ dev-python/setuptools" |
76 |
+ |
77 |
+# RESTRICT_PYTHON_ABIS="3.*" |
78 |
+ |
79 |
+EGIT_REPO_URI="http://git.overlays.gentoo.org/gitroot/dev/zorry.git" |
80 |
+#EGIT_FETCH_CMD="git clone" |
81 |
+##EGIT_BRANCH="master" |
82 |
+##EGIT_COMMIT=${EGIT_BRANCH} |
83 |
+# The eclass is based on subversion eclass. |
84 |
+# If you use this eclass, the ${S} is ${WORKDIR}/${P}. |
85 |
+# It is necessary to define the EGIT_REPO_URI variable at least. |
86 |
+ |
87 |
+PYTHON_MODNAME="gobs" |
88 |
+ |
89 |
+src_install() { |
90 |
+ dodir /var/lib/gobs || die |
91 |
+ dodir etc/gobs || die |
92 |
+ insinto /etc/gobs |
93 |
+ doins ${FILESDIR}/gobs.conf || die |
94 |
+ dobin ${S}/gobs/bin/gobs_updatedb || die |
95 |
+ dobin ${S}/gobs/bin/gobs_portage_hooks || die |
96 |
+ dosbin ${S}/gobs/bin/gobs_buildquerys || die |
97 |
+ dodoc ${S}/gobs/sql/pgdump.sql.gz || die |
98 |
+ dodoc ${S}/gobs/doc/Setup.txt || die |
99 |
+ |
100 |
+ distutils_src_install |
101 |
+} |
102 |
\ No newline at end of file |
103 |
|
104 |
diff --git a/gobs/bin/gobs_buildquerys b/gobs/bin/gobs_buildquerys |
105 |
index e109efc..868d01a 100755 |
106 |
--- a/gobs/bin/gobs_buildquerys |
107 |
+++ b/gobs/bin/gobs_buildquerys |
108 |
@@ -8,22 +8,28 @@ gobs_settings_dict=reader.read_gobs_settings_all() |
109 |
from gobs.ConnectionManager import connectionManager |
110 |
|
111 |
from gobs.check_setup import check_configure_guest |
112 |
+from gobs.sync import git_pull |
113 |
from gobs.build_queru import queruaction |
114 |
import portage |
115 |
import sys |
116 |
import os |
117 |
+import time |
118 |
+from multiprocessing import Process |
119 |
|
120 |
def main_loop(config_profile): |
121 |
repeat = True |
122 |
#get a connection from the pool |
123 |
init_queru = queruaction(config_profile) |
124 |
while repeat: |
125 |
- #FIXME do a git reop check |
126 |
- if check_configure_guest( config_profile) is not True: |
127 |
+ git_pull() |
128 |
+ if check_configure_guest(config_profile) is not True: |
129 |
# time.sleep(60) |
130 |
continue # retunr to the start of the function |
131 |
else: |
132 |
- init_queru.procces_qureru() |
133 |
+ p = Process(target=init_queru.procces_qureru) |
134 |
+ p.start() |
135 |
+ p.join() |
136 |
+ time.sleep(10) |
137 |
|
138 |
def main(): |
139 |
# Main |
140 |
|
141 |
diff --git a/gobs/bin/gobs_buildquerys b/gobs/bin/gobs_buildquerys~ |
142 |
similarity index 88% |
143 |
copy from gobs/bin/gobs_buildquerys |
144 |
copy to gobs/bin/gobs_buildquerys~ |
145 |
index e109efc..842b324 100755 |
146 |
--- a/gobs/bin/gobs_buildquerys |
147 |
+++ b/gobs/bin/gobs_buildquerys~ |
148 |
@@ -8,6 +8,7 @@ gobs_settings_dict=reader.read_gobs_settings_all() |
149 |
from gobs.ConnectionManager import connectionManager |
150 |
|
151 |
from gobs.check_setup import check_configure_guest |
152 |
+from gobs.sync import git_pull |
153 |
from gobs.build_queru import queruaction |
154 |
import portage |
155 |
import sys |
156 |
@@ -18,12 +19,13 @@ def main_loop(config_profile): |
157 |
#get a connection from the pool |
158 |
init_queru = queruaction(config_profile) |
159 |
while repeat: |
160 |
- #FIXME do a git reop check |
161 |
- if check_configure_guest( config_profile) is not True: |
162 |
+ git_pull() |
163 |
+ if check_configure_guest(config_profile) is not True: |
164 |
# time.sleep(60) |
165 |
continue # retunr to the start of the function |
166 |
else: |
167 |
init_queru.procces_qureru() |
168 |
+ # time.sleep(60) |
169 |
|
170 |
def main(): |
171 |
# Main |
172 |
|
173 |
diff --git a/gobs/bin/gobs_portage_hooks~ b/gobs/bin/gobs_portage_hooks~ |
174 |
new file mode 100755 |
175 |
index 0000000..5432545 |
176 |
--- /dev/null |
177 |
+++ b/gobs/bin/gobs_portage_hooks~ |
178 |
@@ -0,0 +1,79 @@ |
179 |
+#!/usr/bin/python |
180 |
+from __future__ import print_function |
181 |
+import os |
182 |
+import sys |
183 |
+# Get the options from the config file set in gobs.readconf |
184 |
+from gobs.readconf import get_conf_settings |
185 |
+reader=get_conf_settings() |
186 |
+gobs_settings_dict=reader.read_gobs_settings_all() |
187 |
+# make a CM |
188 |
+from gobs.ConnectionManager import connectionManager |
189 |
+CM=connectionManager(gobs_settings_dict) |
190 |
+#selectively import the pgsql/mysql querys |
191 |
+if CM.getName()=='pgsql': |
192 |
+ from gobs.pgsql import * |
193 |
+ |
194 |
+from gobs.package import gobs_package |
195 |
+from gobs.build_log import gobs_buildlog |
196 |
+from gobs.flags import gobs_use_flags |
197 |
+from portage.util import writemsg, writemsg_level, writemsg_stdout |
198 |
+import portage |
199 |
+ |
200 |
+def get_build_dict_db(mysettings, config_profile, gobs_settings_dict): |
201 |
+ conn=CM.getConnection() |
202 |
+ myportdb = portage.portdbapi(mysettings=mysettings) |
203 |
+ categories = os.environ['CATEGORY'] |
204 |
+ package = os.environ['PN'] |
205 |
+ ebuild_version = os.environ['PVR'] |
206 |
+ cpv = categories + "/" + package + "-" + ebuild_version |
207 |
+ init_package = gobs_package(mysettings, myportdb) |
208 |
+ print("cpv", cpv) |
209 |
+ package_id = have_package_db(conn, categories, package) |
210 |
+ # print("package_id %s" % package_id, file=sys.stdout) |
211 |
+ build_dict = {} |
212 |
+ mybuild_dict = {} |
213 |
+ build_dict['ebuild_version'] = ebuild_version |
214 |
+ build_dict['package_id'] = package_id |
215 |
+ build_dict['cpv'] = cpv |
216 |
+ build_dict['categories'] = categories |
217 |
+ build_dict['package'] = package |
218 |
+ build_dict['config_profile'] = config_profile |
219 |
+ init_useflags = gobs_use_flags(mysettings, myportdb, cpv) |
220 |
+ iuse_flags_list, final_use_list = init_useflags.get_flags_looked() |
221 |
+ #print 'final_use_list', final_use_list |
222 |
+ if final_use_list != []: |
223 |
+ build_dict['build_useflags'] = final_use_list |
224 |
+ else: |
225 |
+ build_dict['build_useflags'] = None |
226 |
+ #print "build_dict['build_useflags']", build_dict['build_useflags'] |
227 |
+ pkgdir = os.path.join(mysettings['PORTDIR'], categories + "/" + package) |
228 |
+ ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir+ "/" + package + "-" + ebuild_version + ".ebuild")[0] |
229 |
+ build_dict['checksum'] = ebuild_version_checksum_tree |
230 |
+ print('checksum' ,ebuild_version_checksum_tree) |
231 |
+ ebuild_id = get_ebuild_id_db_checksum(conn, build_dict) |
232 |
+ print('ebuild_id in db', ebuild_id) |
233 |
+ if ebuild_id is None: |
234 |
+ #print 'have any ebuild', get_ebuild_checksum(conn, package_id, ebuild_version) |
235 |
+ init_package.update_ebuild_db(build_dict) |
236 |
+ ebuild_id = get_ebuild_id_db_checksum(conn, build_dict) |
237 |
+ build_dict['ebuild_id'] = ebuild_id |
238 |
+ queue_id = check_revision(conn, build_dict) |
239 |
+ print("queue_id in db", queue_id) |
240 |
+ if queue_id is None: |
241 |
+ build_dict['queue_id'] = None |
242 |
+ else: |
243 |
+ build_dict['queue_id'] = queue_id |
244 |
+ return build_dict |
245 |
+ |
246 |
+def main(): |
247 |
+ # Main |
248 |
+ config_profile = gobs_settings_dict['gobs_config'] |
249 |
+ #we provide the main_loop with the ConnectionManager so we can hand out connections from within the loop |
250 |
+ mysettings = portage.settings |
251 |
+ build_dict = get_build_dict_db( mysettings, config_profile, gobs_settings_dict) |
252 |
+ init_buildlog = gobs_buildlog(mysettings, build_dict) |
253 |
+ init_buildlog.add_buildlog_main() |
254 |
+ #connectionManager.closeAllConnections() |
255 |
+ |
256 |
+if __name__ == "__main__": |
257 |
+ main() |
258 |
\ No newline at end of file |
259 |
|
260 |
diff --git a/gobs/bin/gobs_setup_profile~ b/gobs/bin/gobs_setup_profile~ |
261 |
new file mode 100755 |
262 |
index 0000000..cf926fa |
263 |
--- /dev/null |
264 |
+++ b/gobs/bin/gobs_setup_profile~ |
265 |
@@ -0,0 +1,12 @@ |
266 |
+#!/usr/bin/python |
267 |
+# Copyright 2006-2011 Gentoo Foundation |
268 |
+# Distributed under the terms of the GNU General Public License v2 |
269 |
+ |
270 |
+from gobs.init_setup_profile import setup_profile_main |
271 |
+ |
272 |
+def main(): |
273 |
+ # Main |
274 |
+ setup_profile_main(args=None): |
275 |
+ |
276 |
+if __name__ == "__main__": |
277 |
+ main() |
278 |
\ No newline at end of file |
279 |
|
280 |
diff --git a/gobs/bin/gobs_updatedb b/gobs/bin/gobs_updatedb |
281 |
index a80c270..5beb6b2 100755 |
282 |
--- a/gobs/bin/gobs_updatedb |
283 |
+++ b/gobs/bin/gobs_updatedb |
284 |
@@ -7,8 +7,8 @@ |
285 |
|
286 |
import sys |
287 |
import os |
288 |
-from threading import Thread |
289 |
-from git import * |
290 |
+import multiprocessing |
291 |
+ |
292 |
|
293 |
# Get the options from the config file set in gobs.readconf |
294 |
from gobs.readconf import get_conf_settings |
295 |
@@ -27,6 +27,7 @@ from gobs.package import gobs_package |
296 |
from gobs.categories import gobs_categories |
297 |
from gobs.old_cpv import gobs_old_cpv |
298 |
from gobs.categories import gobs_categories |
299 |
+from gobs.sync import git_pull, sync_tree |
300 |
import portage |
301 |
|
302 |
def init_portage_settings(): |
303 |
@@ -41,12 +42,6 @@ def init_portage_settings(): |
304 |
""" |
305 |
# check config setup |
306 |
#git stuff |
307 |
- repo = Repo("/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/") |
308 |
- repo_remote = repo.remotes.origin |
309 |
- repo_remote.pull() |
310 |
- master = repo.head.reference |
311 |
- print master.log() |
312 |
- |
313 |
conn=CM.getConnection() |
314 |
check_make_conf() |
315 |
print "Check configs done" |
316 |
@@ -59,6 +54,29 @@ def init_portage_settings(): |
317 |
print "Setting default config to:", config_id[0] |
318 |
return mysettings |
319 |
|
320 |
+def update_cpv_db_pool(mysettings, package_line): |
321 |
+ conn=CM.getConnection() |
322 |
+ # Setup portdb, gobs_categories, gobs_old_cpv, package |
323 |
+ myportdb = portage.portdbapi(mysettings=mysettings) |
324 |
+ init_categories = gobs_categories(mysettings) |
325 |
+ init_package = gobs_package(mysettings, myportdb) |
326 |
+ # split the cp to categories and package |
327 |
+ element = package_line.split('/') |
328 |
+ categories = element[0] |
329 |
+ package = element[1] |
330 |
+ # Check if we don't have the cp in the package table |
331 |
+ package_id = have_package_db(conn,categories, package) |
332 |
+ if package_id is None: |
333 |
+ # Add new package with ebuilds |
334 |
+ init_package.add_new_package_db(categories, package) |
335 |
+ # Ceck if we have the cp in the package table |
336 |
+ elif package_id is not None: |
337 |
+ # Update the packages with ebuilds |
338 |
+ init_package.update_package_db(categories, package, package_id) |
339 |
+ # Update the metadata for categories |
340 |
+ init_categories.update_categories_db(categories) |
341 |
+ CM.putConnection(conn) |
342 |
+ |
343 |
def update_cpv_db(mysettings): |
344 |
"""Code to update the cpv in the database. |
345 |
@type:settings |
346 |
@@ -71,51 +89,35 @@ def update_cpv_db(mysettings): |
347 |
print "Checking categories, package, ebuilds" |
348 |
# Setup portdb, gobs_categories, gobs_old_cpv, package |
349 |
myportdb = portage.portdbapi(mysettings=mysettings) |
350 |
- init_categories = gobs_categories(mysettings) |
351 |
- init_old_cpv = gobs_old_cpv(myportdb, mysettings) |
352 |
- init_package = gobs_package(mysettings, myportdb) |
353 |
package_id_list_tree = [] |
354 |
# Will run some update checks and update package if needed |
355 |
# Get categories/package list from portage |
356 |
package_list_tree = myportdb.cp_all() |
357 |
- # Run the update package for all package in the list |
358 |
- conn=CM.getConnection() |
359 |
+ # Use all exept 2 cores when multiprocessing |
360 |
+ pool_cores= multiprocessing.cpu_count() |
361 |
+ if pool_cores > "3": |
362 |
+ use_pool_cores = pool_cores - 2 |
363 |
+ else |
364 |
+ use_pool_cores = 1 |
365 |
+ pool = multiprocessing.Pool(processes=use_pool_cores) |
366 |
+ # Run the update package for all package in the list in |
367 |
+ # a multiprocessing pool |
368 |
for package_line in sorted(package_list_tree): |
369 |
- # split the cp to categories and package |
370 |
- element = package_line.split('/') |
371 |
- categories = element[0] |
372 |
- package = element[1] |
373 |
- print "C", categories + "/" + package # C = Checking |
374 |
- # Check if we don't have the cp in the package table |
375 |
- package_id = have_package_db(conn,categories, package) |
376 |
- if package_id is None: |
377 |
- # Add new package with ebuilds |
378 |
- package_id = init_package.add_new_package_db(categories, package) |
379 |
- # FIXME log some way that package_id was None |
380 |
- if not package_id is None: |
381 |
- package_id_list_tree.append(package_id) |
382 |
- # Ceck if we have the cp in the package table |
383 |
- elif package_id is not None: |
384 |
- # Update the packages with ebuilds |
385 |
- init_package.update_package_db(categories, package, package_id) |
386 |
- package_id_list_tree.append(package_id) |
387 |
- # Update the metadata for categories |
388 |
- init_categories.update_categories_db(categories) |
389 |
- CM.putConnection(conn) |
390 |
- # Remove any old ebuild, package and categories |
391 |
- init_old_cpv.mark_old_ebuild_db(categories, package, package_id) |
392 |
- init_old_cpv.mark_old_package_db(sorted(package_id_list_tree)) |
393 |
- init_old_cpv.mark_old_categories_db() |
394 |
+ pool.apply_async(update_cpv_db_pool, (mysettings, package_line,)) |
395 |
+ pool.close() |
396 |
+ pool.join() |
397 |
print "Checking categories, package and ebuilds done" |
398 |
|
399 |
def main(): |
400 |
# Main |
401 |
# Init settings for the default config |
402 |
- mysettings = init_portage_settings() |
403 |
- init_arch = gobs_arch() |
404 |
- init_arch.update_arch_db() |
405 |
- # Update the cpv db |
406 |
- update_cpv_db(mysettings) |
407 |
+ git_pull |
408 |
+ if sync_tree(): |
409 |
+ mysettings = init_portage_settings() |
410 |
+ init_arch = gobs_arch() |
411 |
+ init_arch.update_arch_db() |
412 |
+ # Update the cpv db |
413 |
+ update_cpv_db(mysettings) |
414 |
CM.closeAllConnections() |
415 |
|
416 |
if __name__ == "__main__": |
417 |
|
418 |
diff --git a/gobs/bin/gobs_updatedb b/gobs/bin/gobs_updatedb~ |
419 |
similarity index 62% |
420 |
copy from gobs/bin/gobs_updatedb |
421 |
copy to gobs/bin/gobs_updatedb~ |
422 |
index a80c270..15271e4 100755 |
423 |
--- a/gobs/bin/gobs_updatedb |
424 |
+++ b/gobs/bin/gobs_updatedb~ |
425 |
@@ -7,8 +7,8 @@ |
426 |
|
427 |
import sys |
428 |
import os |
429 |
-from threading import Thread |
430 |
-from git import * |
431 |
+import multiprocessing |
432 |
+ |
433 |
|
434 |
# Get the options from the config file set in gobs.readconf |
435 |
from gobs.readconf import get_conf_settings |
436 |
@@ -27,6 +27,7 @@ from gobs.package import gobs_package |
437 |
from gobs.categories import gobs_categories |
438 |
from gobs.old_cpv import gobs_old_cpv |
439 |
from gobs.categories import gobs_categories |
440 |
+from gobs.sync import git_pull, sync_tree |
441 |
import portage |
442 |
|
443 |
def init_portage_settings(): |
444 |
@@ -41,12 +42,6 @@ def init_portage_settings(): |
445 |
""" |
446 |
# check config setup |
447 |
#git stuff |
448 |
- repo = Repo("/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/") |
449 |
- repo_remote = repo.remotes.origin |
450 |
- repo_remote.pull() |
451 |
- master = repo.head.reference |
452 |
- print master.log() |
453 |
- |
454 |
conn=CM.getConnection() |
455 |
check_make_conf() |
456 |
print "Check configs done" |
457 |
@@ -59,6 +54,29 @@ def init_portage_settings(): |
458 |
print "Setting default config to:", config_id[0] |
459 |
return mysettings |
460 |
|
461 |
+def update_cpv_db_pool(mysettings, package_line): |
462 |
+ conn=CM.getConnection() |
463 |
+ # Setup portdb, gobs_categories, gobs_old_cpv, package |
464 |
+ myportdb = portage.portdbapi(mysettings=mysettings) |
465 |
+ init_categories = gobs_categories(mysettings) |
466 |
+ init_package = gobs_package(mysettings, myportdb) |
467 |
+ # split the cp to categories and package |
468 |
+ element = package_line.split('/') |
469 |
+ categories = element[0] |
470 |
+ package = element[1] |
471 |
+ # Check if we don't have the cp in the package table |
472 |
+ package_id = have_package_db(conn,categories, package) |
473 |
+ if package_id is None: |
474 |
+ # Add new package with ebuilds |
475 |
+ init_package.add_new_package_db(categories, package) |
476 |
+ # Ceck if we have the cp in the package table |
477 |
+ elif package_id is not None: |
478 |
+ # Update the packages with ebuilds |
479 |
+ init_package.update_package_db(categories, package, package_id) |
480 |
+ # Update the metadata for categories |
481 |
+ init_categories.update_categories_db(categories) |
482 |
+ CM.putConnection(conn) |
483 |
+ |
484 |
def update_cpv_db(mysettings): |
485 |
"""Code to update the cpv in the database. |
486 |
@type:settings |
487 |
@@ -71,51 +89,34 @@ def update_cpv_db(mysettings): |
488 |
print "Checking categories, package, ebuilds" |
489 |
# Setup portdb, gobs_categories, gobs_old_cpv, package |
490 |
myportdb = portage.portdbapi(mysettings=mysettings) |
491 |
- init_categories = gobs_categories(mysettings) |
492 |
- init_old_cpv = gobs_old_cpv(myportdb, mysettings) |
493 |
- init_package = gobs_package(mysettings, myportdb) |
494 |
package_id_list_tree = [] |
495 |
# Will run some update checks and update package if needed |
496 |
# Get categories/package list from portage |
497 |
package_list_tree = myportdb.cp_all() |
498 |
- # Run the update package for all package in the list |
499 |
- conn=CM.getConnection() |
500 |
+ pool_cores= multiprocessing.cpu_count() |
501 |
+ if pool_cores > "3": |
502 |
+ use_pool_cores = pool_cores - 2 |
503 |
+ else |
504 |
+ use_pool_cores = 1 |
505 |
+ pool = multiprocessing.Pool(processes=use_pool_cores) |
506 |
+ # Run the update package for all package in the list in |
507 |
+ # a multiprocessing pool |
508 |
for package_line in sorted(package_list_tree): |
509 |
- # split the cp to categories and package |
510 |
- element = package_line.split('/') |
511 |
- categories = element[0] |
512 |
- package = element[1] |
513 |
- print "C", categories + "/" + package # C = Checking |
514 |
- # Check if we don't have the cp in the package table |
515 |
- package_id = have_package_db(conn,categories, package) |
516 |
- if package_id is None: |
517 |
- # Add new package with ebuilds |
518 |
- package_id = init_package.add_new_package_db(categories, package) |
519 |
- # FIXME log some way that package_id was None |
520 |
- if not package_id is None: |
521 |
- package_id_list_tree.append(package_id) |
522 |
- # Ceck if we have the cp in the package table |
523 |
- elif package_id is not None: |
524 |
- # Update the packages with ebuilds |
525 |
- init_package.update_package_db(categories, package, package_id) |
526 |
- package_id_list_tree.append(package_id) |
527 |
- # Update the metadata for categories |
528 |
- init_categories.update_categories_db(categories) |
529 |
- CM.putConnection(conn) |
530 |
- # Remove any old ebuild, package and categories |
531 |
- init_old_cpv.mark_old_ebuild_db(categories, package, package_id) |
532 |
- init_old_cpv.mark_old_package_db(sorted(package_id_list_tree)) |
533 |
- init_old_cpv.mark_old_categories_db() |
534 |
+ pool.apply_async(update_cpv_db_pool, (mysettings, package_line,)) |
535 |
+ pool.close() |
536 |
+ pool.join() |
537 |
print "Checking categories, package and ebuilds done" |
538 |
|
539 |
def main(): |
540 |
# Main |
541 |
# Init settings for the default config |
542 |
- mysettings = init_portage_settings() |
543 |
- init_arch = gobs_arch() |
544 |
- init_arch.update_arch_db() |
545 |
- # Update the cpv db |
546 |
- update_cpv_db(mysettings) |
547 |
+ git_pull |
548 |
+ if sync_tree(): |
549 |
+ mysettings = init_portage_settings() |
550 |
+ init_arch = gobs_arch() |
551 |
+ init_arch.update_arch_db() |
552 |
+ # Update the cpv db |
553 |
+ update_cpv_db(mysettings) |
554 |
CM.closeAllConnections() |
555 |
|
556 |
if __name__ == "__main__": |
557 |
|
558 |
diff --git a/gobs/doc/Setup.txt~ b/gobs/doc/Setup.txt~ |
559 |
new file mode 100644 |
560 |
index 0000000..956990f |
561 |
--- /dev/null |
562 |
+++ b/gobs/doc/Setup.txt~ |
563 |
@@ -0,0 +1,7 @@ |
564 |
+1. Setup the Backend |
565 |
+Setup the gobs.conf for the db. |
566 |
+Change GOBSGITREPONAME to point to the git repo with your configs for the profiles/setups. |
567 |
+Import the *dump.sql.gz to your sql. |
568 |
+The portage/base/make.conf should be in the base profile/setup |
569 |
+The portage/all/bashrc should be in all the guest profiles/setups |
570 |
+The porfiles dir need a dir call config with a parent file that point to base profile |
571 |
|
572 |
diff --git a/gobs/doc/portage/all/bashrc~ b/gobs/doc/portage/all/bashrc~ |
573 |
new file mode 100644 |
574 |
index 0000000..092548e |
575 |
--- /dev/null |
576 |
+++ b/gobs/doc/portage/all/bashrc~ |
577 |
@@ -0,0 +1,7 @@ |
578 |
+pre_pkg_setup() { |
579 |
+ register_die_hook gobs_portage_hook |
580 |
+ register_success_hook gobs_portage_hook |
581 |
+} |
582 |
+gobs_portage_hook() { |
583 |
+ /home/buildhost/portage_hooks |
584 |
+} |
585 |
|
586 |
diff --git a/gobs/doc/portage/base/make.conf b/gobs/doc/portage/base/make.conf |
587 |
new file mode 100644 |
588 |
index 0000000..b69dfa9 |
589 |
--- /dev/null |
590 |
+++ b/gobs/doc/portage/base/make.conf |
591 |
@@ -0,0 +1,15 @@ |
592 |
+# This is for the base config |
593 |
+CHOST="x86_64-pc-linux-gnu" |
594 |
+ACCEPT_KEYWORDS="" |
595 |
+ARCH="amd64" |
596 |
+FEATURES="-metadata-transfer -news" |
597 |
+ACCEPT_LICENSE="*" |
598 |
+PORTAGE_TMPDIR=/var/tmp |
599 |
+PORTDIR=/usr/portage |
600 |
+DISTDIR=/usr/portage/distfiles |
601 |
+PORT_LOGDIR="/var/log/portage" |
602 |
+GENTOO_MIRRORS="ftp://ftp.sunet.se/pub/Linux/distributions/gentoo http://distfiles.gentoo.org http://www.ibiblio.org/pub/Linux/distributions/gentoo" |
603 |
+SYNC="rsync://rsync.europe.gentoo.org/gentoo-portage" |
604 |
+PORTAGE_TMPFS="/dev/shm" |
605 |
+PORTAGE_ELOG_CLASSES="" |
606 |
+PORTAGE_ELOG_SYSTEM="" |
607 |
|
608 |
diff --git a/gobs/pym/ConnectionManager.py~ b/gobs/pym/ConnectionManager.py~ |
609 |
new file mode 100644 |
610 |
index 0000000..1bbeb35 |
611 |
--- /dev/null |
612 |
+++ b/gobs/pym/ConnectionManager.py~ |
613 |
@@ -0,0 +1,56 @@ |
614 |
+#a simple CM build around sie singleton so there can only be 1 CM but you can call the class in different place with out caring about it. |
615 |
+#when the first object is created of this class, the SQL settings are read from the file and stored in the class for later reuse by the next object and so on. |
616 |
+#(maybe later add support for connection pools) |
617 |
+from __future__ import print_function |
618 |
+ |
619 |
+class connectionManager(object): |
620 |
+ _instance = None |
621 |
+ |
622 |
+ #size of the connection Pool |
623 |
+ def __new__(cls, settings_dict, numberOfconnections=20, *args, **kwargs): |
624 |
+ if not cls._instance: |
625 |
+ cls._instance = super(connectionManager, cls).__new__(cls, *args, **kwargs) |
626 |
+ #read the sql user/host etc and store it in the local object |
627 |
+ print(settings_dict['sql_host']) |
628 |
+ cls._host=settings_dict['sql_host'] |
629 |
+ cls._user=settings_dict['sql_user'] |
630 |
+ cls._password=settings_dict['sql_passwd'] |
631 |
+ cls._database=settings_dict['sql_db'] |
632 |
+ #shouldnt we include port also? |
633 |
+ try: |
634 |
+ from psycopg2 import pool |
635 |
+ cls._connectionNumber=numberOfconnections |
636 |
+ #always create 1 connection |
637 |
+ cls._pool=pool.ThreadedConnectionPool(1,cls._connectionNumber,host=cls._host,database=cls._database,user=cls._user,password=cls._password) |
638 |
+ cls._name='pgsql' |
639 |
+ except ImportError: |
640 |
+ print("Please install a recent version of dev-python/psycopg for Python") |
641 |
+ sys.exit(1) |
642 |
+ #setup connection pool |
643 |
+ return cls._instance |
644 |
+ |
645 |
+ ## returns the name of the database pgsql/mysql etc |
646 |
+ def getName(self): |
647 |
+ return self._name |
648 |
+ |
649 |
+ def getConnection(self): |
650 |
+ return self._pool.getconn() |
651 |
+ |
652 |
+ def putConnection(self, connection): |
653 |
+ self._pool.putconn(connection) |
654 |
+ |
655 |
+ def closeAllConnections(self): |
656 |
+ self._pool.closeall() |
657 |
+ |
658 |
+##how to use this class |
659 |
+#get a instance of the class (there can only be 1 instance but many pointers (single ton)) |
660 |
+#get the connection |
661 |
+#conn=cm.getConnection() |
662 |
+#get a cursor |
663 |
+#cur=conn.cursor() |
664 |
+#do stuff |
665 |
+#cur.execute(stuff) |
666 |
+#"close a connection" temporarily put it away for reuse |
667 |
+#cm.putConnection(conn) |
668 |
+#kill all connections, should only be used just before the program terminates |
669 |
+#cm.closeAllConnections() |
670 |
|
671 |
diff --git a/gobs/pym/Scheduler.py~ b/gobs/pym/Scheduler.py~ |
672 |
new file mode 100644 |
673 |
index 0000000..005f861 |
674 |
--- /dev/null |
675 |
+++ b/gobs/pym/Scheduler.py~ |
676 |
@@ -0,0 +1,1994 @@ |
677 |
+# Copyright 1999-2011 Gentoo Foundation |
678 |
+# Distributed under the terms of the GNU General Public License v2 |
679 |
+ |
680 |
+from __future__ import print_function |
681 |
+ |
682 |
+from collections import deque |
683 |
+import gc |
684 |
+import gzip |
685 |
+import logging |
686 |
+import shutil |
687 |
+import signal |
688 |
+import sys |
689 |
+import tempfile |
690 |
+import textwrap |
691 |
+import time |
692 |
+import warnings |
693 |
+import weakref |
694 |
+import zlib |
695 |
+ |
696 |
+import portage |
697 |
+from portage import os |
698 |
+from portage import _encodings |
699 |
+from portage import _unicode_decode, _unicode_encode |
700 |
+from portage.cache.mappings import slot_dict_class |
701 |
+from portage.elog.messages import eerror |
702 |
+from portage.localization import _ |
703 |
+from portage.output import colorize, create_color_func, red |
704 |
+bad = create_color_func("BAD") |
705 |
+from portage._sets import SETPREFIX |
706 |
+from portage._sets.base import InternalPackageSet |
707 |
+from portage.util import writemsg, writemsg_level |
708 |
+from portage.package.ebuild.digestcheck import digestcheck |
709 |
+from portage.package.ebuild.digestgen import digestgen |
710 |
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs |
711 |
+ |
712 |
+import _emerge |
713 |
+from _emerge.BinpkgFetcher import BinpkgFetcher |
714 |
+from _emerge.BinpkgPrefetcher import BinpkgPrefetcher |
715 |
+from _emerge.BinpkgVerifier import BinpkgVerifier |
716 |
+from _emerge.Blocker import Blocker |
717 |
+from _emerge.BlockerDB import BlockerDB |
718 |
+from _emerge.clear_caches import clear_caches |
719 |
+from _emerge.create_depgraph_params import create_depgraph_params |
720 |
+from _emerge.create_world_atom import create_world_atom |
721 |
+from _emerge.DepPriority import DepPriority |
722 |
+from _emerge.depgraph import depgraph, resume_depgraph |
723 |
+from _emerge.EbuildFetcher import EbuildFetcher |
724 |
+from _emerge.EbuildPhase import EbuildPhase |
725 |
+from _emerge.emergelog import emergelog |
726 |
+from _emerge.FakeVartree import FakeVartree |
727 |
+from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps |
728 |
+from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo |
729 |
+from _emerge.JobStatusDisplay import JobStatusDisplay |
730 |
+from _emerge.MergeListItem import MergeListItem |
731 |
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess |
732 |
+from _emerge.Package import Package |
733 |
+from _emerge.PackageMerge import PackageMerge |
734 |
+from _emerge.PollScheduler import PollScheduler |
735 |
+from _emerge.RootConfig import RootConfig |
736 |
+from _emerge.SlotObject import SlotObject |
737 |
+from _emerge.SequentialTaskQueue import SequentialTaskQueue |
738 |
+ |
739 |
+from gobs.build_log import gobs_buildlog |
740 |
+ |
741 |
# Python 3 removed the ``basestring`` type; alias it to ``str`` so that
# isinstance() checks written for Python 2 keep working on Python 3.
if sys.hexversion >= 0x3000000:
    basestring = str
743 |
+ |
744 |
+class Scheduler(PollScheduler): |
745 |
+ |
746 |
    # max time between display status updates (milliseconds)
    _max_display_latency = 3000

    # Options under which installed-package blockers are not enforced.
    _opts_ignore_blockers = \
        frozenset(["--buildpkgonly",
        "--fetchonly", "--fetch-all-uri",
        "--nodeps", "--pretend"])

    # Options that force foreground (non-background) operation.
    _opts_no_background = \
        frozenset(["--pretend",
        "--fetchonly", "--fetch-all-uri"])

    # Options under which emerge never re-execs itself after merging a
    # new portage version.
    _opts_no_restart = frozenset(["--buildpkgonly",
        "--fetchonly", "--fetch-all-uri", "--pretend"])

    # Options that must not be carried over into a --resume invocation.
    _bad_resume_opts = set(["--ask", "--changelog",
        "--resume", "--skipfirst"])
763 |
+ |
764 |
    class _iface_class(SlotObject):
        # Bundle of scheduler callbacks handed to child tasks so they can
        # schedule work without holding a reference to the Scheduler itself.
        __slots__ = ("fetch",
            "output", "register", "schedule",
            "scheduleSetup", "scheduleUnpack", "scheduleYield",
            "unregister")
769 |
+ |
770 |
    class _fetch_iface_class(SlotObject):
        # Interface given to fetcher tasks: the shared fetch log file and
        # the hook used to schedule the fetch.
        __slots__ = ("log_file", "schedule")
772 |
+ |
773 |
    # Slot-dict container holding one SequentialTaskQueue per queue name
    # (merge, jobs, ebuild_locks, fetch, unpack).
    _task_queues_class = slot_dict_class(
        ("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")
775 |
+ |
776 |
    class _build_opts_class(SlotObject):
        # Boolean build options derived from the emerge command line
        # (each slot maps to the "--<slot-with-dashes>" option).
        __slots__ = ("buildpkg", "buildpkg_exclude", "buildpkgonly",
            "fetch_all_uri", "fetchonly", "pretend")
779 |
+ |
780 |
    class _binpkg_opts_class(SlotObject):
        # Boolean binary-package options derived from the emerge command line.
        __slots__ = ("fetchonly", "getbinpkg", "pretend")
782 |
+ |
783 |
    class _pkg_count_class(SlotObject):
        # Merge progress counter: curval packages done out of maxval total.
        __slots__ = ("curval", "maxval")
785 |
+ |
786 |
    class _emerge_log_class(SlotObject):
        # Thin wrapper around emergelog() that suppresses short messages
        # when xterm title updates are disabled.
        __slots__ = ("xterm_titles",)

        def log(self, *pargs, **kwargs):
            if not self.xterm_titles:
                # Avoid interference with the scheduler's status display.
                kwargs.pop("short_msg", None)
            emergelog(self.xterm_titles, *pargs, **kwargs)
794 |
+ |
795 |
    class _failed_pkg(SlotObject):
        # Record of one failed merge: where it built, its build log,
        # the package object, and the exit status.
        __slots__ = ("build_dir", "build_log", "pkg", "returncode")
797 |
+ |
798 |
+ class _ConfigPool(object): |
799 |
+ """Interface for a task to temporarily allocate a config |
800 |
+ instance from a pool. This allows a task to be constructed |
801 |
+ long before the config instance actually becomes needed, like |
802 |
+ when prefetchers are constructed for the whole merge list.""" |
803 |
+ __slots__ = ("_root", "_allocate", "_deallocate") |
804 |
+ def __init__(self, root, allocate, deallocate): |
805 |
+ self._root = root |
806 |
+ self._allocate = allocate |
807 |
+ self._deallocate = deallocate |
808 |
+ def allocate(self): |
809 |
+ return self._allocate(self._root) |
810 |
+ def deallocate(self, settings): |
811 |
+ self._deallocate(settings) |
812 |
+ |
813 |
    class _unknown_internal_error(portage.exception.PortageException):
        """
        Used internally to terminate scheduling. The specific reason for
        the failure should have been dumped to stderr.
        """
        def __init__(self, value=""):
            # Message is optional since details were already written to stderr.
            portage.exception.PortageException.__init__(self, value)
820 |
+ |
821 |
    def __init__(self, settings, trees, mtimedb, myopts,
        spinner, mergelist=None, favorites=None, graph_config=None):
        """
        Initialize scheduler state: command-line option flags, task
        queues, merge bookkeeping structures, the status display,
        parallel-fetch configuration, and the per-root blocker database.

        @param settings: global portage config
        @param trees: per-root portage tree dictionaries
        @param mtimedb: resume/mtime database
        @param myopts: parsed emerge command-line options
        @param spinner: progress spinner
        @param mergelist: deprecated, ignored; use graph_config instead
        @param favorites: atoms requested on the command line
        @param graph_config: depgraph configuration (packages + digraph)
        """
        PollScheduler.__init__(self)

        if mergelist is not None:
            warnings.warn("The mergelist parameter of the " + \
                "_emerge.Scheduler constructor is now unused. Use " + \
                "the graph_config parameter instead.",
                DeprecationWarning, stacklevel=2)

        self.settings = settings
        self.target_root = settings["ROOT"]
        self.trees = trees
        self.myopts = myopts
        self._spinner = spinner
        self._mtimedb = mtimedb
        self._favorites = favorites
        self._args_set = InternalPackageSet(favorites, allow_repo=True)
        self._build_opts = self._build_opts_class()

        # Each _build_opts slot name maps to a "--slot-with-dashes" option.
        for k in self._build_opts.__slots__:
            setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
        self._build_opts.buildpkg_exclude = InternalPackageSet( \
            initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
            allow_wildcard=True, allow_repo=True)

        self._binpkg_opts = self._binpkg_opts_class()
        for k in self._binpkg_opts.__slots__:
            setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)

        self.curval = 0
        self._logger = self._emerge_log_class()
        self._task_queues = self._task_queues_class()
        for k in self._task_queues.allowed_keys:
            setattr(self._task_queues, k,
                SequentialTaskQueue())

        # Holds merges that will wait to be executed when no builds are
        # executing. This is useful for system packages since dependencies
        # on system packages are frequently unspecified. For example, see
        # bug #256616.
        self._merge_wait_queue = deque()
        # Holds merges that have been transfered from the merge_wait_queue to
        # the actual merge queue. They are removed from this list upon
        # completion. Other packages can start building only when this list is
        # empty.
        self._merge_wait_scheduled = []

        # Holds system packages and their deep runtime dependencies. Before
        # being merged, these packages go to merge_wait_queue, to be merged
        # when no other packages are building.
        self._deep_system_deps = set()

        # Holds packages to merge which will satisfy currently unsatisfied
        # deep runtime dependencies of system packages. If this is not empty
        # then no parallel builds will be spawned until it is empty. This
        # minimizes the possibility that a build will fail due to the system
        # being in a fragile state. For example, see bug #259954.
        self._unsatisfied_system_deps = set()

        self._status_display = JobStatusDisplay(
            xterm_titles=('notitles' not in settings.features))
        self._max_load = myopts.get("--load-average")
        max_jobs = myopts.get("--jobs")
        if max_jobs is None:
            max_jobs = 1
        self._set_max_jobs(max_jobs)

        # The root where the currently running
        # portage instance is installed.
        self._running_root = trees["/"]["root_config"]
        self.edebug = 0
        if settings.get("PORTAGE_DEBUG", "") == "1":
            self.edebug = 1
        self.pkgsettings = {}
        self._config_pool = {}
        for root in self.trees:
            self._config_pool[root] = []

        self._fetch_log = os.path.join(_emerge.emergelog._emerge_log_dir,
            'emerge-fetch.log')
        fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
            schedule=self._schedule_fetch)
        self._sched_iface = self._iface_class(
            fetch=fetch_iface, output=self._task_output,
            register=self._register,
            schedule=self._schedule_wait,
            scheduleSetup=self._schedule_setup,
            scheduleUnpack=self._schedule_unpack,
            scheduleYield=self._schedule_yield,
            unregister=self._unregister)

        self._prefetchers = weakref.WeakValueDictionary()
        self._pkg_queue = []
        self._running_tasks = {}
        self._completed_tasks = set()

        self._failed_pkgs = []
        self._failed_pkgs_all = []
        self._failed_pkgs_die_msgs = []
        self._post_mod_echo_msgs = []
        self._parallel_fetch = False
        self._init_graph(graph_config)
        merge_count = len([x for x in self._mergelist \
            if isinstance(x, Package) and x.operation == "merge"])
        self._pkg_count = self._pkg_count_class(
            curval=0, maxval=merge_count)
        self._status_display.maxval = self._pkg_count.maxval

        # The load average takes some time to respond when new
        # jobs are added, so we need to limit the rate of adding
        # new jobs.
        self._job_delay_max = 10
        self._job_delay_factor = 1.0
        self._job_delay_exp = 1.5
        self._previous_job_start_time = None

        # This is used to memoize the _choose_pkg() result when
        # no packages can be chosen until one of the existing
        # jobs completes.
        self._choose_pkg_return_early = False

        features = self.settings.features
        if "parallel-fetch" in features and \
            not ("--pretend" in self.myopts or \
            "--fetch-all-uri" in self.myopts or \
            "--fetchonly" in self.myopts):
            if "distlocks" not in features:
                # parallel-fetch needs distlocks to avoid concurrent
                # writers clobbering the same distfile.
                portage.writemsg(red("!!!")+"\n", noiselevel=-1)
                portage.writemsg(red("!!!")+" parallel-fetching " + \
                    "requires the distlocks feature enabled"+"\n",
                    noiselevel=-1)
                portage.writemsg(red("!!!")+" you have it disabled, " + \
                    "thus parallel-fetching is being disabled"+"\n",
                    noiselevel=-1)
                portage.writemsg(red("!!!")+"\n", noiselevel=-1)
            elif merge_count > 1:
                self._parallel_fetch = True

        if self._parallel_fetch:
            # clear out existing fetch log if it exists
            try:
                open(self._fetch_log, 'w').close()
            except EnvironmentError:
                pass

        self._running_portage = None
        portage_match = self._running_root.trees["vartree"].dbapi.match(
            portage.const.PORTAGE_PACKAGE_ATOM)
        if portage_match:
            cpv = portage_match.pop()
            self._running_portage = self._pkg(cpv, "installed",
                self._running_root, installed=True)
974 |
+ |
975 |
+ def _terminate_tasks(self): |
976 |
+ self._status_display.quiet = True |
977 |
+ while self._running_tasks: |
978 |
+ task_id, task = self._running_tasks.popitem() |
979 |
+ task.cancel() |
980 |
+ for q in self._task_queues.values(): |
981 |
+ q.clear() |
982 |
+ |
983 |
    def _init_graph(self, graph_config):
        """
        Initialization structures used for dependency calculations
        involving currently installed packages.
        """
        self._set_graph_config(graph_config)
        self._blocker_db = {}
        for root in self.trees:
            if graph_config is None:
                # No depgraph available: snapshot the installed packages
                # into a fake vartree for blocker calculations.
                fake_vartree = FakeVartree(self.trees[root]["root_config"],
                    pkg_cache=self._pkg_cache)
                fake_vartree.sync()
            else:
                fake_vartree = graph_config.trees[root]['vartree']
            self._blocker_db[root] = BlockerDB(fake_vartree)
998 |
+ |
999 |
    def _destroy_graph(self):
        """
        Use this to free memory at the beginning of _calc_resume_list().
        After _calc_resume_list(), the _init_graph() method
        must be called in order to re-generate the structures that
        this method destroys.
        """
        self._blocker_db = None
        self._set_graph_config(None)
        # Force collection now so the freed graph memory is actually
        # returned before the resume list is recalculated.
        gc.collect()
1009 |
+ |
1010 |
    def _poll(self, timeout=None):
        """
        Poll for events while keeping the status display refreshed at
        least every _max_display_latency milliseconds. With timeout=None
        this blocks until at least one event has been delivered.
        """
        self._schedule()

        if timeout is None:
            # Block indefinitely, waking periodically to refresh the display.
            while True:
                if not self._poll_event_handlers:
                    self._schedule()
                    if not self._poll_event_handlers:
                        raise StopIteration(
                            "timeout is None and there are no poll() event handlers")
                previous_count = len(self._poll_event_queue)
                PollScheduler._poll(self, timeout=self._max_display_latency)
                self._status_display.display()
                # A change in queue length means an event arrived.
                if previous_count != len(self._poll_event_queue):
                    break

        elif timeout <= self._max_display_latency:
            PollScheduler._poll(self, timeout=timeout)
            if timeout == 0:
                # The display is updated by _schedule() above, so it would be
                # redundant to update it here when timeout is 0.
                pass
            else:
                self._status_display.display()

        else:
            # Long timeout: poll in display-latency sized slices so the
            # status display stays fresh while we wait.
            remaining_timeout = timeout
            start_time = time.time()
            while True:
                previous_count = len(self._poll_event_queue)
                PollScheduler._poll(self,
                    timeout=min(self._max_display_latency, remaining_timeout))
                self._status_display.display()
                if previous_count != len(self._poll_event_queue):
                    break
                elapsed_time = time.time() - start_time
                if elapsed_time < 0:
                    # The system clock has changed such that start_time
                    # is now in the future, so just assume that the
                    # timeout has already elapsed.
                    break
                # timeout is in milliseconds, elapsed_time in seconds.
                remaining_timeout = timeout - 1000 * elapsed_time
                if remaining_timeout <= 0:
                    break
1055 |
+ |
1056 |
+ def _set_max_jobs(self, max_jobs): |
1057 |
+ self._max_jobs = max_jobs |
1058 |
+ self._task_queues.jobs.max_jobs = max_jobs |
1059 |
+ if "parallel-install" in self.settings.features: |
1060 |
+ self._task_queues.merge.max_jobs = max_jobs |
1061 |
+ |
1062 |
    def _background_mode(self):
        """
        Check if background mode is enabled and adjust states as necessary.

        @rtype: bool
        @returns: True if background mode is enabled, False otherwise.
        """
        # Background applies for parallel jobs or quiet output, unless an
        # option in _opts_no_background forces the foreground.
        background = (self._max_jobs is True or \
            self._max_jobs > 1 or "--quiet" in self.myopts \
            or "--quiet-build" in self.myopts) and \
            not bool(self._opts_no_background.intersection(self.myopts))

        if background:
            interactive_tasks = self._get_interactive_tasks()
            if interactive_tasks:
                # Interactive packages need the terminal, so fall back to
                # foreground output and a single job.
                background = False
                writemsg_level(">>> Sending package output to stdio due " + \
                    "to interactive package(s):\n",
                    level=logging.INFO, noiselevel=-1)
                msg = [""]
                for pkg in interactive_tasks:
                    pkg_str = " " + colorize("INFORM", str(pkg.cpv))
                    if pkg.root != "/":
                        pkg_str += " for " + pkg.root
                    msg.append(pkg_str)
                msg.append("")
                writemsg_level("".join("%s\n" % (l,) for l in msg),
                    level=logging.INFO, noiselevel=-1)
                if self._max_jobs is True or self._max_jobs > 1:
                    self._set_max_jobs(1)
                    writemsg_level(">>> Setting --jobs=1 due " + \
                        "to the above interactive package(s)\n",
                        level=logging.INFO, noiselevel=-1)
                    writemsg_level(">>> In order to temporarily mask " + \
                        "interactive updates, you may\n" + \
                        ">>> specify --accept-properties=-interactive\n",
                        level=logging.INFO, noiselevel=-1)
        self._status_display.quiet = \
            not background or \
            ("--quiet" in self.myopts and \
            "--verbose" not in self.myopts)

        self._logger.xterm_titles = \
            "notitles" not in self.settings.features and \
            self._status_display.quiet

        return background
1109 |
+ |
1110 |
+ def _get_interactive_tasks(self): |
1111 |
+ interactive_tasks = [] |
1112 |
+ for task in self._mergelist: |
1113 |
+ if not (isinstance(task, Package) and \ |
1114 |
+ task.operation == "merge"): |
1115 |
+ continue |
1116 |
+ if 'interactive' in task.metadata.properties: |
1117 |
+ interactive_tasks.append(task) |
1118 |
+ return interactive_tasks |
1119 |
+ |
1120 |
    def _set_graph_config(self, graph_config):
        """
        Install a new graph configuration (or discard the current one when
        graph_config is None) and derive the dependent scheduler structures.
        """
        if graph_config is None:
            # Reset to an empty state.
            self._graph_config = None
            self._pkg_cache = {}
            self._digraph = None
            self._mergelist = []
            self._deep_system_deps.clear()
            return

        self._graph_config = graph_config
        self._pkg_cache = graph_config.pkg_cache
        self._digraph = graph_config.graph
        self._mergelist = graph_config.mergelist

        if "--nodeps" in self.myopts or \
            (self._max_jobs is not True and self._max_jobs < 2):
            # save some memory
            self._digraph = None
            graph_config.graph = None
            graph_config.pkg_cache.clear()
            self._deep_system_deps.clear()
            # Keep only the merge-list packages in the cache.
            for pkg in self._mergelist:
                self._pkg_cache[pkg] = pkg
            return

        self._find_system_deps()
        self._prune_digraph()
        self._prevent_builddir_collisions()
        if '--debug' in self.myopts:
            writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
            self._digraph.debug_print()
            writemsg("\n", noiselevel=-1)
1153 |
+ |
1154 |
+ def _find_system_deps(self): |
1155 |
+ """ |
1156 |
+ Find system packages and their deep runtime dependencies. Before being |
1157 |
+ merged, these packages go to merge_wait_queue, to be merged when no |
1158 |
+ other packages are building. |
1159 |
+ NOTE: This can only find deep system deps if the system set has been |
1160 |
+ added to the graph and traversed deeply (the depgraph "complete" |
1161 |
+ parameter will do this, triggered by emerge --complete-graph option). |
1162 |
+ """ |
1163 |
+ deep_system_deps = self._deep_system_deps |
1164 |
+ deep_system_deps.clear() |
1165 |
+ deep_system_deps.update( |
1166 |
+ _find_deep_system_runtime_deps(self._digraph)) |
1167 |
+ deep_system_deps.difference_update([pkg for pkg in \ |
1168 |
+ deep_system_deps if pkg.operation != "merge"]) |
1169 |
+ |
1170 |
    def _prune_digraph(self):
        """
        Prune any root nodes that are irrelevant.
        """
        graph = self._digraph
        completed_tasks = self._completed_tasks
        removed_nodes = set()
        # Removing a root node can expose new irrelevant roots, so repeat
        # until a pass removes nothing (fixpoint).
        while True:
            for node in graph.root_nodes():
                if not isinstance(node, Package) or \
                    (node.installed and node.operation == "nomerge") or \
                    node.onlydeps or \
                    node in completed_tasks:
                    removed_nodes.add(node)
            if removed_nodes:
                graph.difference_update(removed_nodes)
            if not removed_nodes:
                break
            removed_nodes.clear()
1190 |
+ |
1191 |
+ def _prevent_builddir_collisions(self): |
1192 |
+ """ |
1193 |
+ When building stages, sometimes the same exact cpv needs to be merged |
1194 |
+ to both $ROOTs. Add edges to the digraph in order to avoid collisions |
1195 |
+ in the builddir. Currently, normal file locks would be inappropriate |
1196 |
+ for this purpose since emerge holds all of it's build dir locks from |
1197 |
+ the main process. |
1198 |
+ """ |
1199 |
+ cpv_map = {} |
1200 |
+ for pkg in self._mergelist: |
1201 |
+ if not isinstance(pkg, Package): |
1202 |
+ # a satisfied blocker |
1203 |
+ continue |
1204 |
+ if pkg.installed: |
1205 |
+ continue |
1206 |
+ if pkg.cpv not in cpv_map: |
1207 |
+ cpv_map[pkg.cpv] = [pkg] |
1208 |
+ continue |
1209 |
+ for earlier_pkg in cpv_map[pkg.cpv]: |
1210 |
+ self._digraph.add(earlier_pkg, pkg, |
1211 |
+ priority=DepPriority(buildtime=True)) |
1212 |
+ cpv_map[pkg.cpv].append(pkg) |
1213 |
+ |
1214 |
    class _pkg_failure(portage.exception.PortageException):
        """
        An instance of this class is raised by unmerge() when
        an uninstallation fails.
        """
        # Default exit status; overridden by the first positional argument.
        status = 1
        def __init__(self, *pargs):
            portage.exception.PortageException.__init__(self, pargs)
            if pargs:
                self.status = pargs[0]
1224 |
+ |
1225 |
+ def _schedule_fetch(self, fetcher): |
1226 |
+ """ |
1227 |
+ Schedule a fetcher, in order to control the number of concurrent |
1228 |
+ fetchers. If self._max_jobs is greater than 1 then the fetch |
1229 |
+ queue is bypassed and the fetcher is started immediately, |
1230 |
+ otherwise it is added to the front of the parallel-fetch queue. |
1231 |
+ NOTE: The parallel-fetch queue is currently used to serialize |
1232 |
+ access to the parallel-fetch log, so changes in the log handling |
1233 |
+ would be required before it would be possible to enable |
1234 |
+ concurrent fetching within the parallel-fetch queue. |
1235 |
+ """ |
1236 |
+ if self._max_jobs > 1: |
1237 |
+ fetcher.start() |
1238 |
+ else: |
1239 |
+ self._task_queues.fetch.addFront(fetcher) |
1240 |
+ |
1241 |
    def _schedule_setup(self, setup_phase):
        """
        Schedule a setup phase on the merge queue, in order to
        serialize unsandboxed access to the live filesystem.
        """
        if self._task_queues.merge.max_jobs > 1 and \
            "ebuild-locks" in self.settings.features:
            # Use a separate queue for ebuild-locks when the merge
            # queue allows more than 1 job (due to parallel-install),
            # since the portage.locks module does not behave as desired
            # if we try to lock the same file multiple times
            # concurrently from the same process.
            self._task_queues.ebuild_locks.add(setup_phase)
        else:
            self._task_queues.merge.add(setup_phase)
        self._schedule()
1257 |
+ |
1258 |
+ def _schedule_unpack(self, unpack_phase): |
1259 |
+ """ |
1260 |
+ Schedule an unpack phase on the unpack queue, in order |
1261 |
+ to serialize $DISTDIR access for live ebuilds. |
1262 |
+ """ |
1263 |
+ self._task_queues.unpack.add(unpack_phase) |
1264 |
+ |
1265 |
+ def _find_blockers(self, new_pkg): |
1266 |
+ """ |
1267 |
+ Returns a callable. |
1268 |
+ """ |
1269 |
+ def get_blockers(): |
1270 |
+ return self._find_blockers_impl(new_pkg) |
1271 |
+ return get_blockers |
1272 |
+ |
1273 |
    def _find_blockers_impl(self, new_pkg):
        # Blockers are not enforced at all for certain option sets
        # (--pretend, --fetchonly, etc).
        if self._opts_ignore_blockers.intersection(self.myopts):
            return None

        blocker_db = self._blocker_db[new_pkg.root]

        blocker_dblinks = []
        for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
            # A package does not block its own slot replacement or an
            # identical re-merge of itself.
            if new_pkg.slot_atom == blocking_pkg.slot_atom:
                continue
            if new_pkg.cpv == blocking_pkg.cpv:
                continue
            blocker_dblinks.append(portage.dblink(
                blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
                self.pkgsettings[blocking_pkg.root], treetype="vartree",
                vartree=self.trees[blocking_pkg.root]["vartree"]))

        return blocker_dblinks
1291 |
+ |
1292 |
    def _generate_digests(self):
        """
        Generate digests if necessary for --digests or FEATURES=digest.
        In order to avoid interference, this must done before parallel
        tasks are started.
        """
        if '--fetchonly' in self.myopts:
            return os.EX_OK

        # Digest generation is triggered either by --digest or by the
        # digest feature in any root's settings.
        digest = '--digest' in self.myopts
        if not digest:
            for pkgsettings in self.pkgsettings.values():
                if pkgsettings.mycpv is not None:
                    # ensure that we are using global features
                    # settings rather than those from package.env
                    pkgsettings.reset()
                if 'digest' in pkgsettings.features:
                    digest = True
                    break

        if not digest:
            return os.EX_OK

        for x in self._mergelist:
            if not isinstance(x, Package) or \
                x.type_name != 'ebuild' or \
                x.operation != 'merge':
                continue
            pkgsettings = self.pkgsettings[x.root]
            if pkgsettings.mycpv is not None:
                # ensure that we are using global features
                # settings rather than those from package.env
                pkgsettings.reset()
            if '--digest' not in self.myopts and \
                'digest' not in pkgsettings.features:
                continue
            portdb = x.root_config.trees['porttree'].dbapi
            ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
            if ebuild_path is None:
                raise AssertionError("ebuild not found for '%s'" % x.cpv)
            pkgsettings['O'] = os.path.dirname(ebuild_path)
            if not digestgen(mysettings=pkgsettings, myportdb=portdb):
                writemsg_level(
                    "!!! Unable to generate manifest for '%s'.\n" \
                    % x.cpv, level=logging.ERROR, noiselevel=-1)
                return 1

        return os.EX_OK
1341 |
+ |
1342 |
    def _env_sanity_check(self):
        """
        Verify a sane environment before trying to build anything from source.
        """
        have_src_pkg = False
        for x in self._mergelist:
            if isinstance(x, Package) and not x.built:
                have_src_pkg = True
                break

        # Nothing will be built from source, so there is nothing to verify.
        if not have_src_pkg:
            return os.EX_OK

        for settings in self.pkgsettings.values():
            for var in ("ARCH", ):
                value = settings.get(var)
                if value and value.strip():
                    continue
                # A missing/empty ARCH usually indicates a broken profile.
                msg = _("%(var)s is not set... "
                    "Are you missing the '%(configroot)setc/make.profile' symlink? "
                    "Is the symlink correct? "
                    "Is your portage tree complete?") % \
                    {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"]}

                out = portage.output.EOutput()
                for line in textwrap.wrap(msg, 70):
                    out.eerror(line)
                return 1

        return os.EX_OK
1372 |
+ |
1373 |
    def _check_manifests(self):
        # Verify all the manifests now so that the user is notified of failure
        # as soon as possible.
        if "strict" not in self.settings.features or \
            "--fetchonly" in self.myopts or \
            "--fetch-all-uri" in self.myopts:
            return os.EX_OK

        shown_verifying_msg = False
        quiet_settings = {}
        for myroot, pkgsettings in self.pkgsettings.items():
            # Clone the config per root so PORTAGE_QUIET can be forced on
            # without disturbing the normal settings.
            quiet_config = portage.config(clone=pkgsettings)
            quiet_config["PORTAGE_QUIET"] = "1"
            quiet_config.backup_changes("PORTAGE_QUIET")
            quiet_settings[myroot] = quiet_config
            del quiet_config

        failures = 0

        for x in self._mergelist:
            if not isinstance(x, Package) or \
                x.type_name != "ebuild":
                continue

            if x.operation == "uninstall":
                continue

            if not shown_verifying_msg:
                shown_verifying_msg = True
                self._status_msg("Verifying ebuild manifests")

            root_config = x.root_config
            portdb = root_config.trees["porttree"].dbapi
            quiet_config = quiet_settings[root_config.root]
            ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
            if ebuild_path is None:
                raise AssertionError("ebuild not found for '%s'" % x.cpv)
            quiet_config["O"] = os.path.dirname(ebuild_path)
            if not digestcheck([], quiet_config, strict=True):
                failures |= 1

        if failures:
            return 1
        return os.EX_OK
1417 |
+ |
1418 |
+ def _add_prefetchers(self): |
1419 |
+ |
1420 |
+ if not self._parallel_fetch: |
1421 |
+ return |
1422 |
+ |
1423 |
+ if self._parallel_fetch: |
1424 |
+ self._status_msg("Starting parallel fetch") |
1425 |
+ |
1426 |
+ prefetchers = self._prefetchers |
1427 |
+ getbinpkg = "--getbinpkg" in self.myopts |
1428 |
+ |
1429 |
+ for pkg in self._mergelist: |
1430 |
+ # mergelist can contain solved Blocker instances |
1431 |
+ if not isinstance(pkg, Package) or pkg.operation == "uninstall": |
1432 |
+ continue |
1433 |
+ prefetcher = self._create_prefetcher(pkg) |
1434 |
+ if prefetcher is not None: |
1435 |
+ self._task_queues.fetch.add(prefetcher) |
1436 |
+ prefetchers[pkg] = prefetcher |
1437 |
+ |
1438 |
+ # Start the first prefetcher immediately so that self._task() |
1439 |
+ # won't discard it. This avoids a case where the first |
1440 |
+ # prefetcher is discarded, causing the second prefetcher to |
1441 |
+ # occupy the fetch queue before the first fetcher has an |
1442 |
+ # opportunity to execute. |
1443 |
+ self._task_queues.fetch.schedule() |
1444 |
+ |
1445 |
    def _create_prefetcher(self, pkg):
        """
        @return: a prefetcher, or None if not applicable
        """
        prefetcher = None

        if not isinstance(pkg, Package):
            # e.g. a satisfied Blocker: nothing to fetch.
            pass

        elif pkg.type_name == "ebuild":

            prefetcher = EbuildFetcher(background=True,
                config_pool=self._ConfigPool(pkg.root,
                self._allocate_config, self._deallocate_config),
                fetchonly=1, logfile=self._fetch_log,
                pkg=pkg, prefetch=True, scheduler=self._sched_iface)

        elif pkg.type_name == "binary" and \
            "--getbinpkg" in self.myopts and \
            pkg.root_config.trees["bintree"].isremote(pkg.cpv):

            # Remote binary package: prefetch the binpkg itself.
            prefetcher = BinpkgPrefetcher(background=True,
                pkg=pkg, scheduler=self._sched_iface)

        return prefetcher
1470 |
+ |
1471 |
+ def _is_restart_scheduled(self): |
1472 |
+ """ |
1473 |
+ Check if the merge list contains a replacement |
1474 |
+ for the current running instance, that will result |
1475 |
+ in restart after merge. |
1476 |
+ @rtype: bool |
1477 |
+ @returns: True if a restart is scheduled, False otherwise. |
1478 |
+ """ |
1479 |
+ if self._opts_no_restart.intersection(self.myopts): |
1480 |
+ return False |
1481 |
+ |
1482 |
+ mergelist = self._mergelist |
1483 |
+ |
1484 |
+ for i, pkg in enumerate(mergelist): |
1485 |
+ if self._is_restart_necessary(pkg) and \ |
1486 |
+ i != len(mergelist) - 1: |
1487 |
+ return True |
1488 |
+ |
1489 |
+ return False |
1490 |
+ |
1491 |
def _is_restart_necessary(self, pkg):
    """
    @return: True if merging the given package
    requires restart, False otherwise.
    """

    # Figure out if we need a restart.
    # Only a portage package merged into the running root can require one.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        if self._running_portage is None:
            return True
        elif pkg.cpv != self._running_portage.cpv or \
            '9999' in pkg.cpv or \
            'git' in pkg.inherited or \
            'git-2' in pkg.inherited:
            # Restart when the version differs, or for live ebuilds
            # (9999 / git eclasses) whose code can change even at the
            # same version string.
            return True
    return False
1509 |
+ |
1510 |
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    """

    # Restart can be suppressed by certain options (e.g. --pretend).
    if self._opts_no_restart.intersection(self.myopts):
        return

    if not self._is_restart_necessary(pkg):
        return

    # No point restarting if this was the last package to merge.
    if pkg == self._mergelist[-1]:
        return

    self._main_loop_cleanup()

    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts

    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \
        "portage version.")

    # Remove the just-merged package from the resume list and persist
    # it before exec(), so the new process resumes from the right spot.
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    mtimedb.commit()
    portage.run_exitfuncs()
    # Don't trust sys.argv[0] here because eselect-python may modify it.
    emerge_binary = os.path.join(portage.const.PORTAGE_BIN_PATH, 'emerge')
    mynewargv = [emerge_binary, "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.items():
        if myopt not in bad_resume_opts:
            if myarg is True:
                mynewargv.append(myopt)
            elif isinstance(myarg, list):
                # arguments like --exclude that use 'append' action
                for x in myarg:
                    mynewargv.append("%s=%s" % (myopt, x))
            else:
                mynewargv.append("%s=%s" % (myopt, myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    # Replaces the current process; nothing after this line executes.
    os.execv(mynewargv[0], mynewargv)
1564 |
+ |
1565 |
def _run_pkg_pretend(self):
    """
    Since pkg_pretend output may be important, this method sends all
    output directly to stdout (regardless of options like --quiet or
    --jobs).

    Runs the pkg_pretend phase for every applicable package in the
    merge list (EAPI >= 4 with a defined "pretend" phase).
    @rtype: int
    @returns: os.EX_OK if all phases succeed, 1 otherwise.
    """

    failures = 0

    # Use a local PollScheduler instance here, since we don't
    # want tasks here to trigger the usual Scheduler callbacks
    # that handle job scheduling and status display.
    sched_iface = PollScheduler().sched_iface

    for x in self._mergelist:
        if not isinstance(x, Package):
            continue

        if x.operation == "uninstall":
            continue

        # pkg_pretend was introduced in EAPI 4; skip older EAPIs.
        if x.metadata["EAPI"] in ("0", "1", "2", "3"):
            continue

        if "pretend" not in x.metadata.defined_phases:
            continue

        out_str = ">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
        portage.util.writemsg_stdout(out_str, noiselevel=-1)

        root_config = x.root_config
        settings = self.pkgsettings[root_config.root]
        settings.setcpv(x)
        # Run the phase in a throwaway tmpdir so it can't pollute the
        # real build area; restored in the finally block below.
        tmpdir = tempfile.mkdtemp()
        tmpdir_orig = settings["PORTAGE_TMPDIR"]
        settings["PORTAGE_TMPDIR"] = tmpdir

        try:
            if x.built:
                tree = "bintree"
                bintree = root_config.trees["bintree"].dbapi.bintree
                fetched = False

                # Display fetch on stdout, so that it's always clear what
                # is consuming time here.
                if bintree.isremote(x.cpv):
                    fetcher = BinpkgFetcher(pkg=x,
                        scheduler=sched_iface)
                    fetcher.start()
                    if fetcher.wait() != os.EX_OK:
                        failures += 1
                        continue
                    fetched = fetcher.pkg_path

                verifier = BinpkgVerifier(pkg=x,
                    scheduler=sched_iface)
                verifier.start()
                if verifier.wait() != os.EX_OK:
                    failures += 1
                    continue

                if fetched:
                    bintree.inject(x.cpv, filename=fetched)
                # Unpack the binpkg's build-info so its ebuild and
                # environment are available for the pretend phase.
                tbz2_file = bintree.getname(x.cpv)
                infloc = os.path.join(tmpdir, x.category, x.pf, "build-info")
                os.makedirs(infloc)
                portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
                ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
                settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
                settings.configdict["pkg"]["MERGE_TYPE"] = "binary"

            else:
                tree = "porttree"
                portdb = root_config.trees["porttree"].dbapi
                ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
                if ebuild_path is None:
                    raise AssertionError("ebuild not found for '%s'" % x.cpv)
                settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
                if self._build_opts.buildpkgonly:
                    settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
                else:
                    settings.configdict["pkg"]["MERGE_TYPE"] = "source"

            portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
                "pretend", settings=settings,
                db=self.trees[settings["ROOT"]][tree].dbapi)
            prepare_build_dirs(root_config.root, settings, cleanup=0)

            # REPLACING_VERSIONS exposes currently installed versions
            # (same slot, or identical cpv) to the pretend phase.
            vardb = root_config.trees['vartree'].dbapi
            settings["REPLACING_VERSIONS"] = " ".join(
                set(portage.versions.cpv_getversion(match) \
                for match in vardb.match(x.slot_atom) + \
                vardb.match('='+x.cpv)))
            pretend_phase = EbuildPhase(
                phase="pretend", scheduler=sched_iface,
                settings=settings)

            pretend_phase.start()
            ret = pretend_phase.wait()
            if ret != os.EX_OK:
                failures += 1
            # Flush any elog messages produced by the phase.
            portage.elog.elog_process(x.cpv, settings)
        finally:
            shutil.rmtree(tmpdir)
            settings["PORTAGE_TMPDIR"] = tmpdir_orig

    if failures:
        return 1
    return os.EX_OK
1674 |
+ |
1675 |
def merge(self):
    """
    Top-level merge entry point: validates PORTAGE_TMPDIR for every
    root, installs SIGINT/SIGTERM handlers, runs the merge loop
    (repeating while --keep-going can recalculate a resume list), and
    finally reports failed packages.
    @rtype: int
    @returns: os.EX_OK on success, otherwise 1 or another error code.
    """
    if "--resume" in self.myopts:
        # We're resuming.
        portage.writemsg_stdout(
            colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
        self._logger.log(" *** Resuming merge...")

    self._save_resume_list()

    try:
        self._background = self._background_mode()
    except self._unknown_internal_error:
        return 1

    for root in self.trees:
        root_config = self.trees[root]["root_config"]

        # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
        # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
        # for ensuring sane $PWD (bug #239560) and storing elog messages.
        tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
        if not tmpdir or not os.path.isdir(tmpdir):
            msg = "The directory specified in your " + \
                "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
                "does not exist. Please create this " + \
                "directory or correct your PORTAGE_TMPDIR setting."
            msg = textwrap.wrap(msg, 70)
            out = portage.output.EOutput()
            for l in msg:
                out.eerror(l)
            return 1

        if self._background:
            # Settings must be unlocked before mutation, then relocked.
            root_config.settings.unlock()
            root_config.settings["PORTAGE_BACKGROUND"] = "1"
            root_config.settings.backup_changes("PORTAGE_BACKGROUND")
            root_config.settings.lock()

        self.pkgsettings[root] = portage.config(
            clone=root_config.settings)

    keep_going = "--keep-going" in self.myopts
    fetchonly = self._build_opts.fetchonly
    mtimedb = self._mtimedb
    failed_pkgs = self._failed_pkgs

    # Pre-merge sanity checks; each returns os.EX_OK or an error code.
    rval = self._generate_digests()
    if rval != os.EX_OK:
        return rval

    rval = self._env_sanity_check()
    if rval != os.EX_OK:
        return rval

    # TODO: Immediately recalculate deps here if --keep-going
    # is enabled and corrupt manifests are detected.
    rval = self._check_manifests()
    if rval != os.EX_OK and not keep_going:
        return rval

    if not fetchonly:
        rval = self._run_pkg_pretend()
        if rval != os.EX_OK:
            return rval

    while True:

        received_signal = []

        def sighandler(signum, frame):
            # Ignore further signals while shutting down, then record
            # the conventional 128+signum exit status for later.
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            signal.signal(signal.SIGTERM, signal.SIG_IGN)
            portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
                {"signal":signum})
            self.terminate()
            received_signal.append(128 + signum)

        earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
        earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)

        try:
            rval = self._merge()
        finally:
            # Restore previous handlers
            if earlier_sigint_handler is not None:
                signal.signal(signal.SIGINT, earlier_sigint_handler)
            else:
                signal.signal(signal.SIGINT, signal.SIG_DFL)
            if earlier_sigterm_handler is not None:
                signal.signal(signal.SIGTERM, earlier_sigterm_handler)
            else:
                signal.signal(signal.SIGTERM, signal.SIG_DFL)

        if received_signal:
            sys.exit(received_signal[0])

        # --keep-going retry logic: prune failed packages from the
        # resume list and recalculate; break out when nothing remains.
        if rval == os.EX_OK or fetchonly or not keep_going:
            break
        if "resume" not in mtimedb:
            break
        mergelist = self._mtimedb["resume"].get("mergelist")
        if not mergelist:
            break

        if not failed_pkgs:
            break

        for failed_pkg in failed_pkgs:
            mergelist.remove(list(failed_pkg.pkg))

        self._failed_pkgs_all.extend(failed_pkgs)
        del failed_pkgs[:]

        if not mergelist:
            break

        if not self._calc_resume_list():
            break

        clear_caches(self.trees)
        if not self._mergelist:
            break

        self._save_resume_list()
        self._pkg_count.curval = 0
        self._pkg_count.maxval = len([x for x in self._mergelist \
            if isinstance(x, Package) and x.operation == "merge"])
        self._status_display.maxval = self._pkg_count.maxval

    self._logger.log(" *** Finished. Cleaning up...")

    if failed_pkgs:
        self._failed_pkgs_all.extend(failed_pkgs)
        del failed_pkgs[:]

    printer = portage.output.EOutput()
    background = self._background
    failure_log_shown = False
    if background and len(self._failed_pkgs_all) == 1:
        # If only one package failed then just show it's
        # whole log for easy viewing.
        failed_pkg = self._failed_pkgs_all[-1]
        build_dir = failed_pkg.build_dir
        log_file = None
        log_file_real = None

        log_paths = [failed_pkg.build_log]

        log_path = self._locate_failure_log(failed_pkg)
        if log_path is not None:
            try:
                log_file = open(_unicode_encode(log_path,
                    encoding=_encodings['fs'], errors='strict'), mode='rb')
            except IOError:
                pass
            else:
                if log_path.endswith('.gz'):
                    # Keep a handle on the raw file so it can be closed
                    # separately from the GzipFile wrapper.
                    log_file_real = log_file
                    log_file = gzip.GzipFile(filename='',
                        mode='rb', fileobj=log_file)

        if log_file is not None:
            try:
                for line in log_file:
                    writemsg_level(line, noiselevel=-1)
            except zlib.error as e:
                writemsg_level("%s\n" % (e,), level=logging.ERROR,
                    noiselevel=-1)
            finally:
                log_file.close()
                if log_file_real is not None:
                    log_file_real.close()
            failure_log_shown = True

    # Dump mod_echo output now since it tends to flood the terminal.
    # This allows us to avoid having more important output, generated
    # later, from being swept away by the mod_echo output.
    mod_echo_output = _flush_elog_mod_echo()

    if background and not failure_log_shown and \
        self._failed_pkgs_all and \
        self._failed_pkgs_die_msgs and \
        not mod_echo_output:

        # Replay collected ERROR elog entries per failed package.
        for mysettings, key, logentries in self._failed_pkgs_die_msgs:
            root_msg = ""
            if mysettings["ROOT"] != "/":
                root_msg = " merged to %s" % mysettings["ROOT"]
            print()
            printer.einfo("Error messages for package %s%s:" % \
                (colorize("INFORM", key), root_msg))
            print()
            for phase in portage.const.EBUILD_PHASES:
                if phase not in logentries:
                    continue
                for msgtype, msgcontent in logentries[phase]:
                    # NOTE(review): basestring is Python 2 only —
                    # presumably provided by a compat import; confirm.
                    if isinstance(msgcontent, basestring):
                        msgcontent = [msgcontent]
                    for line in msgcontent:
                        printer.eerror(line.strip("\n"))

    if self._post_mod_echo_msgs:
        for msg in self._post_mod_echo_msgs:
            msg()

    # Final failure summary, with log file locations when available.
    if len(self._failed_pkgs_all) > 1 or \
        (self._failed_pkgs_all and keep_going):
        if len(self._failed_pkgs_all) > 1:
            msg = "The following %d packages have " % \
                len(self._failed_pkgs_all) + \
                "failed to build or install:"
        else:
            msg = "The following package has " + \
                "failed to build or install:"

        printer.eerror("")
        for line in textwrap.wrap(msg, 72):
            printer.eerror(line)
        printer.eerror("")
        for failed_pkg in self._failed_pkgs_all:
            # Use _unicode_decode() to force unicode format string so
            # that Package.__unicode__() is called in python2.
            msg = _unicode_decode(" %s") % (failed_pkg.pkg,)
            log_path = self._locate_failure_log(failed_pkg)
            if log_path is not None:
                msg += ", Log file:"
            printer.eerror(msg)
            if log_path is not None:
                printer.eerror(" '%s'" % colorize('INFORM', log_path))
        printer.eerror("")

    if self._failed_pkgs_all:
        return 1
    return os.EX_OK
1909 |
+ |
1910 |
def _elog_listener(self, mysettings, key, logentries, fulltext):
    """Collect ERROR-level elog entries so merge() can replay them at exit."""
    error_entries = portage.elog.filter_loglevels(logentries, ["ERROR"])
    if not error_entries:
        return
    self._failed_pkgs_die_msgs.append((mysettings, key, error_entries))
1915 |
+ |
1916 |
+ def _locate_failure_log(self, failed_pkg): |
1917 |
+ |
1918 |
+ build_dir = failed_pkg.build_dir |
1919 |
+ log_file = None |
1920 |
+ |
1921 |
+ log_paths = [failed_pkg.build_log] |
1922 |
+ |
1923 |
+ for log_path in log_paths: |
1924 |
+ if not log_path: |
1925 |
+ continue |
1926 |
+ |
1927 |
+ try: |
1928 |
+ log_size = os.stat(log_path).st_size |
1929 |
+ except OSError: |
1930 |
+ continue |
1931 |
+ |
1932 |
+ if log_size == 0: |
1933 |
+ continue |
1934 |
+ |
1935 |
+ return log_path |
1936 |
+ |
1937 |
+ return None |
1938 |
+ |
1939 |
def _add_packages(self):
    """Seed the scheduling queue with the Package entries of the merge
    list; solved Blocker instances are simply skipped."""
    self._pkg_queue.extend(
        entry for entry in self._mergelist
        if isinstance(entry, Package))
1946 |
+ |
1947 |
def _system_merge_started(self, merge):
    """
    Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
    In general, this keeps track of installed system packages with
    unsatisfied RDEPEND or PDEPEND (circular dependencies). It can be
    a fragile situation, so we don't execute any unrelated builds until
    the circular dependencies are built and installed.
    """
    graph = self._digraph
    if graph is None:
        return
    pkg = merge.merge.pkg

    # Skip this if $ROOT != / since it shouldn't matter if there
    # are unsatisfied system runtime deps in this case.
    if pkg.root != '/':
        return

    completed_tasks = self._completed_tasks
    unsatisfied = self._unsatisfied_system_deps

    def ignore_non_runtime_or_satisfied(priority):
        """
        Ignore non-runtime and satisfied runtime priorities.
        """
        if isinstance(priority, DepPriority) and \
            not priority.satisfied and \
            (priority.runtime or priority.runtime_post):
            return False
        return True

    # When checking for unsatisfied runtime deps, only check
    # direct deps since indirect deps are checked when the
    # corresponding parent is merged.
    for child in graph.child_nodes(pkg,
        ignore_priority=ignore_non_runtime_or_satisfied):
        if not isinstance(child, Package) or \
            child.operation == 'uninstall':
            continue
        if child is pkg:
            continue
        # Only merges that haven't completed yet count as unsatisfied.
        if child.operation == 'merge' and \
            child not in completed_tasks:
            unsatisfied.add(child)
1991 |
+ |
1992 |
def _merge_wait_exit_handler(self, task):
    # Exit callback for merges queued via _merge_wait_queue: drop the
    # task from the scheduled list, then run the normal merge-exit path.
    self._merge_wait_scheduled.remove(task)
    self._merge_exit(task)
1995 |
+ |
1996 |
def _merge_exit(self, merge):
    """Exit callback for a PackageMerge task: update bookkeeping,
    return the pooled config, refresh the status display, and
    trigger another scheduling pass."""
    self._running_tasks.pop(id(merge), None)
    self._do_merge_exit(merge)
    self._deallocate_config(merge.merge.settings)
    # Only count successful merges of packages that weren't already
    # installed toward the progress display.
    if merge.returncode == os.EX_OK and \
        not merge.merge.pkg.installed:
        self._status_display.curval += 1
    self._status_display.merges = len(self._task_queues.merge)
    self._schedule()
2005 |
+ |
2006 |
def _do_merge_exit(self, merge):
    """Handle the outcome of a finished merge: record failures, mark
    tasks complete, update the resume list, and (gobs) record the
    build log via gobs_buildlog on every exit path."""
    pkg = merge.merge.pkg
    settings = merge.merge.settings
    trees = self.trees
    # gobs addition: build-log recorder invoked on every return path.
    init_buildlog = gobs_buildlog()
    if merge.returncode != os.EX_OK:
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            pkg=pkg,
            returncode=merge.returncode))
        if not self._terminated_tasks:
            self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
            self._status_display.failed = len(self._failed_pkgs)
        init_buildlog.add_buildlog_main(settings, pkg, trees)
        return

    self._task_complete(pkg)
    pkg_to_replace = merge.merge.pkg_to_replace
    if pkg_to_replace is not None:
        # When a package is replaced, mark it's uninstall
        # task complete (if any).
        if self._digraph is not None and \
            pkg_to_replace in self._digraph:
            try:
                self._pkg_queue.remove(pkg_to_replace)
            except ValueError:
                pass
            self._task_complete(pkg_to_replace)
        else:
            self._pkg_cache.pop(pkg_to_replace, None)

    if pkg.installed:
        init_buildlog.add_buildlog_main(settings, pkg, trees)
        return

    # May exec() a new emerge process and never return.
    self._restart_if_necessary(pkg)

    # Call mtimedb.commit() after each merge so that
    # --resume still works after being interrupted
    # by reboot, sigkill or similar.
    mtimedb = self._mtimedb
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    if not mtimedb["resume"]["mergelist"]:
        del mtimedb["resume"]
    mtimedb.commit()
    init_buildlog.add_buildlog_main(settings, pkg, trees)
2055 |
+ |
2056 |
def _build_exit(self, build):
    """Exit callback for a build task: on success hand the result to
    the merge queue (or the wait queue for deep system deps); on
    failure record it and (gobs) log via gobs_buildlog."""
    self._running_tasks.pop(id(build), None)
    if build.returncode == os.EX_OK and self._terminated_tasks:
        # We've been interrupted, so we won't
        # add this to the merge queue.
        self.curval += 1
        self._deallocate_config(build.settings)
    elif build.returncode == os.EX_OK:
        self.curval += 1
        merge = PackageMerge(merge=build)
        self._running_tasks[id(merge)] = merge
        if not build.build_opts.buildpkgonly and \
            build.pkg in self._deep_system_deps:
            # Since dependencies on system packages are frequently
            # unspecified, merge them only when no builds are executing.
            self._merge_wait_queue.append(merge)
            merge.addStartListener(self._system_merge_started)
        else:
            merge.addExitListener(self._merge_exit)
            self._task_queues.merge.add(merge)
            self._status_display.merges = len(self._task_queues.merge)
    else:
        # Build failed: record the failure and write the gobs build log.
        settings = build.settings
        trees = self.trees
        pkg = build.pkg
        init_buildlog = gobs_buildlog()
        build_dir = settings.get("PORTAGE_BUILDDIR")
        build_log = settings.get("PORTAGE_LOG_FILE")

        self._failed_pkgs.append(self._failed_pkg(
            build_dir=build_dir, build_log=build_log,
            pkg=pkg, returncode=build.returncode))
        if not self._terminated_tasks:
            self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
            self._status_display.failed = len(self._failed_pkgs)
        self._deallocate_config(build.settings)
        init_buildlog.add_buildlog_main(settings, pkg, trees)
    self._jobs -= 1
    self._status_display.running = self._jobs
    self._schedule()
2096 |
+ |
2097 |
def _extract_exit(self, build):
    # Binary-package extraction finishes the same way a build does.
    self._build_exit(build)
2099 |
+ |
2100 |
+ def _task_complete(self, pkg): |
2101 |
+ self._completed_tasks.add(pkg) |
2102 |
+ self._unsatisfied_system_deps.discard(pkg) |
2103 |
+ self._choose_pkg_return_early = False |
2104 |
+ blocker_db = self._blocker_db[pkg.root] |
2105 |
+ blocker_db.discardBlocker(pkg) |
2106 |
+ |
2107 |
def _merge(self):
    """Run one pass of the main scheduling loop with the elog listener
    installed; returns os.EX_OK or the last failed package's code."""
    self._add_prefetchers()
    self._add_packages()
    pkg_queue = self._pkg_queue
    failed_pkgs = self._failed_pkgs
    # Suppress lock messages while in background mode.
    portage.locks._quiet = self._background
    portage.elog.add_listener(self._elog_listener)
    rval = os.EX_OK

    try:
        self._main_loop()
    finally:
        # Always undo listener/quiet state, even on exception.
        self._main_loop_cleanup()
        portage.locks._quiet = False
        portage.elog.remove_listener(self._elog_listener)
        if failed_pkgs:
            rval = failed_pkgs[-1].returncode

    return rval
2127 |
+ |
2128 |
+ def _main_loop_cleanup(self): |
2129 |
+ del self._pkg_queue[:] |
2130 |
+ self._completed_tasks.clear() |
2131 |
+ self._deep_system_deps.clear() |
2132 |
+ self._unsatisfied_system_deps.clear() |
2133 |
+ self._choose_pkg_return_early = False |
2134 |
+ self._status_display.reset() |
2135 |
+ self._digraph = None |
2136 |
+ self._task_queues.fetch.clear() |
2137 |
+ self._prefetchers.clear() |
2138 |
+ |
2139 |
def _choose_pkg(self):
    """
    Choose a task that has all its dependencies satisfied. This is used
    for parallel build scheduling, and ensures that we don't build
    anything with deep dependencies that have yet to be merged.

    @rtype: Package or None
    @returns: the next package to dispatch, or None if nothing is
        currently eligible.
    """

    # Cached negative result: nothing became eligible since last time.
    if self._choose_pkg_return_early:
        return None

    if self._digraph is None:
        # No dependency graph (e.g. --nodeps): serialize unless
        # parallel jobs were explicitly requested.
        if self._is_work_scheduled() and \
            not ("--nodeps" in self.myopts and \
            (self._max_jobs is True or self._max_jobs > 1)):
            self._choose_pkg_return_early = True
            return None
        return self._pkg_queue.pop(0)

    if not self._is_work_scheduled():
        return self._pkg_queue.pop(0)

    self._prune_digraph()

    chosen_pkg = None

    # Prefer uninstall operations when available.
    graph = self._digraph
    for pkg in self._pkg_queue:
        if pkg.operation == 'uninstall' and \
            not graph.child_nodes(pkg):
            chosen_pkg = pkg
            break

    if chosen_pkg is None:
        # Otherwise pick the first package not blocked by any merge
        # that is still scheduled; packages queued AFTER the candidate
        # ('later') don't count as blockers.
        later = set(self._pkg_queue)
        for pkg in self._pkg_queue:
            later.remove(pkg)
            if not self._dependent_on_scheduled_merges(pkg, later):
                chosen_pkg = pkg
                break

    if chosen_pkg is not None:
        self._pkg_queue.remove(chosen_pkg)

    if chosen_pkg is None:
        # There's no point in searching for a package to
        # choose until at least one of the existing jobs
        # completes.
        self._choose_pkg_return_early = True

    return chosen_pkg
2190 |
+ |
2191 |
def _dependent_on_scheduled_merges(self, pkg, later):
    """
    Traverse the subgraph of the given packages deep dependencies
    to see if it contains any scheduled merges.
    @param pkg: a package to check dependencies for
    @type pkg: Package
    @param later: packages for which dependence should be ignored
    since they will be merged later than pkg anyway and therefore
    delaying the merge of pkg will not result in a more optimal
    merge order
    @type later: set
    @rtype: bool
    @returns: True if the package is dependent, False otherwise.
    """

    graph = self._digraph
    completed_tasks = self._completed_tasks

    dependent = False
    # Depth-first traversal over the dependency subgraph of pkg.
    traversed_nodes = set([pkg])
    direct_deps = graph.child_nodes(pkg)
    node_stack = direct_deps
    direct_deps = frozenset(direct_deps)
    while node_stack:
        node = node_stack.pop()
        if node in traversed_nodes:
            continue
        traversed_nodes.add(node)
        # A node counts as a pending dependency unless it is already
        # installed with no action, an indirect uninstall, already
        # completed, or deliberately deferred via 'later'.
        if not ((node.installed and node.operation == "nomerge") or \
            (node.operation == "uninstall" and \
            node not in direct_deps) or \
            node in completed_tasks or \
            node in later):
            dependent = True
            break

        # Don't traverse children of uninstall nodes since
        # those aren't dependencies in the usual sense.
        if node.operation != "uninstall":
            node_stack.extend(graph.child_nodes(node))

    return dependent
2233 |
+ |
2234 |
+ def _allocate_config(self, root): |
2235 |
+ """ |
2236 |
+ Allocate a unique config instance for a task in order |
2237 |
+ to prevent interference between parallel tasks. |
2238 |
+ """ |
2239 |
+ if self._config_pool[root]: |
2240 |
+ temp_settings = self._config_pool[root].pop() |
2241 |
+ else: |
2242 |
+ temp_settings = portage.config(clone=self.pkgsettings[root]) |
2243 |
+ # Since config.setcpv() isn't guaranteed to call config.reset() due to |
2244 |
+ # performance reasons, call it here to make sure all settings from the |
2245 |
+ # previous package get flushed out (such as PORTAGE_LOG_FILE). |
2246 |
+ temp_settings.reload() |
2247 |
+ temp_settings.reset() |
2248 |
+ return temp_settings |
2249 |
+ |
2250 |
def _deallocate_config(self, settings):
    # Return a config instance to the per-root pool so that
    # _allocate_config() can reuse it for a later task.
    self._config_pool[settings["ROOT"]].append(settings)
2252 |
+ |
2253 |
def _main_loop(self):
    """Drive the scheduler: dispatch tasks and poll for events until no
    work remains (or scheduling stops due to failure/termination)."""
    # Only allow 1 job max if a restart is scheduled
    # due to portage update.
    if self._is_restart_scheduled() or \
        self._opts_no_background.intersection(self.myopts):
        self._set_max_jobs(1)

    # Dispatch-and-poll while _schedule() reports more work to start.
    while self._schedule():
        self._poll_loop()

    # Drain: keep polling until all running tasks have exited.
    while True:
        self._schedule()
        if not self._is_work_scheduled():
            break
        self._poll_loop()
2269 |
+ |
2270 |
+ def _keep_scheduling(self): |
2271 |
+ return bool(not self._terminated_tasks and self._pkg_queue and \ |
2272 |
+ not (self._failed_pkgs and not self._build_opts.fetchonly)) |
2273 |
+ |
2274 |
def _is_work_scheduled(self):
    # True while any build or merge task is still registered as running.
    return bool(self._running_tasks)
2276 |
+ |
2277 |
def _schedule_tasks(self):
    """Dispatch queued work until the system reaches a fixed point
    (no further state changes), then report whether scheduling
    should continue.
    @rtype: bool
    @returns: result of self._keep_scheduling().
    """

    while True:

        # When the number of jobs and merges drops to zero,
        # process a single merge from _merge_wait_queue if
        # it's not empty. We only process one since these are
        # special packages and we want to ensure that
        # parallel-install does not cause more than one of
        # them to install at the same time.
        if (self._merge_wait_queue and not self._jobs and
            not self._task_queues.merge):
            task = self._merge_wait_queue.popleft()
            task.addExitListener(self._merge_wait_exit_handler)
            self._task_queues.merge.add(task)
            self._status_display.merges = len(self._task_queues.merge)
            self._merge_wait_scheduled.append(task)

        self._schedule_tasks_imp()
        self._status_display.display()

        # Count how many queues actually started something; loop again
        # while progress is being made.
        state_change = 0
        for q in self._task_queues.values():
            if q.schedule():
                state_change += 1

        # Cancel prefetchers if they're the only reason
        # the main poll loop is still running.
        if self._failed_pkgs and not self._build_opts.fetchonly and \
            not self._is_work_scheduled() and \
            self._task_queues.fetch:
            self._task_queues.fetch.clear()
            state_change += 1

        if not (state_change or \
            (self._merge_wait_queue and not self._jobs and
            not self._task_queues.merge)):
            break

    return self._keep_scheduling()
2317 |
+ |
2318 |
+ def _job_delay(self): |
2319 |
+ """ |
2320 |
+ @rtype: bool |
2321 |
+ @returns: True if job scheduling should be delayed, False otherwise. |
2322 |
+ """ |
2323 |
+ |
2324 |
+ if self._jobs and self._max_load is not None: |
2325 |
+ |
2326 |
+ current_time = time.time() |
2327 |
+ |
2328 |
+ delay = self._job_delay_factor * self._jobs ** self._job_delay_exp |
2329 |
+ if delay > self._job_delay_max: |
2330 |
+ delay = self._job_delay_max |
2331 |
+ if (current_time - self._previous_job_start_time) < delay: |
2332 |
+ return True |
2333 |
+ |
2334 |
+ return False |
2335 |
+ |
2336 |
+ def _schedule_tasks_imp(self): |
2337 |
+ """ |
2338 |
+ @rtype: bool |
2339 |
+ @returns: True if state changed, False otherwise. |
2340 |
+ """ |
2341 |
+ |
2342 |
+ state_change = 0 |
2343 |
+ |
2344 |
+ while True: |
2345 |
+ |
2346 |
+ if not self._keep_scheduling(): |
2347 |
+ return bool(state_change) |
2348 |
+ |
2349 |
+ if self._choose_pkg_return_early or \ |
2350 |
+ self._merge_wait_scheduled or \ |
2351 |
+ (self._jobs and self._unsatisfied_system_deps) or \ |
2352 |
+ not self._can_add_job() or \ |
2353 |
+ self._job_delay(): |
2354 |
+ return bool(state_change) |
2355 |
+ |
2356 |
+ pkg = self._choose_pkg() |
2357 |
+ if pkg is None: |
2358 |
+ return bool(state_change) |
2359 |
+ |
2360 |
+ state_change += 1 |
2361 |
+ |
2362 |
+ if not pkg.installed: |
2363 |
+ self._pkg_count.curval += 1 |
2364 |
+ |
2365 |
+ task = self._task(pkg) |
2366 |
+ |
2367 |
+ if pkg.installed: |
2368 |
+ merge = PackageMerge(merge=task) |
2369 |
+ self._running_tasks[id(merge)] = merge |
2370 |
+ merge.addExitListener(self._merge_exit) |
2371 |
+ self._task_queues.merge.addFront(merge) |
2372 |
+ |
2373 |
+ elif pkg.built: |
2374 |
+ self._jobs += 1 |
2375 |
+ self._previous_job_start_time = time.time() |
2376 |
+ self._status_display.running = self._jobs |
2377 |
+ self._running_tasks[id(task)] = task |
2378 |
+ task.addExitListener(self._extract_exit) |
2379 |
+ self._task_queues.jobs.add(task) |
2380 |
+ |
2381 |
+ else: |
2382 |
+ self._jobs += 1 |
2383 |
+ self._previous_job_start_time = time.time() |
2384 |
+ self._status_display.running = self._jobs |
2385 |
+ self._running_tasks[id(task)] = task |
2386 |
+ task.addExitListener(self._build_exit) |
2387 |
+ self._task_queues.jobs.add(task) |
2388 |
+ |
2389 |
+ return bool(state_change) |
2390 |
+ |
2391 |
+ def _task(self, pkg): |
2392 |
+ |
2393 |
+ pkg_to_replace = None |
2394 |
+ if pkg.operation != "uninstall": |
2395 |
+ vardb = pkg.root_config.trees["vartree"].dbapi |
2396 |
+ previous_cpv = [x for x in vardb.match(pkg.slot_atom) \ |
2397 |
+ if portage.cpv_getkey(x) == pkg.cp] |
2398 |
+ if not previous_cpv and vardb.cpv_exists(pkg.cpv): |
2399 |
+ # same cpv, different SLOT |
2400 |
+ previous_cpv = [pkg.cpv] |
2401 |
+ if previous_cpv: |
2402 |
+ previous_cpv = previous_cpv.pop() |
2403 |
+ pkg_to_replace = self._pkg(previous_cpv, |
2404 |
+ "installed", pkg.root_config, installed=True, |
2405 |
+ operation="uninstall") |
2406 |
+ |
2407 |
+ prefetcher = self._prefetchers.pop(pkg, None) |
2408 |
+ if prefetcher is not None and not prefetcher.isAlive(): |
2409 |
+ try: |
2410 |
+ self._task_queues.fetch._task_queue.remove(prefetcher) |
2411 |
+ except ValueError: |
2412 |
+ pass |
2413 |
+ prefetcher = None |
2414 |
+ |
2415 |
+ task = MergeListItem(args_set=self._args_set, |
2416 |
+ background=self._background, binpkg_opts=self._binpkg_opts, |
2417 |
+ build_opts=self._build_opts, |
2418 |
+ config_pool=self._ConfigPool(pkg.root, |
2419 |
+ self._allocate_config, self._deallocate_config), |
2420 |
+ emerge_opts=self.myopts, |
2421 |
+ find_blockers=self._find_blockers(pkg), logger=self._logger, |
2422 |
+ mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(), |
2423 |
+ pkg_to_replace=pkg_to_replace, |
2424 |
+ prefetcher=prefetcher, |
2425 |
+ scheduler=self._sched_iface, |
2426 |
+ settings=self._allocate_config(pkg.root), |
2427 |
+ statusMessage=self._status_msg, |
2428 |
+ world_atom=self._world_atom) |
2429 |
+ |
2430 |
+ return task |
2431 |
+ |
2432 |
+ def _failed_pkg_msg(self, failed_pkg, action, preposition): |
2433 |
+ pkg = failed_pkg.pkg |
2434 |
+ msg = "%s to %s %s" % \ |
2435 |
+ (bad("Failed"), action, colorize("INFORM", pkg.cpv)) |
2436 |
+ if pkg.root != "/": |
2437 |
+ msg += " %s %s" % (preposition, pkg.root) |
2438 |
+ |
2439 |
+ log_path = self._locate_failure_log(failed_pkg) |
2440 |
+ if log_path is not None: |
2441 |
+ msg += ", Log file:" |
2442 |
+ self._status_msg(msg) |
2443 |
+ |
2444 |
+ if log_path is not None: |
2445 |
+ self._status_msg(" '%s'" % (colorize("INFORM", log_path),)) |
2446 |
+ |
2447 |
+ def _status_msg(self, msg): |
2448 |
+ """ |
2449 |
+ Display a brief status message (no newlines) in the status display. |
2450 |
+ This is called by tasks to provide feedback to the user. This |
2451 |
+ delegates the resposibility of generating \r and \n control characters, |
2452 |
+ to guarantee that lines are created or erased when necessary and |
2453 |
+ appropriate. |
2454 |
+ |
2455 |
+ @type msg: str |
2456 |
+ @param msg: a brief status message (no newlines allowed) |
2457 |
+ """ |
2458 |
+ if not self._background: |
2459 |
+ writemsg_level("\n") |
2460 |
+ self._status_display.displayMessage(msg) |
2461 |
+ |
2462 |
+ def _save_resume_list(self): |
2463 |
+ """ |
2464 |
+ Do this before verifying the ebuild Manifests since it might |
2465 |
+ be possible for the user to use --resume --skipfirst get past |
2466 |
+ a non-essential package with a broken digest. |
2467 |
+ """ |
2468 |
+ mtimedb = self._mtimedb |
2469 |
+ |
2470 |
+ mtimedb["resume"] = {} |
2471 |
+ # Stored as a dict starting with portage-2.1.6_rc1, and supported |
2472 |
+ # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support |
2473 |
+ # a list type for options. |
2474 |
+ mtimedb["resume"]["myopts"] = self.myopts.copy() |
2475 |
+ |
2476 |
+ # Convert Atom instances to plain str. |
2477 |
+ mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites] |
2478 |
+ mtimedb["resume"]["mergelist"] = [list(x) \ |
2479 |
+ for x in self._mergelist \ |
2480 |
+ if isinstance(x, Package) and x.operation == "merge"] |
2481 |
+ |
2482 |
+ mtimedb.commit() |
2483 |
+ |
2484 |
+ def _calc_resume_list(self): |
2485 |
+ """ |
2486 |
+ Use the current resume list to calculate a new one, |
2487 |
+ dropping any packages with unsatisfied deps. |
2488 |
+ @rtype: bool |
2489 |
+ @returns: True if successful, False otherwise. |
2490 |
+ """ |
2491 |
+ print(colorize("GOOD", "*** Resuming merge...")) |
2492 |
+ |
2493 |
+ # free some memory before creating |
2494 |
+ # the resume depgraph |
2495 |
+ self._destroy_graph() |
2496 |
+ |
2497 |
+ myparams = create_depgraph_params(self.myopts, None) |
2498 |
+ success = False |
2499 |
+ e = None |
2500 |
+ try: |
2501 |
+ success, mydepgraph, dropped_tasks = resume_depgraph( |
2502 |
+ self.settings, self.trees, self._mtimedb, self.myopts, |
2503 |
+ myparams, self._spinner) |
2504 |
+ except depgraph.UnsatisfiedResumeDep as exc: |
2505 |
+ # rename variable to avoid python-3.0 error: |
2506 |
+ # SyntaxError: can not delete variable 'e' referenced in nested |
2507 |
+ # scope |
2508 |
+ e = exc |
2509 |
+ mydepgraph = e.depgraph |
2510 |
+ dropped_tasks = set() |
2511 |
+ |
2512 |
+ if e is not None: |
2513 |
+ def unsatisfied_resume_dep_msg(): |
2514 |
+ mydepgraph.display_problems() |
2515 |
+ out = portage.output.EOutput() |
2516 |
+ out.eerror("One or more packages are either masked or " + \ |
2517 |
+ "have missing dependencies:") |
2518 |
+ out.eerror("") |
2519 |
+ indent = " " |
2520 |
+ show_parents = set() |
2521 |
+ for dep in e.value: |
2522 |
+ if dep.parent in show_parents: |
2523 |
+ continue |
2524 |
+ show_parents.add(dep.parent) |
2525 |
+ if dep.atom is None: |
2526 |
+ out.eerror(indent + "Masked package:") |
2527 |
+ out.eerror(2 * indent + str(dep.parent)) |
2528 |
+ out.eerror("") |
2529 |
+ else: |
2530 |
+ out.eerror(indent + str(dep.atom) + " pulled in by:") |
2531 |
+ out.eerror(2 * indent + str(dep.parent)) |
2532 |
+ out.eerror("") |
2533 |
+ msg = "The resume list contains packages " + \ |
2534 |
+ "that are either masked or have " + \ |
2535 |
+ "unsatisfied dependencies. " + \ |
2536 |
+ "Please restart/continue " + \ |
2537 |
+ "the operation manually, or use --skipfirst " + \ |
2538 |
+ "to skip the first package in the list and " + \ |
2539 |
+ "any other packages that may be " + \ |
2540 |
+ "masked or have missing dependencies." |
2541 |
+ for line in textwrap.wrap(msg, 72): |
2542 |
+ out.eerror(line) |
2543 |
+ self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg) |
2544 |
+ return False |
2545 |
+ |
2546 |
+ if success and self._show_list(): |
2547 |
+ mylist = mydepgraph.altlist() |
2548 |
+ if mylist: |
2549 |
+ if "--tree" in self.myopts: |
2550 |
+ mylist.reverse() |
2551 |
+ mydepgraph.display(mylist, favorites=self._favorites) |
2552 |
+ |
2553 |
+ if not success: |
2554 |
+ self._post_mod_echo_msgs.append(mydepgraph.display_problems) |
2555 |
+ return False |
2556 |
+ mydepgraph.display_problems() |
2557 |
+ self._init_graph(mydepgraph.schedulerGraph()) |
2558 |
+ |
2559 |
+ msg_width = 75 |
2560 |
+ for task in dropped_tasks: |
2561 |
+ if not (isinstance(task, Package) and task.operation == "merge"): |
2562 |
+ continue |
2563 |
+ pkg = task |
2564 |
+ msg = "emerge --keep-going:" + \ |
2565 |
+ " %s" % (pkg.cpv,) |
2566 |
+ if pkg.root != "/": |
2567 |
+ msg += " for %s" % (pkg.root,) |
2568 |
+ msg += " dropped due to unsatisfied dependency." |
2569 |
+ for line in textwrap.wrap(msg, msg_width): |
2570 |
+ eerror(line, phase="other", key=pkg.cpv) |
2571 |
+ settings = self.pkgsettings[pkg.root] |
2572 |
+ # Ensure that log collection from $T is disabled inside |
2573 |
+ # elog_process(), since any logs that might exist are |
2574 |
+ # not valid here. |
2575 |
+ settings.pop("T", None) |
2576 |
+ portage.elog.elog_process(pkg.cpv, settings) |
2577 |
+ self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg)) |
2578 |
+ |
2579 |
+ return True |
2580 |
+ |
2581 |
+ def _show_list(self): |
2582 |
+ myopts = self.myopts |
2583 |
+ if "--quiet" not in myopts and \ |
2584 |
+ ("--ask" in myopts or "--tree" in myopts or \ |
2585 |
+ "--verbose" in myopts): |
2586 |
+ return True |
2587 |
+ return False |
2588 |
+ |
2589 |
+ def _world_atom(self, pkg): |
2590 |
+ """ |
2591 |
+ Add or remove the package to the world file, but only if |
2592 |
+ it's supposed to be added or removed. Otherwise, do nothing. |
2593 |
+ """ |
2594 |
+ |
2595 |
+ if set(("--buildpkgonly", "--fetchonly", |
2596 |
+ "--fetch-all-uri", |
2597 |
+ "--oneshot", "--onlydeps", |
2598 |
+ "--pretend")).intersection(self.myopts): |
2599 |
+ return |
2600 |
+ |
2601 |
+ if pkg.root != self.target_root: |
2602 |
+ return |
2603 |
+ |
2604 |
+ args_set = self._args_set |
2605 |
+ if not args_set.findAtomForPackage(pkg): |
2606 |
+ return |
2607 |
+ |
2608 |
+ logger = self._logger |
2609 |
+ pkg_count = self._pkg_count |
2610 |
+ root_config = pkg.root_config |
2611 |
+ world_set = root_config.sets["selected"] |
2612 |
+ world_locked = False |
2613 |
+ if hasattr(world_set, "lock"): |
2614 |
+ world_set.lock() |
2615 |
+ world_locked = True |
2616 |
+ |
2617 |
+ try: |
2618 |
+ if hasattr(world_set, "load"): |
2619 |
+ world_set.load() # maybe it's changed on disk |
2620 |
+ |
2621 |
+ if pkg.operation == "uninstall": |
2622 |
+ if hasattr(world_set, "cleanPackage"): |
2623 |
+ world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi, |
2624 |
+ pkg.cpv) |
2625 |
+ if hasattr(world_set, "remove"): |
2626 |
+ for s in pkg.root_config.setconfig.active: |
2627 |
+ world_set.remove(SETPREFIX+s) |
2628 |
+ else: |
2629 |
+ atom = create_world_atom(pkg, args_set, root_config) |
2630 |
+ if atom: |
2631 |
+ if hasattr(world_set, "add"): |
2632 |
+ self._status_msg(('Recording %s in "world" ' + \ |
2633 |
+ 'favorites file...') % atom) |
2634 |
+ logger.log(" === (%s of %s) Updating world file (%s)" % \ |
2635 |
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)) |
2636 |
+ world_set.add(atom) |
2637 |
+ else: |
2638 |
+ writemsg_level('\n!!! Unable to record %s in "world"\n' % \ |
2639 |
+ (atom,), level=logging.WARN, noiselevel=-1) |
2640 |
+ finally: |
2641 |
+ if world_locked: |
2642 |
+ world_set.unlock() |
2643 |
+ |
2644 |
+ def _pkg(self, cpv, type_name, root_config, installed=False, |
2645 |
+ operation=None, myrepo=None): |
2646 |
+ """ |
2647 |
+ Get a package instance from the cache, or create a new |
2648 |
+ one if necessary. Raises KeyError from aux_get if it |
2649 |
+ failures for some reason (package does not exist or is |
2650 |
+ corrupt). |
2651 |
+ """ |
2652 |
+ |
2653 |
+ # Reuse existing instance when available. |
2654 |
+ pkg = self._pkg_cache.get(Package._gen_hash_key(cpv=cpv, |
2655 |
+ type_name=type_name, repo_name=myrepo, root_config=root_config, |
2656 |
+ installed=installed, operation=operation)) |
2657 |
+ |
2658 |
+ if pkg is not None: |
2659 |
+ return pkg |
2660 |
+ |
2661 |
+ tree_type = depgraph.pkg_tree_map[type_name] |
2662 |
+ db = root_config.trees[tree_type].dbapi |
2663 |
+ db_keys = list(self.trees[root_config.root][ |
2664 |
+ tree_type].dbapi._aux_cache_keys) |
2665 |
+ metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo)) |
2666 |
+ pkg = Package(built=(type_name != "ebuild"), |
2667 |
+ cpv=cpv, installed=installed, metadata=metadata, |
2668 |
+ root_config=root_config, type_name=type_name) |
2669 |
+ self._pkg_cache[pkg] = pkg |
2670 |
+ return pkg |
2671 |
|
2672 |
diff --git a/gobs/pym/arch.py~ b/gobs/pym/arch.py~ |
2673 |
new file mode 100644 |
2674 |
index 0000000..ebd0017 |
2675 |
--- /dev/null |
2676 |
+++ b/gobs/pym/arch.py~ |
2677 |
@@ -0,0 +1,25 @@ |
2678 |
+import portage |
2679 |
+from gobs.readconf import get_conf_settings |
2680 |
+reader=get_conf_settings() |
2681 |
+gobs_settings_dict=reader.read_gobs_settings_all() |
2682 |
+# make a CM |
2683 |
+from gobs.ConnectionManager import connectionManager |
2684 |
+CM=connectionManager(gobs_settings_dict) |
2685 |
+#selectively import the pgsql/mysql querys |
2686 |
+if CM.getName()=='pgsql': |
2687 |
+ from gobs.pgsql import * |
2688 |
+ |
2689 |
+class gobs_arch(object): |
2690 |
+ |
2691 |
+ def update_arch_db(self): |
2692 |
+ conn = CM.getConnection() |
2693 |
+ # FIXME: check for new keyword |
2694 |
+ # Add arch db (keywords) |
2695 |
+ if get_arch_db(conn) is None: |
2696 |
+ arch_list = portage.archlist |
2697 |
+ for arch in arch_list: |
2698 |
+ if arch[0] not in ["~","-"]: |
2699 |
+ arch_list.append("-" + arch) |
2700 |
+ arch_list.append("-*") |
2701 |
+ add_new_arch_db(conn,arch_list) |
2702 |
+ CM.putConnection(conn) |
2703 |
\ No newline at end of file |
2704 |
|
2705 |
diff --git a/gobs/pym/build_log.py b/gobs/pym/build_log.py |
2706 |
index 80a186a..35724c4 100644 |
2707 |
--- a/gobs/pym/build_log.py |
2708 |
+++ b/gobs/pym/build_log.py |
2709 |
@@ -110,15 +110,15 @@ class gobs_buildlog(object): |
2710 |
|
2711 |
def search_info(self, textline, error_log_list): |
2712 |
if re.search(" * Package:", textline): |
2713 |
- error_log_list.append(textline) |
2714 |
+ error_log_list.append(textline + '\n') |
2715 |
if re.search(" * Repository:", textline): |
2716 |
- error_log_list.append(textline) |
2717 |
+ error_log_list.append(textline + '\n') |
2718 |
if re.search(" * Maintainer:", textline): |
2719 |
- error_log_list.append(textline) |
2720 |
+ error_log_list.append(textline + '\n') |
2721 |
if re.search(" * USE:", textline): |
2722 |
- error_log_list.append(textline) |
2723 |
+ error_log_list.append(textline + '\n') |
2724 |
if re.search(" * FEATURES:", textline): |
2725 |
- error_log_list.append(textline) |
2726 |
+ error_log_list.append(textline + '\n') |
2727 |
return error_log_list |
2728 |
|
2729 |
def search_error(self, logfile_text, textline, error_log_list, sum_build_log_list, i): |
2730 |
@@ -128,7 +128,7 @@ class gobs_buildlog(object): |
2731 |
error_log_list.append(".....\n") |
2732 |
while x != i + 3 and endline: |
2733 |
try: |
2734 |
- error_log_list.append(logfile_text[x]) |
2735 |
+ error_log_list.append(logfile_text[x] + '\n') |
2736 |
except: |
2737 |
endline = False |
2738 |
else: |
2739 |
@@ -141,7 +141,7 @@ class gobs_buildlog(object): |
2740 |
error_log_list.append(".....\n") |
2741 |
while x != i + 10 and endline: |
2742 |
try: |
2743 |
- error_log_list.append(logfile_text[x]) |
2744 |
+ error_log_list.append(logfile_text[x] + '\n') |
2745 |
except: |
2746 |
endline = False |
2747 |
else: |
2748 |
@@ -152,7 +152,7 @@ class gobs_buildlog(object): |
2749 |
error_log_list.append(".....\n") |
2750 |
while x != i + 3 and endline: |
2751 |
try: |
2752 |
- error_log_list.append(logfile_text[x]) |
2753 |
+ error_log_list.append(logfile_text[x] + '\n') |
2754 |
except: |
2755 |
endline = False |
2756 |
else: |
2757 |
@@ -162,12 +162,12 @@ class gobs_buildlog(object): |
2758 |
def search_qa(self, logfile_text, textline, qa_error_list, error_log_list,i): |
2759 |
if re.search(" * QA Notice:", textline): |
2760 |
x = i |
2761 |
- qa_error_list.append(logfile_text[x]) |
2762 |
+ qa_error_list.append(logfile_text[x] + '\n') |
2763 |
endline= True |
2764 |
error_log_list.append(".....\n") |
2765 |
while x != i + 3 and endline: |
2766 |
try: |
2767 |
- error_log_list.append(logfile_text[x]) |
2768 |
+ error_log_list.append(logfile_text[x] + '\n') |
2769 |
except: |
2770 |
endline = False |
2771 |
else: |
2772 |
|
2773 |
diff --git a/gobs/pym/build_log.py b/gobs/pym/build_log.py~ |
2774 |
similarity index 100% |
2775 |
copy from gobs/pym/build_log.py |
2776 |
copy to gobs/pym/build_log.py~ |
2777 |
|
2778 |
diff --git a/gobs/pym/build_queru.py~ b/gobs/pym/build_queru.py~ |
2779 |
new file mode 100644 |
2780 |
index 0000000..3f0bde8 |
2781 |
--- /dev/null |
2782 |
+++ b/gobs/pym/build_queru.py~ |
2783 |
@@ -0,0 +1,708 @@ |
2784 |
+# Get the options from the config file set in gobs.readconf |
2785 |
+from __future__ import print_function |
2786 |
+from gobs.readconf import get_conf_settings |
2787 |
+reader=get_conf_settings() |
2788 |
+gobs_settings_dict=reader.read_gobs_settings_all() |
2789 |
+# make a CM |
2790 |
+from gobs.ConnectionManager import connectionManager |
2791 |
+CM=connectionManager(gobs_settings_dict) |
2792 |
+#selectively import the pgsql/mysql querys |
2793 |
+if CM.getName()=='pgsql': |
2794 |
+ from gobs.pgsql import * |
2795 |
+ |
2796 |
+import portage |
2797 |
+import os |
2798 |
+import re |
2799 |
+import sys |
2800 |
+import signal |
2801 |
+from gobs.manifest import gobs_manifest |
2802 |
+from gobs.depclean import main_depclean |
2803 |
+from gobs.flags import gobs_use_flags |
2804 |
+from portage import _encodings |
2805 |
+from portage import _unicode_decode |
2806 |
+from portage.versions import cpv_getkey |
2807 |
+import portage.xpak, errno, re, time |
2808 |
+from _emerge.main import parse_opts, profile_check, apply_priorities, repo_name_duplicate_check, \ |
2809 |
+ config_protect_check, check_procfs, ensure_required_sets, expand_set_arguments, \ |
2810 |
+ validate_ebuild_environment, chk_updated_info_files, display_preserved_libs |
2811 |
+from _emerge.actions import action_config, action_sync, action_metadata, \ |
2812 |
+ action_regen, action_search, action_uninstall, \ |
2813 |
+ adjust_configs, chk_updated_cfg_files, display_missing_pkg_set, \ |
2814 |
+ display_news_notification, getportageversion, load_emerge_config |
2815 |
+from portage.util import cmp_sort_key, writemsg, \ |
2816 |
+ writemsg_level, writemsg_stdout, shlex_split |
2817 |
+from _emerge.sync.old_tree_timestamp import old_tree_timestamp_warn |
2818 |
+from _emerge.create_depgraph_params import create_depgraph_params |
2819 |
+from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph |
2820 |
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange |
2821 |
+from gobs.Scheduler import Scheduler |
2822 |
+from _emerge.clear_caches import clear_caches |
2823 |
+from _emerge.unmerge import unmerge |
2824 |
+from _emerge.emergelog import emergelog |
2825 |
+from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo |
2826 |
+from portage._global_updates import _global_updates |
2827 |
+from portage._sets import SETPREFIX |
2828 |
+from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH |
2829 |
+from _emerge.is_valid_package_atom import is_valid_package_atom |
2830 |
+from _emerge.stdout_spinner import stdout_spinner |
2831 |
+from portage.output import blue, bold, colorize, create_color_func, darkgreen, \ |
2832 |
+ red, yellow, colorize, xtermTitle, xtermTitleReset |
2833 |
+good = create_color_func("GOOD") |
2834 |
+bad = create_color_func("BAD") |
2835 |
+ |
2836 |
+class queruaction(object): |
2837 |
+ |
2838 |
+ def __init__(self, config_profile): |
2839 |
+ self._mysettings = portage.config(config_root = "/") |
2840 |
+ self._config_profile = config_profile |
2841 |
+ self._myportdb = portage.portdb |
2842 |
+ |
2843 |
+ def log_fail_queru(self, build_dict, settings): |
2844 |
+ conn=CM.getConnection() |
2845 |
+ print('build_dict', build_dict) |
2846 |
+ fail_querue_dict = get_fail_querue_dict(conn, build_dict) |
2847 |
+ print('fail_querue_dict', fail_querue_dict) |
2848 |
+ if fail_querue_dict is None: |
2849 |
+ fail_querue_dict = {} |
2850 |
+ fail_querue_dict['querue_id'] = build_dict['queue_id'] |
2851 |
+ fail_querue_dict['fail_type'] = build_dict['type_fail'] |
2852 |
+ fail_querue_dict['fail_times'] = 1 |
2853 |
+ print('fail_querue_dict', fail_querue_dict) |
2854 |
+ add_fail_querue_dict(conn, fail_querue_dict) |
2855 |
+ else: |
2856 |
+ if fail_querue_dict['fail_times'][0] < 6: |
2857 |
+ fail_querue_dict['fail_times'] = fail_querue_dict['fail_times'][0] + 1 |
2858 |
+ fail_querue_dict['querue_id'] = build_dict['queue_id'] |
2859 |
+ fail_querue_dict['fail_type'] = build_dict['type_fail'] |
2860 |
+ update_fail_times(conn, fail_querue_dict) |
2861 |
+ return |
2862 |
+ else: |
2863 |
+ build_log_dict = {} |
2864 |
+ error_log_list = [] |
2865 |
+ qa_error_list = [] |
2866 |
+ repoman_error_list = [] |
2867 |
+ sum_build_log_list = [] |
2868 |
+ sum_build_log_list.append("fail") |
2869 |
+ error_log_list.append(build_dict['type_fail']) |
2870 |
+ build_log_dict['repoman_error_list'] = repoman_error_list |
2871 |
+ build_log_dict['qa_error_list'] = qa_error_list |
2872 |
+ build_log_dict['summary_error_list'] = sum_build_log_list |
2873 |
+ if build_dict['type_fail'] == 'merge fail': |
2874 |
+ error_log_list = [] |
2875 |
+ for k, v in build_dict['failed_merge'].iteritems(): |
2876 |
+ error_log_list.append(v['fail_msg']) |
2877 |
+ build_log_dict['error_log_list'] = error_log_list |
2878 |
+ build_error = "" |
2879 |
+ if error_log_list != []: |
2880 |
+ for log_line in error_log_list: |
2881 |
+ build_error = build_error + log_line |
2882 |
+ summary_error = "" |
2883 |
+ if sum_build_log_list != []: |
2884 |
+ for sum_log_line in sum_build_log_list: |
2885 |
+ summary_error = summary_error + " " + sum_log_line |
2886 |
+ if settings.get("PORTAGE_LOG_FILE") is not None: |
2887 |
+ build_log_dict['logfilename'] = re.sub("\/var\/log\/portage\/", "", settings.get("PORTAGE_LOG_FILE")) |
2888 |
+ # os.chmode(settings.get("PORTAGE_LOG_FILE"), 224) |
2889 |
+ else: |
2890 |
+ build_log_dict['logfilename'] = "" |
2891 |
+ move_queru_buildlog(conn, build_dict['queue_id'], build_error, summary_error, build_log_dict) |
2892 |
+ CM.putConnection(conn) |
2893 |
+ |
2894 |
+ def action_build(self, settings, trees, mtimedb, myopts, myaction, myfiles, spinner, build_dict): |
2895 |
+ |
2896 |
+ if '--usepkgonly' not in myopts: |
2897 |
+ old_tree_timestamp_warn(settings['PORTDIR'], settings) |
2898 |
+ |
2899 |
+ # It's best for config updates in /etc/portage to be processed |
2900 |
+ # before we get here, so warn if they're not (bug #267103). |
2901 |
+ chk_updated_cfg_files(settings['EROOT'], ['/etc/portage']) |
2902 |
+ |
2903 |
+ resume = False |
2904 |
+ |
2905 |
+ ldpath_mtimes = mtimedb["ldpath"] |
2906 |
+ favorites=[] |
2907 |
+ buildpkgonly = "--buildpkgonly" in myopts |
2908 |
+ pretend = "--pretend" in myopts |
2909 |
+ fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts |
2910 |
+ ask = "--ask" in myopts |
2911 |
+ enter_invalid = '--ask-enter-invalid' in myopts |
2912 |
+ nodeps = "--nodeps" in myopts |
2913 |
+ oneshot = "--oneshot" in myopts or "--onlydeps" in myopts |
2914 |
+ tree = "--tree" in myopts |
2915 |
+ if nodeps and tree: |
2916 |
+ tree = False |
2917 |
+ del myopts["--tree"] |
2918 |
+ portage.writemsg(colorize("WARN", " * ") + \ |
2919 |
+ "--tree is broken with --nodeps. Disabling...\n") |
2920 |
+ debug = "--debug" in myopts |
2921 |
+ verbose = "--verbose" in myopts |
2922 |
+ quiet = "--quiet" in myopts |
2923 |
+ |
2924 |
+ myparams = create_depgraph_params(myopts, myaction) |
2925 |
+ try: |
2926 |
+ success, mydepgraph, favorites = backtrack_depgraph( |
2927 |
+ settings, trees, myopts, myparams, myaction, myfiles, spinner) |
2928 |
+ except portage.exception.PackageSetNotFound as e: |
2929 |
+ root_config = trees[settings["ROOT"]]["root_config"] |
2930 |
+ display_missing_pkg_set(root_config, e.value) |
2931 |
+ build_dict['type_fail'] = "depgraph fail" |
2932 |
+ build_dict['check_fail'] = True |
2933 |
+ use_changes = None |
2934 |
+ if mydepgraph._dynamic_config._needed_use_config_changes: |
2935 |
+ use_changes = {} |
2936 |
+ for pkg, needed_use_config_changes in mydepgraph._dynamic_config._needed_use_config_changes.items(): |
2937 |
+ new_use, changes = needed_use_config_changes |
2938 |
+ use_changes[pkg.cpv] = changes |
2939 |
+ iteritems_packages = {} |
2940 |
+ for k, v in use_changes.iteritems(): |
2941 |
+ k_package = portage.versions.cpv_getkey(k) |
2942 |
+ iteritems_packages[ k_package ] = v |
2943 |
+ print('iteritems_packages', iteritems_packages) |
2944 |
+ build_cpv_dict = iteritems_packages |
2945 |
+ if use_changes is not None: |
2946 |
+ for k, v in build_cpv_dict.iteritems(): |
2947 |
+ build_use_flags_list = [] |
2948 |
+ for x, y in v.iteritems(): |
2949 |
+ if y is True: |
2950 |
+ build_use_flags_list.append(x) |
2951 |
+ if y is False: |
2952 |
+ build_use_flags_list.append("-" + x) |
2953 |
+ print(k, build_use_flags_list) |
2954 |
+ if not build_use_flags_list == []: |
2955 |
+ build_use_flags = "" |
2956 |
+ for flags in build_use_flags_list: |
2957 |
+ build_use_flags = build_use_flags + flags + ' ' |
2958 |
+ filetext = k + ' ' + build_use_flags |
2959 |
+ print('filetext', filetext) |
2960 |
+ with open("/etc/portage/package.use/gobs.use", "a") as f: |
2961 |
+ f.write(filetext) |
2962 |
+ f.write('\n') |
2963 |
+ |
2964 |
+ settings, trees, mtimedb = load_emerge_config() |
2965 |
+ myparams = create_depgraph_params(myopts, myaction) |
2966 |
+ try: |
2967 |
+ success, mydepgraph, favorites = backtrack_depgraph( |
2968 |
+ settings, trees, myopts, myparams, myaction, myfiles, spinner) |
2969 |
+ except portage.exception.PackageSetNotFound as e: |
2970 |
+ root_config = trees[settings["ROOT"]]["root_config"] |
2971 |
+ display_missing_pkg_set(root_config, e.value) |
2972 |
+ build_dict['type_fail'] = "depgraph fail" |
2973 |
+ build_dict['check_fail'] = True |
2974 |
+ if not success: |
2975 |
+ mydepgraph.display_problems() |
2976 |
+ build_dict['type_fail'] = "depgraph fail" |
2977 |
+ build_dict['check_fail'] = True |
2978 |
+ |
2979 |
+ if build_dict['check_fail'] is True: |
2980 |
+ self.log_fail_queru(build_dict, settings) |
2981 |
+ return 1, settings, trees, mtimedb |
2982 |
+ |
2983 |
+ if "--buildpkgonly" in myopts: |
2984 |
+ graph_copy = mydepgraph._dynamic_config.digraph.copy() |
2985 |
+ removed_nodes = set() |
2986 |
+ for node in graph_copy: |
2987 |
+ if not isinstance(node, Package) or \ |
2988 |
+ node.operation == "nomerge": |
2989 |
+ removed_nodes.add(node) |
2990 |
+ graph_copy.difference_update(removed_nodes) |
2991 |
+ if not graph_copy.hasallzeros(ignore_priority = \ |
2992 |
+ DepPrioritySatisfiedRange.ignore_medium): |
2993 |
+ print("\n!!! --buildpkgonly requires all dependencies to be merged.") |
2994 |
+ print("!!! Cannot merge requested packages. Merge deps and try again.\n") |
2995 |
+ return 1, settings, trees, mtimedb |
2996 |
+ |
2997 |
+ mydepgraph.saveNomergeFavorites() |
2998 |
+ |
2999 |
+ mergetask = Scheduler(settings, trees, mtimedb, myopts, |
3000 |
+ spinner, favorites=favorites, |
3001 |
+ graph_config=mydepgraph.schedulerGraph()) |
3002 |
+ |
3003 |
+ del mydepgraph |
3004 |
+ clear_caches(trees) |
3005 |
+ |
3006 |
+ retval = mergetask.merge() |
3007 |
+ print('retval', retval) |
3008 |
+ if retval: |
3009 |
+ build_dict['type_fail'] = 'merge fail' |
3010 |
+ build_dict['check_fail'] = True |
3011 |
+ attict = {} |
3012 |
+ failed_pkgs_dict = {} |
3013 |
+ for x in mergetask._failed_pkgs_all: |
3014 |
+ attict['fail_msg'] = str(x.pkg)[0] + ' ' + str(x.pkg)[1] + ' ' + re.sub("\/var\/log\/portage\/", "", mergetask._locate_failure_log(x)) |
3015 |
+ failed_pkgs_dict[str(x.pkg.cpv)] = attict |
3016 |
+ build_dict['failed_merge'] = failed_pkgs_dict |
3017 |
+ self.log_fail_queru(build_dict, settings) |
3018 |
+ if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend): |
3019 |
+ if "yes" == settings.get("AUTOCLEAN"): |
3020 |
+ portage.writemsg_stdout(">>> Auto-cleaning packages...\n") |
3021 |
+ unmerge(trees[settings["ROOT"]]["root_config"], |
3022 |
+ myopts, "clean", [], |
3023 |
+ ldpath_mtimes, autoclean=1) |
3024 |
+ else: |
3025 |
+ portage.writemsg_stdout(colorize("WARN", "WARNING:") |
3026 |
+ + " AUTOCLEAN is disabled. This can cause serious" |
3027 |
+ + " problems due to overlapping packages.\n") |
3028 |
+ |
3029 |
+ return retval, settings, trees, mtimedb |
3030 |
+ |
3031 |
+ def post_emerge(self, myaction, myopts, myfiles, target_root, trees, mtimedb, retval): |
3032 |
+ |
3033 |
+ root_config = trees[target_root]["root_config"] |
3034 |
+ vardbapi = trees[target_root]["vartree"].dbapi |
3035 |
+ settings = vardbapi.settings |
3036 |
+ info_mtimes = mtimedb["info"] |
3037 |
+ |
3038 |
+ # Load the most current variables from ${ROOT}/etc/profile.env |
3039 |
+ settings.unlock() |
3040 |
+ settings.reload() |
3041 |
+ settings.regenerate() |
3042 |
+ settings.lock() |
3043 |
+ |
3044 |
+ config_protect = shlex_split(settings.get("CONFIG_PROTECT", "")) |
3045 |
+ infodirs = settings.get("INFOPATH","").split(":") + \ |
3046 |
+ settings.get("INFODIR","").split(":") |
3047 |
+ |
3048 |
+ os.chdir("/") |
3049 |
+ |
3050 |
+ if retval == os.EX_OK: |
3051 |
+ exit_msg = " *** exiting successfully." |
3052 |
+ else: |
3053 |
+ exit_msg = " *** exiting unsuccessfully with status '%s'." % retval |
3054 |
+ emergelog("notitles" not in settings.features, exit_msg) |
3055 |
+ |
3056 |
+ _flush_elog_mod_echo() |
3057 |
+ |
3058 |
+ if not vardbapi._pkgs_changed: |
3059 |
+ display_news_notification(root_config, myopts) |
3060 |
+ # If vdb state has not changed then there's nothing else to do. |
3061 |
+ return |
3062 |
+ |
3063 |
+ vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH) |
3064 |
+ portage.util.ensure_dirs(vdb_path) |
3065 |
+ vdb_lock = None |
3066 |
+ if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts: |
3067 |
+ vardbapi.lock() |
3068 |
+ vdb_lock = True |
3069 |
+ |
3070 |
+ if vdb_lock: |
3071 |
+ try: |
3072 |
+ if "noinfo" not in settings.features: |
3073 |
+ chk_updated_info_files(target_root, |
3074 |
+ infodirs, info_mtimes, retval) |
3075 |
+ mtimedb.commit() |
3076 |
+ finally: |
3077 |
+ if vdb_lock: |
3078 |
+ vardbapi.unlock() |
3079 |
+ |
3080 |
+ chk_updated_cfg_files(settings['EROOT'], config_protect) |
3081 |
+ |
3082 |
+ display_news_notification(root_config, myopts) |
3083 |
+ if retval in (None, os.EX_OK) or (not "--pretend" in myopts): |
3084 |
+ display_preserved_libs(vardbapi, myopts) |
3085 |
+ |
3086 |
+ postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"], |
3087 |
+ portage.USER_CONFIG_PATH, "bin", "post_emerge") |
3088 |
+ if os.access(postemerge, os.X_OK): |
3089 |
+ hook_retval = portage.process.spawn( |
3090 |
+ [postemerge], env=settings.environ()) |
3091 |
+ if hook_retval != os.EX_OK: |
3092 |
+ writemsg_level( |
3093 |
+ " %s spawn failed of %s\n" % (bad("*"), postemerge,), |
3094 |
+ level=logging.ERROR, noiselevel=-1) |
3095 |
+ |
3096 |
+ if "--quiet" not in myopts and \ |
3097 |
+ myaction is None and "@world" in myfiles: |
3098 |
+ show_depclean_suggestion() |
3099 |
+ |
3100 |
+ return |
3101 |
+ |
3102 |
+ def emerge_main(self, args, build_dict): |
3103 |
+ |
3104 |
+ portage._disable_legacy_globals() |
3105 |
+ portage.dep._internal_warnings = True |
3106 |
+ # Disable color until we're sure that it should be enabled (after |
3107 |
+ # EMERGE_DEFAULT_OPTS has been parsed). |
3108 |
+ portage.output.havecolor = 0 |
3109 |
+ # This first pass is just for options that need to be known as early as |
3110 |
+ # possible, such as --config-root. They will be parsed again later, |
3111 |
+ # together with EMERGE_DEFAULT_OPTS (which may vary depending on the |
3112 |
+ # the value of --config-root). |
3113 |
+ myaction, myopts, myfiles = parse_opts(args, silent=True) |
3114 |
+ if "--debug" in myopts: |
3115 |
+ os.environ["PORTAGE_DEBUG"] = "1" |
3116 |
+ if "--config-root" in myopts: |
3117 |
+ os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"] |
3118 |
+ if "--root" in myopts: |
3119 |
+ os.environ["ROOT"] = myopts["--root"] |
3120 |
+ if "--accept-properties" in myopts: |
3121 |
+ os.environ["ACCEPT_PROPERTIES"] = myopts["--accept-properties"] |
3122 |
+ |
3123 |
+ # Portage needs to ensure a sane umask for the files it creates. |
3124 |
+ os.umask(0o22) |
3125 |
+ settings, trees, mtimedb = load_emerge_config() |
3126 |
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi |
3127 |
+ rval = profile_check(trees, myaction) |
3128 |
+ if rval != os.EX_OK: |
3129 |
+ return rval |
3130 |
+ |
3131 |
+ tmpcmdline = [] |
3132 |
+ if "--ignore-default-opts" not in myopts: |
3133 |
+ tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split()) |
3134 |
+ tmpcmdline.extend(args) |
3135 |
+ myaction, myopts, myfiles = parse_opts(tmpcmdline) |
3136 |
+ |
3137 |
+ if myaction not in ('help', 'info', 'version') and \ |
3138 |
+ myopts.get('--package-moves') != 'n' and \ |
3139 |
+ _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)): |
3140 |
+ mtimedb.commit() |
3141 |
+ # Reload the whole config from scratch. |
3142 |
+ settings, trees, mtimedb = load_emerge_config(trees=trees) |
3143 |
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi |
3144 |
+ |
3145 |
+ xterm_titles = "notitles" not in settings.features |
3146 |
+ if xterm_titles: |
3147 |
+ xtermTitle("emerge") |
3148 |
+ |
3149 |
+ adjust_configs(myopts, trees) |
3150 |
+ apply_priorities(settings) |
3151 |
+ |
3152 |
+ spinner = stdout_spinner() |
3153 |
+ if "candy" in settings.features: |
3154 |
+ spinner.update = spinner.update_scroll |
3155 |
+ |
3156 |
+ if "--quiet" not in myopts: |
3157 |
+ portage.deprecated_profile_check(settings=settings) |
3158 |
+ if portage.const._ENABLE_REPO_NAME_WARN: |
3159 |
+ # Bug #248603 - Disable warnings about missing |
3160 |
+ # repo_name entries for stable branch. |
3161 |
+ repo_name_check(trees) |
3162 |
+ repo_name_duplicate_check(trees) |
3163 |
+ config_protect_check(trees) |
3164 |
+ check_procfs() |
3165 |
+ |
3166 |
+ if "getbinpkg" in settings.features: |
3167 |
+ myopts["--getbinpkg"] = True |
3168 |
+ |
3169 |
+ if "--getbinpkgonly" in myopts: |
3170 |
+ myopts["--getbinpkg"] = True |
3171 |
+ |
3172 |
+ if "--getbinpkgonly" in myopts: |
3173 |
+ myopts["--usepkgonly"] = True |
3174 |
+ |
3175 |
+ if "--getbinpkg" in myopts: |
3176 |
+ myopts["--usepkg"] = True |
3177 |
+ |
3178 |
+ if "--usepkgonly" in myopts: |
3179 |
+ myopts["--usepkg"] = True |
3180 |
+ |
3181 |
+ if "buildpkg" in settings.features or "--buildpkgonly" in myopts: |
3182 |
+ myopts["--buildpkg"] = True |
3183 |
+ |
3184 |
+ if "--buildpkgonly" in myopts: |
3185 |
+ # --buildpkgonly will not merge anything, so |
3186 |
+ # it cancels all binary package options. |
3187 |
+ for opt in ("--getbinpkg", "--getbinpkgonly", |
3188 |
+ "--usepkg", "--usepkgonly"): |
3189 |
+ myopts.pop(opt, None) |
3190 |
+ |
3191 |
+ for mytrees in trees.values(): |
3192 |
+ mydb = mytrees["porttree"].dbapi |
3193 |
+ # Freeze the portdbapi for performance (memoize all xmatch results). |
3194 |
+ mydb.freeze() |
3195 |
+ |
3196 |
+ if myaction in ('search', None) and \ |
3197 |
+ "--usepkg" in myopts: |
3198 |
+ # Populate the bintree with current --getbinpkg setting. |
3199 |
+ # This needs to happen before expand_set_arguments(), in case |
3200 |
+ # any sets use the bintree. |
3201 |
+ mytrees["bintree"].populate( |
3202 |
+ getbinpkgs="--getbinpkg" in myopts) |
3203 |
+ |
3204 |
+ del mytrees, mydb |
3205 |
+ |
3206 |
+ for x in myfiles: |
3207 |
+ ext = os.path.splitext(x)[1] |
3208 |
+ if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)): |
3209 |
+ print(colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")) |
3210 |
+ break |
3211 |
+ |
3212 |
+ root_config = trees[settings["ROOT"]]["root_config"] |
3213 |
+ if myaction == "list-sets": |
3214 |
+ writemsg_stdout("".join("%s\n" % s for s in sorted(root_config.sets))) |
3215 |
+ return os.EX_OK |
3216 |
+ |
3217 |
+ ensure_required_sets(trees) |
3218 |
+ |
3219 |
+ # only expand sets for actions taking package arguments |
3220 |
+ oldargs = myfiles[:] |
3221 |
+ if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None): |
3222 |
+ myfiles, retval = expand_set_arguments(myfiles, myaction, root_config) |
3223 |
+ if retval != os.EX_OK: |
3224 |
+ return retval |
3225 |
+ |
3226 |
+ # Need to handle empty sets specially, otherwise emerge will react |
3227 |
+ # with the help message for empty argument lists |
3228 |
+ if oldargs and not myfiles: |
3229 |
+ print("emerge: no targets left after set expansion") |
3230 |
+ return 0 |
3231 |
+ |
3232 |
+ if ("--tree" in myopts) and ("--columns" in myopts): |
3233 |
+ print("emerge: can't specify both of \"--tree\" and \"--columns\".") |
3234 |
+ return 1 |
3235 |
+ |
3236 |
+ if '--emptytree' in myopts and '--noreplace' in myopts: |
3237 |
+ writemsg_level("emerge: can't specify both of " + \ |
3238 |
+ "\"--emptytree\" and \"--noreplace\".\n", |
3239 |
+ level=logging.ERROR, noiselevel=-1) |
3240 |
+ return 1 |
3241 |
+ |
3242 |
+ if ("--quiet" in myopts): |
3243 |
+ spinner.update = spinner.update_quiet |
3244 |
+ portage.util.noiselimit = -1 |
3245 |
+ |
3246 |
+ if "--fetch-all-uri" in myopts: |
3247 |
+ myopts["--fetchonly"] = True |
3248 |
+ |
3249 |
+ if "--skipfirst" in myopts and "--resume" not in myopts: |
3250 |
+ myopts["--resume"] = True |
3251 |
+ |
3252 |
+ # Allow -p to remove --ask |
3253 |
+ if "--pretend" in myopts: |
3254 |
+ myopts.pop("--ask", None) |
3255 |
+ |
3256 |
+ # forbid --ask when not in a terminal |
3257 |
+ # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway. |
3258 |
+ if ("--ask" in myopts) and (not sys.stdin.isatty()): |
3259 |
+ portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n", |
3260 |
+ noiselevel=-1) |
3261 |
+ return 1 |
3262 |
+ |
3263 |
+ if settings.get("PORTAGE_DEBUG", "") == "1": |
3264 |
+ spinner.update = spinner.update_quiet |
3265 |
+ portage.util.noiselimit = 0 |
3266 |
+ if "python-trace" in settings.features: |
3267 |
+ import portage.debug as portage_debug |
3268 |
+ portage_debug.set_trace(True) |
3269 |
+ |
3270 |
+ if not ("--quiet" in myopts): |
3271 |
+ if '--nospinner' in myopts or \ |
3272 |
+ settings.get('TERM') == 'dumb' or \ |
3273 |
+ not sys.stdout.isatty(): |
3274 |
+ spinner.update = spinner.update_basic |
3275 |
+ |
3276 |
+ if "--debug" in myopts: |
3277 |
+ print("myaction", myaction) |
3278 |
+ print("myopts", myopts) |
3279 |
+ |
3280 |
+ pretend = "--pretend" in myopts |
3281 |
+ fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts |
3282 |
+ buildpkgonly = "--buildpkgonly" in myopts |
3283 |
+ |
3284 |
+ # check if root user is the current user for the actions where emerge needs this |
3285 |
+ if portage.secpass < 2: |
3286 |
+ # We've already allowed "--version" and "--help" above. |
3287 |
+ if "--pretend" not in myopts and myaction not in ("search","info"): |
3288 |
+ need_superuser = myaction in ('clean', 'depclean', 'deselect', |
3289 |
+ 'prune', 'unmerge') or not \ |
3290 |
+ (fetchonly or \ |
3291 |
+ (buildpkgonly and secpass >= 1) or \ |
3292 |
+ myaction in ("metadata", "regen", "sync")) |
3293 |
+ if portage.secpass < 1 or \ |
3294 |
+ need_superuser: |
3295 |
+ if need_superuser: |
3296 |
+ access_desc = "superuser" |
3297 |
+ else: |
3298 |
+ access_desc = "portage group" |
3299 |
+ # Always show portage_group_warning() when only portage group |
3300 |
+ # access is required but the user is not in the portage group. |
3301 |
+ from portage.data import portage_group_warning |
3302 |
+ if "--ask" in myopts: |
3303 |
+ myopts["--pretend"] = True |
3304 |
+ del myopts["--ask"] |
3305 |
+ print(("%s access is required... " + \ |
3306 |
+ "adding --pretend to options\n") % access_desc) |
3307 |
+ if portage.secpass < 1 and not need_superuser: |
3308 |
+ portage_group_warning() |
3309 |
+ else: |
3310 |
+ sys.stderr.write(("emerge: %s access is required\n") \ |
3311 |
+ % access_desc) |
3312 |
+ if portage.secpass < 1 and not need_superuser: |
3313 |
+ portage_group_warning() |
3314 |
+ return 1 |
3315 |
+ |
3316 |
+ disable_emergelog = False |
3317 |
+ if disable_emergelog: |
3318 |
+ """ Disable emergelog for everything except build or unmerge |
3319 |
+ operations. This helps minimize parallel emerge.log entries that can |
3320 |
+ confuse log parsers. We especially want it disabled during |
3321 |
+ parallel-fetch, which uses --resume --fetchonly.""" |
3322 |
+ _emerge.emergelog._disable = True |
3323 |
+ |
3324 |
+ else: |
3325 |
+ if 'EMERGE_LOG_DIR' in settings: |
3326 |
+ try: |
3327 |
+ # At least the parent needs to exist for the lock file. |
3328 |
+ portage.util.ensure_dirs(settings['EMERGE_LOG_DIR']) |
3329 |
+ except portage.exception.PortageException as e: |
3330 |
+ writemsg_level("!!! Error creating directory for " + \ |
3331 |
+ "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \ |
3332 |
+ (settings['EMERGE_LOG_DIR'], e), |
3333 |
+ noiselevel=-1, level=logging.ERROR) |
3334 |
+ else: |
3335 |
+ global _emerge_log_dir |
3336 |
+ _emerge_log_dir = settings['EMERGE_LOG_DIR'] |
3337 |
+ |
3338 |
+ if not "--pretend" in myopts: |
3339 |
+ emergelog(xterm_titles, "Started emerge on: "+\ |
3340 |
+ _unicode_decode( |
3341 |
+ time.strftime("%b %d, %Y %H:%M:%S", time.localtime()), |
3342 |
+ encoding=_encodings['content'], errors='replace')) |
3343 |
+ myelogstr="" |
3344 |
+ if myopts: |
3345 |
+ myelogstr=" ".join(myopts) |
3346 |
+ if myaction: |
3347 |
+ myelogstr+=" "+myaction |
3348 |
+ if myfiles: |
3349 |
+ myelogstr += " " + " ".join(oldargs) |
3350 |
+ emergelog(xterm_titles, " *** emerge " + myelogstr) |
3351 |
+ del oldargs |
3352 |
+ |
3353 |
+ def emergeexitsig(signum, frame): |
3354 |
+ signal.signal(signal.SIGINT, signal.SIG_IGN) |
3355 |
+ signal.signal(signal.SIGTERM, signal.SIG_IGN) |
3356 |
+ portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum}) |
3357 |
+ sys.exit(128 + signum) |
3358 |
+ signal.signal(signal.SIGINT, emergeexitsig) |
3359 |
+ signal.signal(signal.SIGTERM, emergeexitsig) |
3360 |
+ |
3361 |
+ def emergeexit(): |
3362 |
+ """This gets out final log message in before we quit.""" |
3363 |
+ if "--pretend" not in myopts: |
3364 |
+ emergelog(xterm_titles, " *** terminating.") |
3365 |
+ if xterm_titles: |
3366 |
+ xtermTitleReset() |
3367 |
+ portage.atexit_register(emergeexit) |
3368 |
+ |
3369 |
+ |
3370 |
+ # "update", "system", or just process files |
3371 |
+ validate_ebuild_environment(trees) |
3372 |
+ |
3373 |
+ for x in myfiles: |
3374 |
+ if x.startswith(SETPREFIX) or \ |
3375 |
+ is_valid_package_atom(x, allow_repo=True): |
3376 |
+ continue |
3377 |
+ if x[:1] == os.sep: |
3378 |
+ continue |
3379 |
+ try: |
3380 |
+ os.lstat(x) |
3381 |
+ continue |
3382 |
+ except OSError: |
3383 |
+ pass |
3384 |
+ msg = [] |
3385 |
+ msg.append("'%s' is not a valid package atom." % (x,)) |
3386 |
+ msg.append("Please check ebuild(5) for full details.") |
3387 |
+ writemsg_level("".join("!!! %s\n" % line for line in msg), |
3388 |
+ level=logging.ERROR, noiselevel=-1) |
3389 |
+ return 1 |
3390 |
+ if "--pretend" not in myopts: |
3391 |
+ display_news_notification(root_config, myopts) |
3392 |
+ retval, settings, trees, mtimedb = self.action_build(settings, trees, mtimedb, |
3393 |
+ myopts, myaction, myfiles, spinner, build_dict) |
3394 |
+ self.post_emerge(myaction, myopts, myfiles, settings["ROOT"], |
3395 |
+ trees, mtimedb, retval) |
3396 |
+ |
3397 |
+ return retval |
3398 |
+ |
3399 |
+ def make_build_list(self, build_dict, settings, portdb): |
3400 |
+ cpv = build_dict['category']+'/'+build_dict['package']+'-'+build_dict['ebuild_version'] |
3401 |
+ pkgdir = os.path.join(settings['PORTDIR'], build_dict['category'] + "/" + build_dict['package']) |
3402 |
+ init_manifest = gobs_manifest(settings, pkgdir) |
3403 |
+ try: |
3404 |
+ ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir+ "/" + build_dict['package'] + "-" + build_dict['ebuild_version'] + ".ebuild")[0] |
3405 |
+ except: |
3406 |
+ ebuild_version_checksum_tree = None |
3407 |
+ if ebuild_version_checksum_tree == build_dict['checksum']: |
3408 |
+ init_flags = gobs_use_flags(settings, portdb, cpv) |
3409 |
+ build_use_flags_list = init_flags.comper_useflags(build_dict) |
3410 |
+ print("build_use_flags_list", build_use_flags_list) |
3411 |
+ manifest_error = init_manifest.check_file_in_manifest(portdb, cpv, build_dict, build_use_flags_list) |
3412 |
+ if manifest_error is None: |
3413 |
+ build_dict['check_fail'] = False |
3414 |
+ build_cpv_dict = {} |
3415 |
+ build_cpv_dict[cpv] = build_use_flags_list |
3416 |
+ print(build_cpv_dict) |
3417 |
+ return build_cpv_dict |
3418 |
+ else: |
3419 |
+ build_dict['type_fail'] = "Manifest error" |
3420 |
+ build_dict['check_fail'] = True |
3421 |
+ else: |
3422 |
+ build_dict['type_fail'] = "Wrong ebuild checksum" |
3423 |
+ build_dict['check_fail'] = True |
3424 |
+ if build_dict['check_fail'] is True: |
3425 |
+ self.log_fail_queru(build_dict, settings) |
3426 |
+ return None |
3427 |
+ return build_cpv_dict |
3428 |
+ |
3429 |
+ def build_procces(self, buildqueru_cpv_dict, build_dict, settings, portdb): |
3430 |
+ build_cpv_list = [] |
3431 |
+ depclean_fail = True |
3432 |
+ for k, build_use_flags_list in buildqueru_cpv_dict.iteritems(): |
3433 |
+ build_cpv_list.append("=" + k) |
3434 |
+ if not build_use_flags_list == None: |
3435 |
+ build_use_flags = "" |
3436 |
+ for flags in build_use_flags_list: |
3437 |
+ build_use_flags = build_use_flags + flags + " " |
3438 |
+ filetext = '=' + k + ' ' + build_use_flags |
3439 |
+ print('filetext', filetext) |
3440 |
+ with open("/etc/portage/package.use/gobs.use", "a") as f: |
3441 |
+ f.write(filetext) |
3442 |
+ f.write('\n') |
3443 |
+ print('build_cpv_list', build_cpv_list) |
3444 |
+ argscmd = [] |
3445 |
+ if not "nooneshort" in build_dict['post_message']: |
3446 |
+ argscmd.append("--oneshot") |
3447 |
+ argscmd.append("--buildpkg") |
3448 |
+ argscmd.append("--usepkg") |
3449 |
+ for build_cpv in build_cpv_list: |
3450 |
+ argscmd.append(build_cpv) |
3451 |
+ print(argscmd) |
3452 |
+ # Call main_emerge to build the package in build_cpv_list |
3453 |
+ build_fail = self.emerge_main(argscmd, build_dict) |
3454 |
+ # Run depclean |
3455 |
+ print('build_fail', build_fail) |
3456 |
+ if not "noclean" in build_dict['post_message']: |
3457 |
+ depclean_fail = main_depclean() |
3458 |
+ try: |
3459 |
+ os.remove("/etc/portage/package.use/gobs.use") |
3460 |
+ except: |
3461 |
+ pass |
3462 |
+ if build_fail is False or depclean_fail is False: |
3463 |
+ return False |
3464 |
+ return True |
3465 |
+ |
3466 |
+ def procces_qureru(self): |
3467 |
+ conn=CM.getConnection() |
3468 |
+ build_dict = {} |
3469 |
+ build_dict = get_packages_to_build(conn, self._config_profile) |
3470 |
+ settings, trees, mtimedb = load_emerge_config() |
3471 |
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi |
3472 |
+ if build_dict is None: |
3473 |
+ CM.putConnection(conn) |
3474 |
+ return |
3475 |
+ print("build_dict", build_dict) |
3476 |
+ if not build_dict['ebuild_id'] is None and build_dict['checksum'] is not None: |
3477 |
+ buildqueru_cpv_dict = self.make_build_list(build_dict, settings, portdb) |
3478 |
+ print('buildqueru_cpv_dict', buildqueru_cpv_dict) |
3479 |
+ if buildqueru_cpv_dict is None: |
3480 |
+ CM.putConnection(conn) |
3481 |
+ return |
3482 |
+ fail_build_procces = self.build_procces(buildqueru_cpv_dict, build_dict, settings, portdb) |
3483 |
+ CM.putConnection(conn) |
3484 |
+ return |
3485 |
+ if not build_dict['post_message'] is [] and build_dict['ebuild_id'] is None: |
3486 |
+ CM.putConnection(conn) |
3487 |
+ return |
3488 |
+ if not build_dict['ebuild_id'] is None and build_dict['checksum'] is None: |
3489 |
+ del_old_queue(conn, build_dict['queue_id']) |
3490 |
+ CM.putConnection(conn) |
3491 |
+ return |
3492 |
|
3493 |
diff --git a/gobs/pym/categories.py~ b/gobs/pym/categories.py~ |
3494 |
new file mode 100644 |
3495 |
index 0000000..f3b2457 |
3496 |
--- /dev/null |
3497 |
+++ b/gobs/pym/categories.py~ |
3498 |
@@ -0,0 +1,30 @@ |
3499 |
+#from gobs.text import gobs_text |
3500 |
+from gobs.text import get_file_text |
3501 |
+import portage |
3502 |
+from gobs.readconf import get_conf_settings |
3503 |
+reader=get_conf_settings() |
3504 |
+gobs_settings_dict=reader.read_gobs_settings_all() |
3505 |
+# make a CM |
3506 |
+from gobs.ConnectionManager import connectionManager |
3507 |
+CM=connectionManager(gobs_settings_dict) |
3508 |
+#selectively import the pgsql/mysql querys |
3509 |
+if CM.getName()=='pgsql': |
3510 |
+ from gobs.pgsql import * |
3511 |
+ |
3512 |
+class gobs_categories(object): |
3513 |
+ |
3514 |
+ def __init__(self, mysettings): |
3515 |
+ self._mysettings = mysettings |
3516 |
+ |
3517 |
+ def update_categories_db(self, categories): |
3518 |
+ conn=CM.getConnection() |
3519 |
+ # Update categories_meta in the db |
3520 |
+ categories_dir = self._mysettings['PORTDIR'] + "/" + categories + "/" |
3521 |
+ categories_metadata_xml_checksum_tree = portage.checksum.sha256hash(categories_dir + "metadata.xml")[0] |
3522 |
+ categories_metadata_xml_text_tree = get_file_text(categories_dir + "metadata.xml") |
3523 |
+ categories_metadata_xml_checksum_db = get_categories_checksum_db(conn, categories) |
3524 |
+ if categories_metadata_xml_checksum_db is None: |
3525 |
+ add_new_categories_meta_sql(self._conn,categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree) |
3526 |
+ elif categories_metadata_xml_checksum_db != categories_metadata_xml_checksum_tree: |
3527 |
+ update_categories_meta_sql(self._conn,categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree) |
3528 |
+ CM.putConnection(conn) |
3529 |
|
3530 |
diff --git a/gobs/pym/check_setup.py b/gobs/pym/check_setup.py |
3531 |
index 8512470..8b9b883 100644 |
3532 |
--- a/gobs/pym/check_setup.py |
3533 |
+++ b/gobs/pym/check_setup.py |
3534 |
@@ -4,6 +4,7 @@ import os |
3535 |
import errno |
3536 |
from git import * |
3537 |
from gobs.text import get_file_text |
3538 |
+from gobs.sync import sync_tree |
3539 |
|
3540 |
from gobs.readconf import get_conf_settings |
3541 |
reader=get_conf_settings() |
3542 |
@@ -68,14 +69,17 @@ def check_make_conf_guest(config_profile): |
3543 |
make_conf_checksum_db = get_profile_checksum(conn,config_profile) |
3544 |
print('make_conf_checksum_db', make_conf_checksum_db) |
3545 |
if make_conf_checksum_db is None: |
3546 |
+ if get_profile_sync(conn, config_profile) is True |
3547 |
+ if sync_tree(): |
3548 |
+ reset_profile_sync(conn, config_profile) |
3549 |
CM.putConnection(conn) |
3550 |
- return "1" |
3551 |
+ return False |
3552 |
make_conf_file = "/etc/portage/make.conf" |
3553 |
make_conf_checksum_tree = portage.checksum.sha256hash(make_conf_file)[0] |
3554 |
print('make_conf_checksum_tree', make_conf_checksum_tree) |
3555 |
if make_conf_checksum_tree != make_conf_checksum_db[0]: |
3556 |
CM.putConnection(conn) |
3557 |
- return "2" |
3558 |
+ return False |
3559 |
# Check if we can open the file and close it |
3560 |
# Check if we have some error in the file (portage.util.getconfig) |
3561 |
# Check if we envorment error with the config (settings.validate) |
3562 |
@@ -88,22 +92,11 @@ def check_make_conf_guest(config_profile): |
3563 |
# With errors we return false |
3564 |
except Exception as e: |
3565 |
CM.putConnection(conn) |
3566 |
- return "3" |
3567 |
+ return False |
3568 |
CM.putConnection(conn) |
3569 |
- return "4" |
3570 |
+ return True |
3571 |
|
3572 |
def check_configure_guest(config_profile): |
3573 |
pass_make_conf = check_make_conf_guest(config_profile) |
3574 |
print(pass_make_conf) |
3575 |
- if pass_make_conf == "1": |
3576 |
- # profile not active or updatedb is runing |
3577 |
- return False |
3578 |
- elif pass_make_conf == "2": |
3579 |
- # update make.conf |
3580 |
- return False |
3581 |
- elif pass_make_conf == "3": |
3582 |
- # set the config as no working |
3583 |
- return False |
3584 |
- elif pass_make_conf == "4": |
3585 |
- # make.conf check OK |
3586 |
- return True |
3587 |
\ No newline at end of file |
3588 |
+ return pass_make_conf |
3589 |
\ No newline at end of file |
3590 |
|
3591 |
diff --git a/gobs/pym/check_setup.py b/gobs/pym/check_setup.py~ |
3592 |
similarity index 92% |
3593 |
copy from gobs/pym/check_setup.py |
3594 |
copy to gobs/pym/check_setup.py~ |
3595 |
index 8512470..8b9b883 100644 |
3596 |
--- a/gobs/pym/check_setup.py |
3597 |
+++ b/gobs/pym/check_setup.py~ |
3598 |
@@ -4,6 +4,7 @@ import os |
3599 |
import errno |
3600 |
from git import * |
3601 |
from gobs.text import get_file_text |
3602 |
+from gobs.sync import sync_tree |
3603 |
|
3604 |
from gobs.readconf import get_conf_settings |
3605 |
reader=get_conf_settings() |
3606 |
@@ -68,14 +69,17 @@ def check_make_conf_guest(config_profile): |
3607 |
make_conf_checksum_db = get_profile_checksum(conn,config_profile) |
3608 |
print('make_conf_checksum_db', make_conf_checksum_db) |
3609 |
if make_conf_checksum_db is None: |
3610 |
+ if get_profile_sync(conn, config_profile) is True |
3611 |
+ if sync_tree(): |
3612 |
+ reset_profile_sync(conn, config_profile) |
3613 |
CM.putConnection(conn) |
3614 |
- return "1" |
3615 |
+ return False |
3616 |
make_conf_file = "/etc/portage/make.conf" |
3617 |
make_conf_checksum_tree = portage.checksum.sha256hash(make_conf_file)[0] |
3618 |
print('make_conf_checksum_tree', make_conf_checksum_tree) |
3619 |
if make_conf_checksum_tree != make_conf_checksum_db[0]: |
3620 |
CM.putConnection(conn) |
3621 |
- return "2" |
3622 |
+ return False |
3623 |
# Check if we can open the file and close it |
3624 |
# Check if we have some error in the file (portage.util.getconfig) |
3625 |
# Check if we envorment error with the config (settings.validate) |
3626 |
@@ -88,22 +92,11 @@ def check_make_conf_guest(config_profile): |
3627 |
# With errors we return false |
3628 |
except Exception as e: |
3629 |
CM.putConnection(conn) |
3630 |
- return "3" |
3631 |
+ return False |
3632 |
CM.putConnection(conn) |
3633 |
- return "4" |
3634 |
+ return True |
3635 |
|
3636 |
def check_configure_guest(config_profile): |
3637 |
pass_make_conf = check_make_conf_guest(config_profile) |
3638 |
print(pass_make_conf) |
3639 |
- if pass_make_conf == "1": |
3640 |
- # profile not active or updatedb is runing |
3641 |
- return False |
3642 |
- elif pass_make_conf == "2": |
3643 |
- # update make.conf |
3644 |
- return False |
3645 |
- elif pass_make_conf == "3": |
3646 |
- # set the config as no working |
3647 |
- return False |
3648 |
- elif pass_make_conf == "4": |
3649 |
- # make.conf check OK |
3650 |
- return True |
3651 |
\ No newline at end of file |
3652 |
+ return pass_make_conf |
3653 |
\ No newline at end of file |
3654 |
|
3655 |
diff --git a/gobs/pym/depclean.py~ b/gobs/pym/depclean.py~ |
3656 |
new file mode 100644 |
3657 |
index 0000000..b6096b6 |
3658 |
--- /dev/null |
3659 |
+++ b/gobs/pym/depclean.py~ |
3660 |
@@ -0,0 +1,632 @@ |
3661 |
+from __future__ import print_function |
3662 |
+import errno |
3663 |
+import portage |
3664 |
+from portage._sets.base import InternalPackageSet |
3665 |
+from _emerge.main import parse_opts |
3666 |
+from _emerge.create_depgraph_params import create_depgraph_params |
3667 |
+from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph |
3668 |
+from _emerge.UnmergeDepPriority import UnmergeDepPriority |
3669 |
+from _emerge.SetArg import SetArg |
3670 |
+from _emerge.actions import load_emerge_config |
3671 |
+from _emerge.Package import Package |
3672 |
+from _emerge.unmerge import unmerge |
3673 |
+from portage.util import cmp_sort_key, writemsg, \ |
3674 |
+ writemsg_level, writemsg_stdout |
3675 |
+from portage.util.digraph import digraph |
3676 |
+ |
3677 |
+def main_depclean(): |
3678 |
+ mysettings, mytrees, mtimedb = load_emerge_config() |
3679 |
+ myroot = mysettings["ROOT"] |
3680 |
+ root_config = mytrees[myroot]["root_config"] |
3681 |
+ psets = root_config.setconfig.psets |
3682 |
+ args_set = InternalPackageSet(allow_repo=True) |
3683 |
+ spinner=None |
3684 |
+ scheduler=None |
3685 |
+ tmpcmdline = [] |
3686 |
+ tmpcmdline.append("--depclean") |
3687 |
+ tmpcmdline.append("--pretend") |
3688 |
+ print("depclean",tmpcmdline) |
3689 |
+ myaction, myopts, myfiles = parse_opts(tmpcmdline, silent=False) |
3690 |
+ if myfiles: |
3691 |
+ args_set.update(myfiles) |
3692 |
+ matched_packages = False |
3693 |
+ for x in args_set: |
3694 |
+ if vardb.match(x): |
3695 |
+ matched_packages = True |
3696 |
+ if not matched_packages: |
3697 |
+ return 0 |
3698 |
+ |
3699 |
+ rval, cleanlist, ordered, req_pkg_count, unresolvable = calc_depclean(mysettings, mytrees, mtimedb["ldpath"], myopts, myaction, args_set, spinner) |
3700 |
+ print('rval, cleanlist, ordered, req_pkg_count, unresolvable', rval, cleanlist, ordered, req_pkg_count, unresolvable) |
3701 |
+ if unresolvable != []: |
3702 |
+ return True |
3703 |
+ if cleanlist != []: |
3704 |
+ conflict_package_list = [] |
3705 |
+ for depclean_cpv in cleanlist: |
3706 |
+ if portage.versions.cpv_getkey(depclean_cpv) in list(psets["system"]): |
3707 |
+ conflict_package_list.append(depclean_cpv) |
3708 |
+ if portage.versions.cpv_getkey(depclean_cpv) in list(psets['selected']): |
3709 |
+ conflict_package_list.append(depclean_cpv) |
3710 |
+ print('conflict_package_list', conflict_package_list) |
3711 |
+ if conflict_package_list == []: |
3712 |
+ tmpcmdline = [] |
3713 |
+ tmpcmdline.append("--depclean") |
3714 |
+ myaction, myopts, myfiles = parse_opts(tmpcmdline, silent=False) |
3715 |
+ unmerge(root_config, myopts, "unmerge", cleanlist, mtimedb["ldpath"], ordered=ordered, scheduler=scheduler) |
3716 |
+ print("Number removed: "+str(len(cleanlist))) |
3717 |
+ return True |
3718 |
+ return True |
3719 |
+ |
3720 |
+def calc_depclean(settings, trees, ldpath_mtimes, |
3721 |
+ myopts, action, args_set, spinner): |
3722 |
+ allow_missing_deps = bool(args_set) |
3723 |
+ |
3724 |
+ debug = '--debug' in myopts |
3725 |
+ xterm_titles = "notitles" not in settings.features |
3726 |
+ myroot = settings["ROOT"] |
3727 |
+ root_config = trees[myroot]["root_config"] |
3728 |
+ psets = root_config.setconfig.psets |
3729 |
+ deselect = myopts.get('--deselect') != 'n' |
3730 |
+ required_sets = {} |
3731 |
+ required_sets['world'] = psets['world'] |
3732 |
+ |
3733 |
+ # When removing packages, a temporary version of the world 'selected' |
3734 |
+ # set may be used which excludes packages that are intended to be |
3735 |
+ # eligible for removal. |
3736 |
+ selected_set = psets['selected'] |
3737 |
+ required_sets['selected'] = selected_set |
3738 |
+ protected_set = InternalPackageSet() |
3739 |
+ protected_set_name = '____depclean_protected_set____' |
3740 |
+ required_sets[protected_set_name] = protected_set |
3741 |
+ system_set = psets["system"] |
3742 |
+ |
3743 |
+ if not system_set or not selected_set: |
3744 |
+ |
3745 |
+ if not system_set: |
3746 |
+ writemsg_level("!!! You have no system list.\n", |
3747 |
+ level=logging.ERROR, noiselevel=-1) |
3748 |
+ |
3749 |
+ if not selected_set: |
3750 |
+ writemsg_level("!!! You have no world file.\n", |
3751 |
+ level=logging.WARNING, noiselevel=-1) |
3752 |
+ |
3753 |
+ writemsg_level("!!! Proceeding is likely to " + \ |
3754 |
+ "break your installation.\n", |
3755 |
+ level=logging.WARNING, noiselevel=-1) |
3756 |
+ if "--pretend" not in myopts: |
3757 |
+ countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean") |
3758 |
+ |
3759 |
+ if action == "depclean": |
3760 |
+ print(" >>> depclean") |
3761 |
+ |
3762 |
+ writemsg_level("\nCalculating dependencies ") |
3763 |
+ resolver_params = create_depgraph_params(myopts, "remove") |
3764 |
+ resolver = depgraph(settings, trees, myopts, resolver_params, spinner) |
3765 |
+ resolver._load_vdb() |
3766 |
+ vardb = resolver._frozen_config.trees[myroot]["vartree"].dbapi |
3767 |
+ real_vardb = trees[myroot]["vartree"].dbapi |
3768 |
+ |
3769 |
+ if action == "depclean": |
3770 |
+ |
3771 |
+ if args_set: |
3772 |
+ |
3773 |
+ if deselect: |
3774 |
+ # Start with an empty set. |
3775 |
+ selected_set = InternalPackageSet() |
3776 |
+ required_sets['selected'] = selected_set |
3777 |
+ # Pull in any sets nested within the selected set. |
3778 |
+ selected_set.update(psets['selected'].getNonAtoms()) |
3779 |
+ |
3780 |
+ # Pull in everything that's installed but not matched |
3781 |
+ # by an argument atom since we don't want to clean any |
3782 |
+ # package if something depends on it. |
3783 |
+ for pkg in vardb: |
3784 |
+ if spinner: |
3785 |
+ spinner.update() |
3786 |
+ |
3787 |
+ try: |
3788 |
+ if args_set.findAtomForPackage(pkg) is None: |
3789 |
+ protected_set.add("=" + pkg.cpv) |
3790 |
+ continue |
3791 |
+ except portage.exception.InvalidDependString as e: |
3792 |
+ show_invalid_depstring_notice(pkg, |
3793 |
+ pkg.metadata["PROVIDE"], str(e)) |
3794 |
+ del e |
3795 |
+ protected_set.add("=" + pkg.cpv) |
3796 |
+ continue |
3797 |
+ |
3798 |
+ elif action == "prune": |
3799 |
+ |
3800 |
+ if deselect: |
3801 |
+ # Start with an empty set. |
3802 |
+ selected_set = InternalPackageSet() |
3803 |
+ required_sets['selected'] = selected_set |
3804 |
+ # Pull in any sets nested within the selected set. |
3805 |
+ selected_set.update(psets['selected'].getNonAtoms()) |
3806 |
+ |
3807 |
+ # Pull in everything that's installed since we don't |
3808 |
+ # to prune a package if something depends on it. |
3809 |
+ protected_set.update(vardb.cp_all()) |
3810 |
+ |
3811 |
+ if not args_set: |
3812 |
+ |
3813 |
+ # Try to prune everything that's slotted. |
3814 |
+ for cp in vardb.cp_all(): |
3815 |
+ if len(vardb.cp_list(cp)) > 1: |
3816 |
+ args_set.add(cp) |
3817 |
+ |
3818 |
+ # Remove atoms from world that match installed packages |
3819 |
+ # that are also matched by argument atoms, but do not remove |
3820 |
+ # them if they match the highest installed version. |
3821 |
+ for pkg in vardb: |
3822 |
+ spinner.update() |
3823 |
+ pkgs_for_cp = vardb.match_pkgs(pkg.cp) |
3824 |
+ if not pkgs_for_cp or pkg not in pkgs_for_cp: |
3825 |
+ raise AssertionError("package expected in matches: " + \ |
3826 |
+ "cp = %s, cpv = %s matches = %s" % \ |
3827 |
+ (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp])) |
3828 |
+ |
3829 |
+ highest_version = pkgs_for_cp[-1] |
3830 |
+ if pkg == highest_version: |
3831 |
+ # pkg is the highest version |
3832 |
+ protected_set.add("=" + pkg.cpv) |
3833 |
+ continue |
3834 |
+ |
3835 |
+ if len(pkgs_for_cp) <= 1: |
3836 |
+ raise AssertionError("more packages expected: " + \ |
3837 |
+ "cp = %s, cpv = %s matches = %s" % \ |
3838 |
+ (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp])) |
3839 |
+ |
3840 |
+ try: |
3841 |
+ if args_set.findAtomForPackage(pkg) is None: |
3842 |
+ protected_set.add("=" + pkg.cpv) |
3843 |
+ continue |
3844 |
+ except portage.exception.InvalidDependString as e: |
3845 |
+ show_invalid_depstring_notice(pkg, |
3846 |
+ pkg.metadata["PROVIDE"], str(e)) |
3847 |
+ del e |
3848 |
+ protected_set.add("=" + pkg.cpv) |
3849 |
+ continue |
3850 |
+ |
3851 |
+ if resolver._frozen_config.excluded_pkgs: |
3852 |
+ excluded_set = resolver._frozen_config.excluded_pkgs |
3853 |
+ required_sets['__excluded__'] = InternalPackageSet() |
3854 |
+ |
3855 |
+ for pkg in vardb: |
3856 |
+ if spinner: |
3857 |
+ spinner.update() |
3858 |
+ |
3859 |
+ try: |
3860 |
+ if excluded_set.findAtomForPackage(pkg): |
3861 |
+ required_sets['__excluded__'].add("=" + pkg.cpv) |
3862 |
+ except portage.exception.InvalidDependString as e: |
3863 |
+ show_invalid_depstring_notice(pkg, |
3864 |
+ pkg.metadata["PROVIDE"], str(e)) |
3865 |
+ del e |
3866 |
+ required_sets['__excluded__'].add("=" + pkg.cpv) |
3867 |
+ |
3868 |
+ success = resolver._complete_graph(required_sets={myroot:required_sets}) |
3869 |
+ writemsg_level("\b\b... done!\n") |
3870 |
+ |
3871 |
+ resolver.display_problems() |
3872 |
+ |
3873 |
+ if not success: |
3874 |
+ return True, [], False, 0, [] |
3875 |
+ |
3876 |
+ def unresolved_deps(): |
3877 |
+ |
3878 |
+ unresolvable = set() |
3879 |
+ for dep in resolver._dynamic_config._initially_unsatisfied_deps: |
3880 |
+ if isinstance(dep.parent, Package) and \ |
3881 |
+ (dep.priority > UnmergeDepPriority.SOFT): |
3882 |
+ unresolvable.add((dep.atom, dep.parent.cpv)) |
3883 |
+ |
3884 |
+ if not unresolvable: |
3885 |
+ return None |
3886 |
+ |
3887 |
+ if unresolvable and not allow_missing_deps: |
3888 |
+ |
3889 |
+ prefix = bad(" * ") |
3890 |
+ msg = [] |
3891 |
+ msg.append("Dependencies could not be completely resolved due to") |
3892 |
+ msg.append("the following required packages not being installed:") |
3893 |
+ msg.append("") |
3894 |
+ for atom, parent in unresolvable: |
3895 |
+ msg.append(" %s pulled in by:" % (atom,)) |
3896 |
+ msg.append(" %s" % (parent,)) |
3897 |
+ msg.append("") |
3898 |
+ msg.extend(textwrap.wrap( |
3899 |
+ "Have you forgotten to do a complete update prior " + \ |
3900 |
+ "to depclean? The most comprehensive command for this " + \ |
3901 |
+ "purpose is as follows:", 65 |
3902 |
+ )) |
3903 |
+ msg.append("") |
3904 |
+ msg.append(" " + \ |
3905 |
+ good("emerge --update --newuse --deep --with-bdeps=y @world")) |
3906 |
+ msg.append("") |
3907 |
+ msg.extend(textwrap.wrap( |
3908 |
+ "Note that the --with-bdeps=y option is not required in " + \ |
3909 |
+ "many situations. Refer to the emerge manual page " + \ |
3910 |
+ "(run `man emerge`) for more information about " + \ |
3911 |
+ "--with-bdeps.", 65 |
3912 |
+ )) |
3913 |
+ msg.append("") |
3914 |
+ msg.extend(textwrap.wrap( |
3915 |
+ "Also, note that it may be necessary to manually uninstall " + \ |
3916 |
+ "packages that no longer exist in the portage tree, since " + \ |
3917 |
+ "it may not be possible to satisfy their dependencies.", 65 |
3918 |
+ )) |
3919 |
+ if action == "prune": |
3920 |
+ msg.append("") |
3921 |
+ msg.append("If you would like to ignore " + \ |
3922 |
+ "dependencies then use %s." % good("--nodeps")) |
3923 |
+ writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg), |
3924 |
+ level=logging.ERROR, noiselevel=-1) |
3925 |
+ return unresolvable |
3926 |
+ return None |
3927 |
+ |
3928 |
+ unresolvable = unresolved_deps() |
3929 |
+ if not unresolvable is None: |
3930 |
+ return False, [], False, 0, unresolvable |
3931 |
+ |
3932 |
+ graph = resolver._dynamic_config.digraph.copy() |
3933 |
+ required_pkgs_total = 0 |
3934 |
+ for node in graph: |
3935 |
+ if isinstance(node, Package): |
3936 |
+ required_pkgs_total += 1 |
3937 |
+ |
3938 |
+ def show_parents(child_node): |
3939 |
+ parent_nodes = graph.parent_nodes(child_node) |
3940 |
+ if not parent_nodes: |
3941 |
+ # With --prune, the highest version can be pulled in without any |
3942 |
+ # real parent since all installed packages are pulled in. In that |
3943 |
+ # case there's nothing to show here. |
3944 |
+ return |
3945 |
+ parent_strs = [] |
3946 |
+ for node in parent_nodes: |
3947 |
+ parent_strs.append(str(getattr(node, "cpv", node))) |
3948 |
+ parent_strs.sort() |
3949 |
+ msg = [] |
3950 |
+ msg.append(" %s pulled in by:\n" % (child_node.cpv,)) |
3951 |
+ for parent_str in parent_strs: |
3952 |
+ msg.append(" %s\n" % (parent_str,)) |
3953 |
+ msg.append("\n") |
3954 |
+ portage.writemsg_stdout("".join(msg), noiselevel=-1) |
3955 |
+ |
3956 |
+ def cmp_pkg_cpv(pkg1, pkg2): |
3957 |
+ """Sort Package instances by cpv.""" |
3958 |
+ if pkg1.cpv > pkg2.cpv: |
3959 |
+ return 1 |
3960 |
+ elif pkg1.cpv == pkg2.cpv: |
3961 |
+ return 0 |
3962 |
+ else: |
3963 |
+ return -1 |
3964 |
+ |
3965 |
+ def create_cleanlist(): |
3966 |
+ |
3967 |
+ # Never display the special internal protected_set. |
3968 |
+ for node in graph: |
3969 |
+ if isinstance(node, SetArg) and node.name == protected_set_name: |
3970 |
+ graph.remove(node) |
3971 |
+ break |
3972 |
+ |
3973 |
+ pkgs_to_remove = [] |
3974 |
+ |
3975 |
+ if action == "depclean": |
3976 |
+ if args_set: |
3977 |
+ |
3978 |
+ for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)): |
3979 |
+ arg_atom = None |
3980 |
+ try: |
3981 |
+ arg_atom = args_set.findAtomForPackage(pkg) |
3982 |
+ except portage.exception.InvalidDependString: |
3983 |
+ # this error has already been displayed by now |
3984 |
+ continue |
3985 |
+ |
3986 |
+ if arg_atom: |
3987 |
+ if pkg not in graph: |
3988 |
+ pkgs_to_remove.append(pkg) |
3989 |
+ elif "--verbose" in myopts: |
3990 |
+ show_parents(pkg) |
3991 |
+ |
3992 |
+ else: |
3993 |
+ for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)): |
3994 |
+ if pkg not in graph: |
3995 |
+ pkgs_to_remove.append(pkg) |
3996 |
+ elif "--verbose" in myopts: |
3997 |
+ show_parents(pkg) |
3998 |
+ |
3999 |
+ elif action == "prune": |
4000 |
+ |
4001 |
+ for atom in args_set: |
4002 |
+ for pkg in vardb.match_pkgs(atom): |
4003 |
+ if pkg not in graph: |
4004 |
+ pkgs_to_remove.append(pkg) |
4005 |
+ elif "--verbose" in myopts: |
4006 |
+ show_parents(pkg) |
4007 |
+ |
4008 |
+ return pkgs_to_remove |
4009 |
+ |
4010 |
+ cleanlist = create_cleanlist() |
4011 |
+ clean_set = set(cleanlist) |
4012 |
+ |
4013 |
+ if cleanlist and \ |
4014 |
+ real_vardb._linkmap is not None and \ |
4015 |
+ myopts.get("--depclean-lib-check") != "n" and \ |
4016 |
+ "preserve-libs" not in settings.features: |
4017 |
+ |
4018 |
+ # Check if any of these packages are the sole providers of libraries |
4019 |
+ # with consumers that have not been selected for removal. If so, these |
4020 |
+ # packages and any dependencies need to be added to the graph. |
4021 |
+ linkmap = real_vardb._linkmap |
4022 |
+ consumer_cache = {} |
4023 |
+ provider_cache = {} |
4024 |
+ consumer_map = {} |
4025 |
+ |
4026 |
+ writemsg_level(">>> Checking for lib consumers...\n") |
4027 |
+ |
4028 |
+ for pkg in cleanlist: |
4029 |
+ pkg_dblink = real_vardb._dblink(pkg.cpv) |
4030 |
+ consumers = {} |
4031 |
+ |
4032 |
+ for lib in pkg_dblink.getcontents(): |
4033 |
+ lib = lib[len(myroot):] |
4034 |
+ lib_key = linkmap._obj_key(lib) |
4035 |
+ lib_consumers = consumer_cache.get(lib_key) |
4036 |
+ if lib_consumers is None: |
4037 |
+ try: |
4038 |
+ lib_consumers = linkmap.findConsumers(lib_key) |
4039 |
+ except KeyError: |
4040 |
+ continue |
4041 |
+ consumer_cache[lib_key] = lib_consumers |
4042 |
+ if lib_consumers: |
4043 |
+ consumers[lib_key] = lib_consumers |
4044 |
+ |
4045 |
+ if not consumers: |
4046 |
+ continue |
4047 |
+ |
4048 |
+ for lib, lib_consumers in list(consumers.items()): |
4049 |
+ for consumer_file in list(lib_consumers): |
4050 |
+ if pkg_dblink.isowner(consumer_file): |
4051 |
+ lib_consumers.remove(consumer_file) |
4052 |
+ if not lib_consumers: |
4053 |
+ del consumers[lib] |
4054 |
+ |
4055 |
+ if not consumers: |
4056 |
+ continue |
4057 |
+ |
4058 |
+ for lib, lib_consumers in consumers.items(): |
4059 |
+ |
4060 |
+ soname = linkmap.getSoname(lib) |
4061 |
+ |
4062 |
+ consumer_providers = [] |
4063 |
+ for lib_consumer in lib_consumers: |
4064 |
+ providers = provider_cache.get(lib) |
4065 |
+ if providers is None: |
4066 |
+ providers = linkmap.findProviders(lib_consumer) |
4067 |
+ provider_cache[lib_consumer] = providers |
4068 |
+ if soname not in providers: |
4069 |
+ # Why does this happen? |
4070 |
+ continue |
4071 |
+ consumer_providers.append( |
4072 |
+ (lib_consumer, providers[soname])) |
4073 |
+ |
4074 |
+ consumers[lib] = consumer_providers |
4075 |
+ |
4076 |
+ consumer_map[pkg] = consumers |
4077 |
+ |
4078 |
+ if consumer_map: |
4079 |
+ |
4080 |
+ search_files = set() |
4081 |
+ for consumers in consumer_map.values(): |
4082 |
+ for lib, consumer_providers in consumers.items(): |
4083 |
+ for lib_consumer, providers in consumer_providers: |
4084 |
+ search_files.add(lib_consumer) |
4085 |
+ search_files.update(providers) |
4086 |
+ |
4087 |
+ writemsg_level(">>> Assigning files to packages...\n") |
4088 |
+ file_owners = real_vardb._owners.getFileOwnerMap(search_files) |
4089 |
+ |
4090 |
+ for pkg, consumers in list(consumer_map.items()): |
4091 |
+ for lib, consumer_providers in list(consumers.items()): |
4092 |
+ lib_consumers = set() |
4093 |
+ |
4094 |
+ for lib_consumer, providers in consumer_providers: |
4095 |
+ owner_set = file_owners.get(lib_consumer) |
4096 |
+ provider_dblinks = set() |
4097 |
+ provider_pkgs = set() |
4098 |
+ |
4099 |
+ if len(providers) > 1: |
4100 |
+ for provider in providers: |
4101 |
+ provider_set = file_owners.get(provider) |
4102 |
+ if provider_set is not None: |
4103 |
+ provider_dblinks.update(provider_set) |
4104 |
+ |
4105 |
+ if len(provider_dblinks) > 1: |
4106 |
+ for provider_dblink in provider_dblinks: |
4107 |
+ provider_pkg = resolver._pkg( |
4108 |
+ provider_dblink.mycpv, "installed", |
4109 |
+ root_config, installed=True) |
4110 |
+ if provider_pkg not in clean_set: |
4111 |
+ provider_pkgs.add(provider_pkg) |
4112 |
+ |
4113 |
+ if provider_pkgs: |
4114 |
+ continue |
4115 |
+ |
4116 |
+ if owner_set is not None: |
4117 |
+ lib_consumers.update(owner_set) |
4118 |
+ |
4119 |
+ for consumer_dblink in list(lib_consumers): |
4120 |
+ if resolver._pkg(consumer_dblink.mycpv, "installed", |
4121 |
+ root_config, installed=True) in clean_set: |
4122 |
+ lib_consumers.remove(consumer_dblink) |
4123 |
+ continue |
4124 |
+ |
4125 |
+ if lib_consumers: |
4126 |
+ consumers[lib] = lib_consumers |
4127 |
+ else: |
4128 |
+ del consumers[lib] |
4129 |
+ if not consumers: |
4130 |
+ del consumer_map[pkg] |
4131 |
+ |
4132 |
+ if consumer_map: |
4133 |
+ # TODO: Implement a package set for rebuilding consumer packages. |
4134 |
+ |
4135 |
+ msg = "In order to avoid breakage of link level " + \ |
4136 |
+ "dependencies, one or more packages will not be removed. " + \ |
4137 |
+ "This can be solved by rebuilding " + \ |
4138 |
+ "the packages that pulled them in." |
4139 |
+ |
4140 |
+ prefix = bad(" * ") |
4141 |
+ from textwrap import wrap |
4142 |
+ writemsg_level("".join(prefix + "%s\n" % line for \ |
4143 |
+ line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1) |
4144 |
+ |
4145 |
+ msg = [] |
4146 |
+ for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)): |
4147 |
+ consumers = consumer_map[pkg] |
4148 |
+ consumer_libs = {} |
4149 |
+ for lib, lib_consumers in consumers.items(): |
4150 |
+ for consumer in lib_consumers: |
4151 |
+ consumer_libs.setdefault( |
4152 |
+ consumer.mycpv, set()).add(linkmap.getSoname(lib)) |
4153 |
+ unique_consumers = set(chain(*consumers.values())) |
4154 |
+ unique_consumers = sorted(consumer.mycpv \ |
4155 |
+ for consumer in unique_consumers) |
4156 |
+ msg.append("") |
4157 |
+ msg.append(" %s pulled in by:" % (pkg.cpv,)) |
4158 |
+ for consumer in unique_consumers: |
4159 |
+ libs = consumer_libs[consumer] |
4160 |
+ msg.append(" %s needs %s" % \ |
4161 |
+ (consumer, ', '.join(sorted(libs)))) |
4162 |
+ msg.append("") |
4163 |
+ writemsg_level("".join(prefix + "%s\n" % line for line in msg), |
4164 |
+ level=logging.WARNING, noiselevel=-1) |
4165 |
+ |
4166 |
+ # Add lib providers to the graph as children of lib consumers, |
4167 |
+ # and also add any dependencies pulled in by the provider. |
4168 |
+ writemsg_level(">>> Adding lib providers to graph...\n") |
4169 |
+ |
4170 |
+ for pkg, consumers in consumer_map.items(): |
4171 |
+ for consumer_dblink in set(chain(*consumers.values())): |
4172 |
+ consumer_pkg = resolver._pkg(consumer_dblink.mycpv, |
4173 |
+ "installed", root_config, installed=True) |
4174 |
+ if not resolver._add_pkg(pkg, |
4175 |
+ Dependency(parent=consumer_pkg, |
4176 |
+ priority=UnmergeDepPriority(runtime=True), |
4177 |
+ root=pkg.root)): |
4178 |
+ resolver.display_problems() |
4179 |
+ return True, [], False, 0, [] |
4180 |
+ |
4181 |
+ writemsg_level("\nCalculating dependencies ") |
4182 |
+ success = resolver._complete_graph( |
4183 |
+ required_sets={myroot:required_sets}) |
4184 |
+ writemsg_level("\b\b... done!\n") |
4185 |
+ resolver.display_problems() |
4186 |
+ if not success: |
4187 |
+ return True, [], False, 0, [] |
4188 |
+ unresolvable = unresolved_deps() |
4189 |
+ if not unresolvable is None: |
4190 |
+ return False, [], False, 0, unresolvable |
4191 |
+ |
4192 |
+ graph = resolver._dynamic_config.digraph.copy() |
4193 |
+ required_pkgs_total = 0 |
4194 |
+ for node in graph: |
4195 |
+ if isinstance(node, Package): |
4196 |
+ required_pkgs_total += 1 |
4197 |
+ cleanlist = create_cleanlist() |
4198 |
+ if not cleanlist: |
4199 |
+ return 0, [], False, required_pkgs_total, unresolvable |
4200 |
+ clean_set = set(cleanlist) |
4201 |
+ |
4202 |
+ if clean_set: |
4203 |
+ writemsg_level(">>> Calculating removal order...\n") |
4204 |
+ # Use a topological sort to create an unmerge order such that |
4205 |
+ # each package is unmerged before its dependencies. This is |
4206 |
+ # necessary to avoid breaking things that may need to run |
4207 |
+ # during pkg_prerm or pkg_postrm phases. |
4208 |
+ |
4209 |
+ # Create a new graph to account for dependencies between the |
4210 |
+ # packages being unmerged. |
4211 |
+ graph = digraph() |
4212 |
+ del cleanlist[:] |
4213 |
+ |
4214 |
+ dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"] |
4215 |
+ runtime = UnmergeDepPriority(runtime=True) |
4216 |
+ runtime_post = UnmergeDepPriority(runtime_post=True) |
4217 |
+ buildtime = UnmergeDepPriority(buildtime=True) |
4218 |
+ priority_map = { |
4219 |
+ "RDEPEND": runtime, |
4220 |
+ "PDEPEND": runtime_post, |
4221 |
+ "DEPEND": buildtime, |
4222 |
+ } |
4223 |
+ |
4224 |
+ for node in clean_set: |
4225 |
+ graph.add(node, None) |
4226 |
+ mydeps = [] |
4227 |
+ for dep_type in dep_keys: |
4228 |
+ depstr = node.metadata[dep_type] |
4229 |
+ if not depstr: |
4230 |
+ continue |
4231 |
+ priority = priority_map[dep_type] |
4232 |
+ |
4233 |
+ try: |
4234 |
+ atoms = resolver._select_atoms(myroot, depstr, |
4235 |
+ myuse=node.use.enabled, parent=node, |
4236 |
+ priority=priority)[node] |
4237 |
+ except portage.exception.InvalidDependString: |
4238 |
+ # Ignore invalid deps of packages that will |
4239 |
+ # be uninstalled anyway. |
4240 |
+ continue |
4241 |
+ |
4242 |
+ for atom in atoms: |
4243 |
+ if not isinstance(atom, portage.dep.Atom): |
4244 |
+ # Ignore invalid atoms returned from dep_check(). |
4245 |
+ continue |
4246 |
+ if atom.blocker: |
4247 |
+ continue |
4248 |
+ matches = vardb.match_pkgs(atom) |
4249 |
+ if not matches: |
4250 |
+ continue |
4251 |
+ for child_node in matches: |
4252 |
+ if child_node in clean_set: |
4253 |
+ graph.add(child_node, node, priority=priority) |
4254 |
+ |
4255 |
+ ordered = True |
4256 |
+ if len(graph.order) == len(graph.root_nodes()): |
4257 |
+ # If there are no dependencies between packages |
4258 |
+ # let unmerge() group them by cat/pn. |
4259 |
+ ordered = False |
4260 |
+ cleanlist = [pkg.cpv for pkg in graph.order] |
4261 |
+ else: |
4262 |
+ # Order nodes from lowest to highest overall reference count for |
4263 |
+ # optimal root node selection (this can help minimize issues |
4264 |
+ # with unaccounted implicit dependencies). |
4265 |
+ node_refcounts = {} |
4266 |
+ for node in graph.order: |
4267 |
+ node_refcounts[node] = len(graph.parent_nodes(node)) |
4268 |
+ def cmp_reference_count(node1, node2): |
4269 |
+ return node_refcounts[node1] - node_refcounts[node2] |
4270 |
+ graph.order.sort(key=cmp_sort_key(cmp_reference_count)) |
4271 |
+ |
4272 |
+ ignore_priority_range = [None] |
4273 |
+ ignore_priority_range.extend( |
4274 |
+ range(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1)) |
4275 |
+ while graph: |
4276 |
+ for ignore_priority in ignore_priority_range: |
4277 |
+ nodes = graph.root_nodes(ignore_priority=ignore_priority) |
4278 |
+ if nodes: |
4279 |
+ break |
4280 |
+ if not nodes: |
4281 |
+ raise AssertionError("no root nodes") |
4282 |
+ if ignore_priority is not None: |
4283 |
+ # Some deps have been dropped due to circular dependencies, |
4284 |
+ # so only pop one node in order to minimize the number that |
4285 |
+ # are dropped. |
4286 |
+ del nodes[1:] |
4287 |
+ for node in nodes: |
4288 |
+ graph.remove(node) |
4289 |
+ cleanlist.append(node.cpv) |
4290 |
+ |
4291 |
+ return True, cleanlist, ordered, required_pkgs_total, [] |
4292 |
+ return True, [], False, required_pkgs_total, [] |
4293 |
|
4294 |
diff --git a/gobs/pym/flags.py~ b/gobs/pym/flags.py~ |
4295 |
new file mode 100644 |
4296 |
index 0000000..7ccf90b |
4297 |
--- /dev/null |
4298 |
+++ b/gobs/pym/flags.py~ |
4299 |
@@ -0,0 +1,219 @@ |
4300 |
+from __future__ import print_function |
4301 |
+from _emerge.main import parse_opts |
4302 |
+from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph |
4303 |
+from _emerge.create_depgraph_params import create_depgraph_params |
4304 |
+from _emerge.actions import load_emerge_config |
4305 |
+import portage |
4306 |
+import os |
4307 |
+ |
4308 |
+class gobs_use_flags(object): |
4309 |
+ |
4310 |
+ def __init__(self, mysettings, myportdb, cpv): |
4311 |
+ self._mysettings = mysettings |
4312 |
+ self._myportdb = myportdb |
4313 |
+ self._cpv = cpv |
4314 |
+ |
4315 |
+ def get_iuse(self): |
4316 |
+ """Gets the current IUSE flags from the tree |
4317 |
+ To be used when a gentoolkit package object is not needed |
4318 |
+ @type: cpv: string |
4319 |
+ @param cpv: cat/pkg-ver |
4320 |
+ @rtype list |
4321 |
+ @returns [] or the list of IUSE flags |
4322 |
+ """ |
4323 |
+ return self._myportdb.aux_get(self._cpv, ["IUSE"])[0].split() |
4324 |
+ |
4325 |
+ def reduce_flag(self, flag): |
4326 |
+ """Absolute value function for a USE flag |
4327 |
+ @type flag: string |
4328 |
+ @param flag: the use flag to absolute. |
4329 |
+ @rtype: string |
4330 |
+ @return absolute USE flag |
4331 |
+ """ |
4332 |
+ if flag[0] in ["+","-"]: |
4333 |
+ return flag[1:] |
4334 |
+ else: |
4335 |
+ return flag |
4336 |
+ |
4337 |
+ def reduce_flags(self, the_list): |
4338 |
+ """Absolute value function for a USE flag list |
4339 |
+ @type the_list: list |
4340 |
+ @param the_list: the use flags to absolute. |
4341 |
+ @rtype: list |
4342 |
+ @return absolute USE flags |
4343 |
+ """ |
4344 |
+ r=[] |
4345 |
+ for member in the_list: |
4346 |
+ r.append(self.reduce_flag(member)) |
4347 |
+ return r |
4348 |
+ |
4349 |
+ def filter_flags(self, use, use_expand_hidden, usemasked, useforced): |
4350 |
+ """Filter function to remove hidden or otherwise not normally |
4351 |
+ visible USE flags from a list. |
4352 |
+ @type use: list |
4353 |
+ @param use: the USE flag list to be filtered. |
4354 |
+ @type use_expand_hidden: list |
4355 |
+ @param use_expand_hidden: list of flags hidden. |
4356 |
+ @type usemasked: list |
4357 |
+ @param usemasked: list of masked USE flags. |
4358 |
+ @type useforced: list |
4359 |
+ @param useforced: the forced USE flags. |
4360 |
+ @rtype: list |
4361 |
+ @return the filtered USE flags. |
4362 |
+ """ |
4363 |
+ # clean out some environment flags, since they will most probably |
4364 |
+ # be confusing for the user |
4365 |
+ for f in use_expand_hidden: |
4366 |
+ f=f.lower() + "_" |
4367 |
+ for x in use: |
4368 |
+ if f in x: |
4369 |
+ use.remove(x) |
4370 |
+ # clean out any arch's |
4371 |
+ archlist = self._mysettings["PORTAGE_ARCHLIST"].split() |
4372 |
+ for a in use[:]: |
4373 |
+ if a in archlist: |
4374 |
+ use.remove(a) |
4375 |
+ # dbl check if any from usemasked or useforced are still there |
4376 |
+ masked = usemasked + useforced |
4377 |
+ for a in use[:]: |
4378 |
+ if a in masked: |
4379 |
+ use.remove(a) |
4380 |
+ return use |
4381 |
+ |
4382 |
+ def get_all_cpv_use(self): |
4383 |
+ """Uses portage to determine final USE flags and settings for an emerge |
4384 |
+ @type cpv: string |
4385 |
+ @param cpv: eg cat/pkg-ver |
4386 |
+ @rtype: lists |
4387 |
+ @return use, use_expand_hidden, usemask, useforce |
4388 |
+ """ |
4389 |
+ use = None |
4390 |
+ self._mysettings.unlock() |
4391 |
+ try: |
4392 |
+ self._mysettings.setcpv(self._cpv, use_cache=None, mydb=self._myportdb) |
4393 |
+ use = self._mysettings['PORTAGE_USE'].split() |
4394 |
+ use_expand_hidden = self._mysettings["USE_EXPAND_HIDDEN"].split() |
4395 |
+ usemask = list(self._mysettings.usemask) |
4396 |
+ useforce = list(self._mysettings.useforce) |
4397 |
+ except KeyError: |
4398 |
+ self._mysettings.reset() |
4399 |
+ self._mysettings.lock() |
4400 |
+ return [], [], [], [] |
4401 |
+ # reset cpv filter |
4402 |
+ self._mysettings.reset() |
4403 |
+ self._mysettings.lock() |
4404 |
+ return use, use_expand_hidden, usemask, useforce |
4405 |
+ |
4406 |
+ def get_all_cpv_use_looked(self): |
4407 |
+ """Uses portage to determine final USE flags and settings for an emerge |
4408 |
+ @type cpv: string |
4409 |
+ @param cpv: eg cat/pkg-ver |
4410 |
+ @rtype: lists |
4411 |
+ @return use, use_expand_hidden, usemask, useforce |
4412 |
+ """ |
4413 |
+ # use = self._mysettings['PORTAGE_USE'].split() |
4414 |
+ use = os.environ['USE'].split() |
4415 |
+ use_expand_hidden = self._mysettings["USE_EXPAND_HIDDEN"].split() |
4416 |
+ usemask = list(self._mysettings.usemask) |
4417 |
+ useforce = list(self._mysettings.useforce) |
4418 |
+ return use, use_expand_hidden, usemask, useforce |
4419 |
+ |
4420 |
+ def get_all_cpv_use_pkg(self, pkg, settings): |
4421 |
+ """Uses portage to determine final USE flags and settings for an emerge |
4422 |
+ @type cpv: string |
4423 |
+ @param cpv: eg cat/pkg-ver |
4424 |
+ @rtype: lists |
4425 |
+ @return use, use_expand_hidden, usemask, useforce |
4426 |
+ """ |
4427 |
+ # use = self._mysettings['PORTAGE_USE'].split() |
4428 |
+ use_list = list(pkg.use.enabled) |
4429 |
+ use_expand_hidden = settings["USE_EXPAND_HIDDEN"].split() |
4430 |
+ usemask = list(settings.usemask) |
4431 |
+ useforced = list(settings.useforce) |
4432 |
+ return use_list, use_expand_hidden, usemask, useforced |
4433 |
+ |
4434 |
+ def get_flags(self): |
4435 |
+ """Retrieves all information needed to filter out hidden, masked, etc. |
4436 |
+ USE flags for a given package. |
4437 |
+ |
4438 |
+ @type cpv: string |
4439 |
+ @param cpv: eg. cat/pkg-ver |
4440 |
+ @type final_setting: boolean |
4441 |
+ @param final_setting: used to also determine the final |
4442 |
+ environment USE flag settings and return them as well. |
4443 |
+ @rtype: list or list, list |
4444 |
+ @return IUSE or IUSE, final_flags |
4445 |
+ """ |
4446 |
+ final_use, use_expand_hidden, usemasked, useforced = self.get_all_cpv_use() |
4447 |
+ iuse_flags = self.filter_flags(self.get_iuse(), use_expand_hidden, usemasked, useforced) |
4448 |
+ #flags = filter_flags(use_flags, use_expand_hidden, usemasked, useforced) |
4449 |
+ final_flags = self.filter_flags(final_use, use_expand_hidden, usemasked, useforced) |
4450 |
+ return iuse_flags, final_flags |
4451 |
+ |
4452 |
+ def get_flags_looked(self): |
4453 |
+ """Retrieves all information needed to filter out hidden, masked, etc. |
4454 |
+ USE flags for a given package. |
4455 |
+ |
4456 |
+ @type cpv: string |
4457 |
+ @param cpv: eg. cat/pkg-ver |
4458 |
+ @type final_setting: boolean |
4459 |
+ @param final_setting: used to also determine the final |
4460 |
+ enviroment USE flag settings and return them as well. |
4461 |
+ @rtype: list or list, list |
4462 |
+ @return IUSE or IUSE, final_flags |
4463 |
+ """ |
4464 |
+ final_use, use_expand_hidden, usemasked, useforced = self.get_all_cpv_use_looked() |
4465 |
+ iuse_flags = self.filter_flags(self.get_iuse(), use_expand_hidden, usemasked, useforced) |
4466 |
+ #flags = filter_flags(use_flags, use_expand_hidden, usemasked, useforced) |
4467 |
+ final_flags = self.filter_flags(final_use, use_expand_hidden, usemasked, useforced) |
4468 |
+ return iuse_flags, final_flags |
4469 |
+ |
4470 |
+ def get_flags_pkg(self, pkg, settings): |
4471 |
+ """Retrieves all information needed to filter out hidden, masked, etc. |
4472 |
+ USE flags for a given package. |
4473 |
+ @type cpv: string |
4474 |
+ @param cpv: eg. cat/pkg-ver |
4475 |
+ @type final_setting: boolean |
4476 |
+ @param final_setting: used to also determine the final |
4477 |
+ enviroment USE flag settings and return them as well. |
4478 |
+ @rtype: list or list, list |
4479 |
+ @return IUSE or IUSE, final_flags |
4480 |
+ """ |
4481 |
+ final_use, use_expand_hidden, usemasked, useforced = self.get_all_cpv_use_pkg(pkg, settings) |
4482 |
+ iuse_flags = self.filter_flags(list(pkg.iuse.all), use_expand_hidden, usemasked, useforced) |
4483 |
+ #flags = filter_flags(use_flags, use_expand_hidden, usemasked, useforced) |
4484 |
+ final_flags = self.filter_flags(final_use, use_expand_hidden, usemasked, useforced) |
4485 |
+ return iuse_flags, final_flags |
4486 |
+ |
4487 |
+ def comper_useflags(self, build_dict): |
4488 |
+ iuse_flags, use_enable = self.get_flags() |
4489 |
+ iuse = [] |
4490 |
+ print("use_enable", use_enable) |
4491 |
+ build_use_flags_dict = build_dict['build_useflags'] |
4492 |
+ print("build_use_flags_dict", build_use_flags_dict) |
4493 |
+ build_use_flags_list = [] |
4494 |
+ if use_enable == []: |
4495 |
+ if build_use_flags_dict is None: |
4496 |
+ return None |
4497 |
+ for iuse_line in iuse_flags: |
4498 |
+ iuse.append(self.reduce_flag(iuse_line)) |
4499 |
+ iuse_flags_list = list(set(iuse)) |
4500 |
+ use_disable = list(set(iuse_flags_list).difference(set(use_enable))) |
4501 |
+ use_flagsDict = {} |
4502 |
+ for x in use_enable: |
4503 |
+ use_flagsDict[x] = True |
4504 |
+ for x in use_disable: |
4505 |
+ use_flagsDict[x] = False |
4506 |
+ print("use_flagsDict", use_flagsDict) |
4507 |
+ for k, v in use_flagsDict.iteritems(): |
4508 |
+ print("tree use flags", k, v) |
4509 |
+ print("db use flags", k, build_use_flags_dict[k]) |
4510 |
+ if build_use_flags_dict[k] != v: |
4511 |
+ if build_use_flags_dict[k] is True: |
4512 |
+ build_use_flags_list.append(k) |
4513 |
+ if build_use_flags_dict[k] is False: |
4514 |
+ build_use_flags_list.append("-" + k) |
4515 |
+ if build_use_flags_list == []: |
4516 |
+ build_use_flags_list = None |
4517 |
+ print(build_use_flags_list) |
4518 |
+ return build_use_flags_list |
4519 |
|
4520 |
diff --git a/gobs/pym/init_setup_profile.py~ b/gobs/pym/init_setup_profile.py~ |
4521 |
new file mode 100644 |
4522 |
index 0000000..e647e1f |
4523 |
--- /dev/null |
4524 |
+++ b/gobs/pym/init_setup_profile.py~ |
4525 |
@@ -0,0 +1,86 @@ |
4526 |
+#!/usr/bin/python |
4527 |
+# Copyright 2006-2011 Gentoo Foundation |
4528 |
+# Distributed under the terms of the GNU General Public License v2 |
4529 |
+ |
4530 |
+""" This code will update the sql backend with needed info for |
4531 |
+ the Frontend and the Guest daemon. """ |
4532 |
+ |
4533 |
+import sys |
4534 |
+import os |
4535 |
+ |
4536 |
+# Get the options from the config file set in gobs.readconf |
4537 |
+from gobs.readconf import get_conf_settings |
4538 |
+reader=get_conf_settings() |
4539 |
+gobs_settings_dict=reader.read_gobs_settings_all() |
4540 |
+# make a CM |
4541 |
+from gobs.ConnectionManager import connectionManager |
4542 |
+CM=connectionManager(gobs_settings_dict) |
4543 |
+#selectively import the pgsql/mysql querys |
4544 |
+if CM.getName()=='pgsql': |
4545 |
+ from gobs.pgsql import * |
4546 |
+ |
4547 |
+from gobs.check_setup import check_make_conf, git_pull |
4548 |
+from gobs.package import gobs_package |
4549 |
+import portage |
4550 |
+ |
4551 |
+def setup_profile_main(args=None): |
4552 |
+ """ |
4553 |
+ @param args: command arguments (default: sys.argv[1:]) |
4554 |
+ @type args: list |
4555 |
+ """ |
4556 |
+ conn=CM.getConnection() |
4557 |
+ if args is None: |
4558 |
+ args = sys.argv[1:] |
4559 |
+ if args[0] == "-add": |
4560 |
+ git_pull() |
4561 |
+ check_make_conf() |
4562 |
+ print "Check configs done" |
4563 |
+ # Get default config from the configs table and default_config=1 |
4564 |
+ config_id = args[1] |
4565 |
+ default_config_root = "/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id + "/" |
4566 |
+ # Set config_root (PORTAGE_CONFIGROOT) to default_config_root |
4567 |
+ mysettings = portage.config(config_root = default_config_root) |
4568 |
+ myportdb = portage.portdbapi(mysettings=mysettings) |
4569 |
+ init_package = gobs_package(mysettings, myportdb) |
4570 |
+ # get the cp list |
4571 |
+ package_list_tree = package_list_tree = myportdb.cp_all() |
4572 |
+ print "Setting default config to:", config_id |
4573 |
+ config_id_list = [] |
4574 |
+ config_id_list.append(config_id) |
4575 |
+ for package_line in sorted(package_list_tree): |
4576 |
+ # FIXME: remove the check for gobs when in tree |
4577 |
+ if package_line != "dev-python/gobs": |
4578 |
+ build_dict = {} |
4579 |
+ packageDict = {} |
4580 |
+ ebuild_id_list = [] |
4581 |
+ # split the cp to categories and package |
4582 |
+ element = package_line.split('/') |
4583 |
+ categories = element[0] |
4584 |
+ package = element[1] |
4585 |
+ print "C", categories + "/" + package # C = Checking |
4586 |
+ pkgdir = mysettings['PORTDIR'] + "/" + categories + "/" + package |
4587 |
+ config_cpv_listDict = init_package.config_match_ebuild(categories, package, config_id_list) |
4588 |
+ if config_cpv_listDict != {}: |
4589 |
+ cpv = categories + "/" + package + "-" + config_cpv_listDict[config_id]['ebuild_version'] |
4590 |
+ attDict = {} |
4591 |
+ attDict['categories'] = categories |
4592 |
+ attDict['package'] = package |
4593 |
+ attDict['ebuild_version_tree'] = config_cpv_listDict[config_id]['ebuild_version'] |
4594 |
+ packageDict[cpv] = attDict |
4595 |
+ build_dict['checksum'] = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + config_cpv_listDict[config_id]['ebuild_version'] + ".ebuild")[0] |
4596 |
+ build_dict['package_id'] = have_package_db(conn, categories, package)[0] |
4597 |
+ build_dict['ebuild_version'] = config_cpv_listDict[config_id]['ebuild_version'] |
4598 |
+ ebuild_id = get_ebuild_id_db_checksum(conn, build_dict) |
4599 |
+ if ebuild_id is not None: |
4600 |
+ ebuild_id_list.append(ebuild_id) |
4601 |
+ init_package.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict) |
4602 |
+ |
4603 |
+ if args[0] == "-del": |
4604 |
+ config_id = args[1] |
4605 |
+ querue_id_list = get_queue_id_list_config(conn, config_id) |
4606 |
+ if querue_id_list is not None: |
4607 |
+ for querue_id in querue_id_list: |
4608 |
+ del_old_queue(conn, querue_id) |
4609 |
+ CM.putConnection(conn) |
4610 |
+ |
4611 |
+ |
4612 |
\ No newline at end of file |
4613 |
|
4614 |
diff --git a/gobs/pym/manifest.py~ b/gobs/pym/manifest.py~ |
4615 |
new file mode 100644 |
4616 |
index 0000000..fb29f0a |
4617 |
--- /dev/null |
4618 |
+++ b/gobs/pym/manifest.py~ |
4619 |
@@ -0,0 +1,124 @@ |
4620 |
+import os |
4621 |
+import warnings |
4622 |
+from portage import os, _encodings, _unicode_decode |
4623 |
+from portage.exception import DigestException, FileNotFound |
4624 |
+from portage.localization import _ |
4625 |
+from portage.manifest import Manifest |
4626 |
+import portage |
4627 |
+ |
4628 |
+class gobs_manifest(object): |
4629 |
+ |
4630 |
+ def __init__ (self, mysettings, pkgdir): |
4631 |
+ self._mysettings = mysettings |
4632 |
+ self._pkgdir = pkgdir |
4633 |
+ |
4634 |
+ # Copy of portage.digestcheck() but without the writemsg() stuff |
4635 |
+ def digestcheck(self): |
4636 |
+ """ |
4637 |
+ Verifies checksums. Assumes all files have been downloaded. |
4638 |
+ @rtype: int |
4639 |
+ @returns: None on success and error msg on failure |
4640 |
+ """ |
4641 |
+ |
4642 |
+ myfiles = [] |
4643 |
+ justmanifest = None |
4644 |
+ self._mysettings['PORTAGE_QUIET'] = '1' |
4645 |
+ |
4646 |
+ if self._mysettings.get("EBUILD_SKIP_MANIFEST") == "1": |
4647 |
+ return None |
4648 |
+ manifest_path = os.path.join(self._pkgdir, "Manifest") |
4649 |
+ if not os.path.exists(manifest_path): |
4650 |
+ return ("!!! Manifest file not found: '%s'") % manifest_path |
4651 |
+ mf = Manifest(self._pkgdir, self._mysettings["DISTDIR"]) |
4652 |
+ manifest_empty = True |
4653 |
+ for d in mf.fhashdict.values(): |
4654 |
+ if d: |
4655 |
+ manifest_empty = False |
4656 |
+ break |
4657 |
+ if manifest_empty: |
4658 |
+ return ("!!! Manifest is empty: '%s'") % manifest_path |
4659 |
+ try: |
4660 |
+ if "PORTAGE_PARALLEL_FETCHONLY" not in self._mysettings: |
4661 |
+ mf.checkTypeHashes("EBUILD") |
4662 |
+ mf.checkTypeHashes("AUX") |
4663 |
+ mf.checkTypeHashes("MISC", ignoreMissingFiles=True) |
4664 |
+ for f in myfiles: |
4665 |
+ ftype = mf.findFile(f) |
4666 |
+ if ftype is None: |
4667 |
+ return ("!!! Missing digest for '%s'") % (f,) |
4668 |
+ mf.checkFileHashes(ftype, f) |
4669 |
+ except FileNotFound as e: |
4670 |
+ return ("!!! A file listed in the Manifest could not be found: %s") % str(e) |
4671 |
+ except DigestException as e: |
4672 |
+ return ("!!! Digest verification failed: %s\nReason: %s\nGot: %s\nExpected: %s") \ |
4673 |
+ % (e.value[0], e.value[1], e.value[2], e.value[3]) |
4674 |
+ # Make sure that all of the ebuilds are actually listed in the Manifest. |
4675 |
+ for f in os.listdir(self._pkgdir): |
4676 |
+ pf = None |
4677 |
+ if f[-7:] == '.ebuild': |
4678 |
+ pf = f[:-7] |
4679 |
+ if pf is not None and not mf.hasFile("EBUILD", f): |
4680 |
+ return ("!!! A file is not listed in the Manifest: '%s'") \ |
4681 |
+ % os.path.join(self._pkgdir, f) |
4682 |
+ """ epatch will just grab all the patches out of a directory, so we have to |
4683 |
+ make sure there aren't any foreign files that it might grab.""" |
4684 |
+ filesdir = os.path.join(self._pkgdir, "files") |
4685 |
+ for parent, dirs, files in os.walk(filesdir): |
4686 |
+ try: |
4687 |
+ parent = _unicode_decode(parent, |
4688 |
+ encoding=_encodings['fs'], errors='strict') |
4689 |
+ except UnicodeDecodeError: |
4690 |
+ parent = _unicode_decode(parent, encoding=_encodings['fs'], errors='replace') |
4691 |
+ return ("!!! Path contains invalid character(s) for encoding '%s': '%s'") \ |
4692 |
+ % (_encodings['fs'], parent) |
4693 |
+ for d in dirs: |
4694 |
+ d_bytes = d |
4695 |
+ try: |
4696 |
+ d = _unicode_decode(d, encoding=_encodings['fs'], errors='strict') |
4697 |
+ except UnicodeDecodeError: |
4698 |
+ d = _unicode_decode(d, encoding=_encodings['fs'], errors='replace') |
4699 |
+ return ("!!! Path contains invalid character(s) for encoding '%s': '%s'") \ |
4700 |
+ % (_encodings['fs'], os.path.join(parent, d)) |
4701 |
+ if d.startswith(".") or d == "CVS": |
4702 |
+ dirs.remove(d_bytes) |
4703 |
+ for f in files: |
4704 |
+ try: |
4705 |
+ f = _unicode_decode(f, encoding=_encodings['fs'], errors='strict') |
4706 |
+ except UnicodeDecodeError: |
4707 |
+ f = _unicode_decode(f, encoding=_encodings['fs'], errors='replace') |
4708 |
+ if f.startswith("."): |
4709 |
+ continue |
4710 |
+ f = os.path.join(parent, f)[len(filesdir) + 1:] |
4711 |
+ return ("!!! File name contains invalid character(s) for encoding '%s': '%s'") \ |
4712 |
+ % (_encodings['fs'], f) |
4713 |
+ if f.startswith("."): |
4714 |
+ continue |
4715 |
+ f = os.path.join(parent, f)[len(filesdir) + 1:] |
4716 |
+ file_type = mf.findFile(f) |
4717 |
+ if file_type != "AUX" and not f.startswith("digest-"): |
4718 |
+ return ("!!! A file is not listed in the Manifest: '%s'") \ |
4719 |
+ % os.path.join(filesdir, f) |
4720 |
+ return None |
4721 |
+ |
4722 |
+ def check_file_in_manifest(self, portdb, cpv, build_dict, build_use_flags_list): |
4723 |
+ myfetchlistdict = portage.FetchlistDict(self._pkgdir, self._mysettings, portdb) |
4724 |
+ my_manifest = portage.Manifest(self._pkgdir, self._mysettings['DISTDIR'], fetchlist_dict=myfetchlistdict, manifest1_compat=False, from_scratch=False) |
4725 |
+ if my_manifest.findFile(build_dict['package'] + "-" + build_dict['ebuild_version'] + ".ebuild") is None: |
4726 |
+ return "Ebuild file not found." |
4727 |
+ cpv_fetchmap = portdb.getFetchMap(cpv, useflags=build_use_flags_list, mytree=None) |
4728 |
+ self._mysettings.unlock() |
4729 |
+ try: |
4730 |
+ portage.fetch(cpv_fetchmap, self._mysettings, listonly=0, fetchonly=0, locks_in_subdir='.locks', use_locks=1, try_mirrors=1) |
4731 |
+ except: |
4732 |
+ self._mysettings.lock() |
4733 |
+ return "Can't fetch the file." |
4734 |
+ self._mysettings.lock() |
4735 |
+ try: |
4736 |
+ my_manifest.checkCpvHashes(cpv, checkDistfiles=True, onlyDistfiles=False, checkMiscfiles=True) |
4737 |
+ except: |
4738 |
+ return "Can't fetch the file or the hash failed." |
4739 |
+ try: |
4740 |
+ portdb.fetch_check(cpv, useflags=build_use_flags_list, mysettings=self._mysettings, all=False) |
4741 |
+ except: |
4742 |
+ return "Fetch check failed." |
4743 |
+ return None |
4744 |
\ No newline at end of file |
4745 |
|
4746 |
diff --git a/gobs/pym/old_cpv.py~ b/gobs/pym/old_cpv.py~ |
4747 |
new file mode 100644 |
4748 |
index 0000000..4923bf7 |
4749 |
--- /dev/null |
4750 |
+++ b/gobs/pym/old_cpv.py~ |
4751 |
@@ -0,0 +1,89 @@ |
4752 |
+from __future__ import print_function |
4753 |
+from gobs.readconf import get_conf_settings |
4754 |
+reader=get_conf_settings() |
4755 |
+gobs_settings_dict=reader.read_gobs_settings_all() |
4756 |
+# make a CM |
4757 |
+from gobs.ConnectionManager import connectionManager |
4758 |
+CM=connectionManager(gobs_settings_dict) |
4759 |
+#selectively import the pgsql/mysql querys |
4760 |
+if CM.getName()=='pgsql': |
4761 |
+ from gobs.pgsql import * |
4762 |
+ |
4763 |
+class gobs_old_cpv(object): |
4764 |
+ |
4765 |
+ def __init__(self, myportdb, mysettings): |
4766 |
+ self._mysettings = mysettings |
4767 |
+ self._myportdb = myportdb |
4768 |
+ |
4769 |
+ def mark_old_ebuild_db(self, categories, package, package_id): |
4770 |
+ conn=CM.getConnection() |
4771 |
+ ebuild_list_tree = sorted(self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)) |
4772 |
+ # Get ebuild list on categories, package in the db |
4773 |
+ ebuild_list_db = cp_list_db(conn,package_id) |
4774 |
+ # Check if don't have the ebuild in the tree |
4775 |
+ # Add it to the no active list |
4776 |
+ old_ebuild_list = [] |
4777 |
+ for ebuild_line in ebuild_list_db: |
4778 |
+ ebuild_line_db = categories + "/" + package + "-" + ebuild_line[0] |
4779 |
+ if not ebuild_line_db in ebuild_list_tree: |
4780 |
+ old_ebuild_list.append(ebuild_line) |
4781 |
+ # Set no active on ebuilds in the db that no longer in tree |
4782 |
+ if old_ebuild_list != []: |
4783 |
+ for old_ebuild in old_ebuild_list: |
4784 |
+ print("O", categories + "/" + package + "-" + old_ebuild[0]) |
4785 |
+ add_old_ebuild(conn,package_id, old_ebuild_list) |
4786 |
+ # Check if we have older no activ ebuilds then 60 days |
4787 |
+ ebuild_old_list_db = cp_list_old_db(conn,package_id) |
4788 |
+ # Delete older ebuilds in the db |
4789 |
+ if ebuild_old_list_db != []: |
4790 |
+ for del_ebuild_old in ebuild_old_list_db: |
4791 |
+ print("D", categories + "/" + package + "-" + del_ebuild_old[1]) |
4792 |
+ del_old_ebuild(conn,ebuild_old_list_db) |
4793 |
+ CM.putConnection(conn) |
4794 |
+ |
4795 |
+ def mark_old_package_db(self, package_id_list_tree): |
4796 |
+ conn=CM.getConnection() |
4797 |
+ # Get categories/package list from db |
4798 |
+ package_list_db = cp_all_db(conn) |
4799 |
+ old_package_id_list = [] |
4800 |
+ # Check if don't have the categories/package in the tree |
4801 |
+ # Add it to the no active list |
4802 |
+ for package_line in package_list_db: |
4803 |
+ if not package_line in package_id_list_tree: |
4804 |
+ old_package_id_list.append(package_line) |
4805 |
+ # Set no active on categories/package and ebuilds in the db that no longer in tree |
4806 |
+ if old_package_id_list != []: |
4807 |
+ mark_old_list = add_old_package(conn,old_package_id_list) |
4808 |
+ if mark_old_list != []: |
4809 |
+ for x in mark_old_list: |
4810 |
+ element = get_cp_from_package_id(conn,x) |
4811 |
+ print("O", element[0]) |
4812 |
+ # Check if we have older no activ categories/package then 60 days |
4813 |
+ del_package_id_old_list = cp_all_old_db(conn,old_package_id_list) |
4814 |
+ # Delete older categories/package and ebuilds in the db |
4815 |
+ if del_package_id_old_list != []: |
4816 |
+ for i in del_package_id_old_list: |
4817 |
+ element = get_cp_from_package_id(conn,i) |
4818 |
+ print("D", element) |
4819 |
+ del_old_package(conn,del_package_id_old_list) |
4820 |
+ CM.putConnection(conn) |
4821 |
+ |
4822 |
+ def mark_old_categories_db(self): |
4823 |
+ conn=CM.getConnection() |
4824 |
+ # Get categories list from the tree and db |
4825 |
+ categories_list_tree = self._mysettings.categories |
4826 |
+ categories_list_db =get_categories_db(conn) |
4827 |
+ categories_old_list = [] |
4828 |
+ # Check if don't have the categories in the tree |
4829 |
+ # Add it to the no active list |
4830 |
+ for categories_line in categories_list_db: |
4831 |
+ if not categories_line[0] in categories_list_tree: |
4832 |
+ old_c = get_old_categories(conn,categories_line[0]) |
4833 |
+ if old_c is not None: |
4834 |
+ categories_old_list.append(categories_line) |
4835 |
+ # Delete older categories in the db |
4836 |
+ if categories_old_list != []: |
4837 |
+ for real_old_categories in categories_old_list: |
4838 |
+ del_old_categories(conn,real_old_categories) |
4839 |
+ print("D", real_old_categories) |
4840 |
+ CM.putConnection(conn) |
4841 |
\ No newline at end of file |
4842 |
|
4843 |
diff --git a/gobs/pym/package.py b/gobs/pym/package.py |
4844 |
index 1f592ee..9ae6c57 100644 |
4845 |
--- a/gobs/pym/package.py |
4846 |
+++ b/gobs/pym/package.py |
4847 |
@@ -176,14 +176,15 @@ class gobs_package(object): |
4848 |
def add_new_package_db(self, categories, package): |
4849 |
conn=CM.getConnection() |
4850 |
# add new categories package ebuild to tables package and ebuilds |
4851 |
- print("N", categories + "/" + package) # N = New Package |
4852 |
+ print("C", categories + "/" + package) # C = Checking |
4853 |
+ print("N", categories + "/" + package) # N = New Package |
4854 |
pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR + cp |
4855 |
categories_dir = self._mysettings['PORTDIR'] + "/" + categories + "/" |
4856 |
# Get the ebuild list for cp |
4857 |
ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None) |
4858 |
if ebuild_list_tree == []: |
4859 |
CM.putConnection(conn) |
4860 |
- return None |
4861 |
+ return |
4862 |
config_list = get_config_list(conn) |
4863 |
config_cpv_listDict = self.config_match_ebuild(categories, package, config_list) |
4864 |
config_id = get_default_config(conn) |
4865 |
@@ -217,7 +218,7 @@ class gobs_package(object): |
4866 |
get_manifest_text = get_file_text(pkgdir + "/Manifest") |
4867 |
add_new_manifest_sql(conn,package_id, get_manifest_text, manifest_checksum_tree) |
4868 |
CM.putConnection(conn) |
4869 |
- return package_id |
4870 |
+ print("C", categories + "/" + package + " ... Done.") |
4871 |
|
4872 |
def update_package_db(self, categories, package, package_id): |
4873 |
conn=CM.getConnection() |
4874 |
@@ -229,6 +230,7 @@ class gobs_package(object): |
4875 |
manifest_checksum_db = get_manifest_db(conn,package_id) |
4876 |
# if we have the same checksum return else update the package |
4877 |
ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None) |
4878 |
+ print("C", categories + "/" + package) # C = Checking |
4879 |
if manifest_checksum_tree != manifest_checksum_db: |
4880 |
print("U", categories + "/" + package) # U = Update |
4881 |
# Get package_metadataDict and update the db with it |
4882 |
@@ -280,6 +282,7 @@ class gobs_package(object): |
4883 |
init_old_cpv = gobs_old_cpv(self._myportdb, self._mysettings) |
4884 |
init_old_cpv.mark_old_ebuild_db(categories, package, package_id) |
4885 |
CM.putConnection(conn) |
4886 |
+ print("C", categories + "/" + package + " ... Done.") |
4887 |
|
4888 |
def update_ebuild_db(self, build_dict): |
4889 |
conn=CM.getConnection() |
4890 |
|
4891 |
diff --git a/gobs/pym/package.py b/gobs/pym/package.py~ |
4892 |
similarity index 97% |
4893 |
copy from gobs/pym/package.py |
4894 |
copy to gobs/pym/package.py~ |
4895 |
index 1f592ee..9ae6c57 100644 |
4896 |
--- a/gobs/pym/package.py |
4897 |
+++ b/gobs/pym/package.py~ |
4898 |
@@ -176,14 +176,15 @@ class gobs_package(object): |
4899 |
def add_new_package_db(self, categories, package): |
4900 |
conn=CM.getConnection() |
4901 |
# add new categories package ebuild to tables package and ebuilds |
4902 |
- print("N", categories + "/" + package) # N = New Package |
4903 |
+ print("C", categories + "/" + package) # C = Checking |
4904 |
+ print("N", categories + "/" + package) # N = New Package |
4905 |
pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR + cp |
4906 |
categories_dir = self._mysettings['PORTDIR'] + "/" + categories + "/" |
4907 |
# Get the ebuild list for cp |
4908 |
ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None) |
4909 |
if ebuild_list_tree == []: |
4910 |
CM.putConnection(conn) |
4911 |
- return None |
4912 |
+ return |
4913 |
config_list = get_config_list(conn) |
4914 |
config_cpv_listDict = self.config_match_ebuild(categories, package, config_list) |
4915 |
config_id = get_default_config(conn) |
4916 |
@@ -217,7 +218,7 @@ class gobs_package(object): |
4917 |
get_manifest_text = get_file_text(pkgdir + "/Manifest") |
4918 |
add_new_manifest_sql(conn,package_id, get_manifest_text, manifest_checksum_tree) |
4919 |
CM.putConnection(conn) |
4920 |
- return package_id |
4921 |
+ print("C", categories + "/" + package + " ... Done.") |
4922 |
|
4923 |
def update_package_db(self, categories, package, package_id): |
4924 |
conn=CM.getConnection() |
4925 |
@@ -229,6 +230,7 @@ class gobs_package(object): |
4926 |
manifest_checksum_db = get_manifest_db(conn,package_id) |
4927 |
# if we have the same checksum return else update the package |
4928 |
ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None) |
4929 |
+ print("C", categories + "/" + package) # C = Checking |
4930 |
if manifest_checksum_tree != manifest_checksum_db: |
4931 |
print("U", categories + "/" + package) # U = Update |
4932 |
# Get package_metadataDict and update the db with it |
4933 |
@@ -280,6 +282,7 @@ class gobs_package(object): |
4934 |
init_old_cpv = gobs_old_cpv(self._myportdb, self._mysettings) |
4935 |
init_old_cpv.mark_old_ebuild_db(categories, package, package_id) |
4936 |
CM.putConnection(conn) |
4937 |
+ print("C", categories + "/" + package + " ... Done.") |
4938 |
|
4939 |
def update_ebuild_db(self, build_dict): |
4940 |
conn=CM.getConnection() |
4941 |
|
4942 |
diff --git a/gobs/pym/pgsql.py b/gobs/pym/pgsql.py |
4943 |
index b0a6c83..db70f6c 100644 |
4944 |
--- a/gobs/pym/pgsql.py |
4945 |
+++ b/gobs/pym/pgsql.py |
4946 |
@@ -9,10 +9,40 @@ def get_default_config(connection): |
4947 |
|
4948 |
def get_profile_checksum(connection, config_profile): |
4949 |
cursor = connection.cursor() |
4950 |
- sqlQ = "SELECT make_conf_checksum FROM configs WHERE active = 'True' AND id = %s AND updateing = 'False'" |
4951 |
+ sqlQ = "SELECT make_conf_checksum FROM configs WHERE active = 'True' AND id = %s AND updateing = 'False' AND sync = 'False'" |
4952 |
cursor.execute(sqlQ, (config_profile,)) |
4953 |
return cursor.fetchone() |
4954 |
|
4955 |
+def get_profile_sync(connection, config_profile): |
4956 |
+ cursor = connection.cursor() |
4957 |
+ sqlQ = "SELECT sync FROM configs WHERE active = 'True' AND id = %s AND updateing = 'False'" |
4958 |
+ cursor.execute(sqlQ, (config_profile,)) |
4959 |
+ return cursor.fetchone() |
4960 |
+ |
4961 |
+def set_profile_sync(connection): |
4962 |
+ cursor = connection.cursor() |
4963 |
+ sqlQ = "UPDATE configs SET sync = 'True' WHERE active = 'True'" |
4964 |
+ cursor.execute(sqlQ) |
4965 |
+ connection.commit() |
4966 |
+ |
4967 |
+def reset_profile_sync(connection, config_profile): |
4968 |
+ cursor = connection.cursor() |
4969 |
+ sqlQ = "UPDATE configs SET sync = 'False' WHERE active = 'True' AND id = %s" |
4970 |
+ cursor.execute(sqlQ, (config_profile,)) |
4971 |
+ connection.commit() |
4972 |
+ |
4973 |
+def set_profile_updating(connection): |
4974 |
+ cursor = connection.cursor() |
4975 |
+ sqlQ = "UPDATE configs SET updating = 'True' WHERE active = 'True'" |
4976 |
+ cursor.execute(sqlQ) |
4977 |
+ connection.commit() |
4978 |
+ |
4979 |
+def reset_profile_updating(connection, config_profile): |
4980 |
+ cursor = connection.cursor() |
4981 |
+ sqlQ = "UPDATE configs SET updating = 'False' WHERE active = 'True'" |
4982 |
+ cursor.execute(sqlQ) |
4983 |
+ connection.commit() |
4984 |
+ |
4985 |
def get_packages_to_build(connection, config_profile): |
4986 |
cursor =connection.cursor() |
4987 |
# no point in returning dead ebuilds, to just chuck em out later |
4988 |
|
4989 |
diff --git a/gobs/pym/pgsql.py b/gobs/pym/pgsql.py~ |
4990 |
similarity index 95% |
4991 |
copy from gobs/pym/pgsql.py |
4992 |
copy to gobs/pym/pgsql.py~ |
4993 |
index b0a6c83..ea1f1f1 100644 |
4994 |
--- a/gobs/pym/pgsql.py |
4995 |
+++ b/gobs/pym/pgsql.py~ |
4996 |
@@ -9,10 +9,40 @@ def get_default_config(connection): |
4997 |
|
4998 |
def get_profile_checksum(connection, config_profile): |
4999 |
cursor = connection.cursor() |
5000 |
- sqlQ = "SELECT make_conf_checksum FROM configs WHERE active = 'True' AND id = %s AND updateing = 'False'" |
5001 |
+ sqlQ = "SELECT make_conf_checksum FROM configs WHERE active = 'True' AND id = %s AND updateing = 'False' AND sync = 'False'" |
5002 |
cursor.execute(sqlQ, (config_profile,)) |
5003 |
return cursor.fetchone() |
5004 |
|
5005 |
+def get_profile_sync(connection, config_profile): |
5006 |
+ cursor = connection.cursor() |
5007 |
+ sqlQ = "SELECT sync FROM configs WHERE active = 'True' AND id = %s AND updateing = 'False'" |
5008 |
+ cursor.execute(sqlQ, (config_profile,)) |
5009 |
+ return cursor.fetchone() |
5010 |
+ |
5011 |
+def set_profile_sync(connection): |
5012 |
+ cursor = connection.cursor() |
5013 |
+ sqlQ = "UPDATE configs SET sync = 'True' WHERE active = 'True'" |
5014 |
+ cursor.execute(sqlQ) |
5015 |
+ connection.commit() |
5016 |
+ |
5017 |
+def reset_profile_sync(connection, config_profile): |
5018 |
+ cursor = connection.cursor() |
5019 |
+ sqlQ = "UPDATE configs SET sync = 'False' WHERE active = 'True' AND id = %s" |
5020 |
+ cursor.execute(sqlQ, (config_profile,)) |
5021 |
+ connection.commit() |
5022 |
+ |
5023 |
+def set_profile_updating(connection): |
5024 |
+ cursor = connection.cursor() |
5025 |
+ sqlQ = "UPDATE configs SET updating = 'True' WHERE active = 'True'" |
5026 |
+ cursor.execute(sqlQ) |
5027 |
+ connection.commit() |
5028 |
+ |
5029 |
+def reset_profile_updating(connection, config_profile): |
5030 |
+ cursor = connection.cursor() |
5031 |
+ sqlQ = "UPDATE configs SET updating = 'False' WHERE active = 'True'" |
5032 |
+ cursor.execute(sqlQ) |
5033 |
+ connection.commit() |
5034 |
+ |
5035 |
def get_packages_to_build(connection, config_profile): |
5036 |
cursor =connection.cursor() |
5037 |
# no point in returning dead ebuilds, to just chuck em out later |
5038 |
|
5039 |
diff --git a/gobs/pym/readconf.py~ b/gobs/pym/readconf.py~ |
5040 |
new file mode 100644 |
5041 |
index 0000000..c017561 |
5042 |
--- /dev/null |
5043 |
+++ b/gobs/pym/readconf.py~ |
5044 |
@@ -0,0 +1,46 @@ |
5045 |
+import os |
5046 |
+import sys |
5047 |
+import re |
5048 |
+ |
5049 |
+class get_conf_settings(object): |
5050 |
+# open the /etc/buildhost/buildhost.conf file and get the needed |
5051 |
+# settings for gobs |
5052 |
+ def __init__(self): |
5053 |
+ self.configfile = "/etc/gobs/gobs.conf" |
5054 |
+ |
5055 |
+ def read_gobs_settings_all(self): |
5056 |
+ # It will return a dict with options from the configfile |
5057 |
+ try: |
5058 |
+ open_conffile = open(self.configfile, 'r') |
5059 |
+ except: |
5060 |
+ sys.exit("Fail to open config file:" + self.configfile) |
5061 |
+ textlines = open_conffile.readlines() |
5062 |
+ for line in textlines: |
5063 |
+ element = line.split('=') |
5064 |
+ if element[0] == 'SQLBACKEND': # Databas backend |
5065 |
+ get_sql_backend = element[1] |
5066 |
+ if element[0] == 'SQLDB': # Database |
5067 |
+ get_sql_db = element[1] |
5068 |
+ if element[0] == 'SQLHOST': # Host |
5069 |
+ get_sql_host = element[1] |
5070 |
+ if element[0] == 'SQLUSER': # User |
5071 |
+ get_sql_user = element[1] |
5072 |
+ if element[0] == 'SQLPASSWD': # Password |
5073 |
+ get_sql_passwd = element[1] |
5074 |
+ # Buildhost root (dir for host/setup on host) |
5075 |
+ if element[0] == 'GOBSROOT': |
5076 |
+ get_gobs_root = element[1] |
5077 |
+ # Buildhost setup (host/setup on guest) |
5078 |
+ if element[0] == 'GOBSCONFIG': |
5079 |
+ get_gobs_config = element[1] |
5080 |
+ |
5081 |
+ open_conffile.close() |
5082 |
+ gobs_settings_dict = {} |
5083 |
+ gobs_settings_dict['sql_backend'] = get_sql_backend.rstrip('\n') |
5084 |
+ gobs_settings_dict['sql_db'] = get_sql_db.rstrip('\n') |
5085 |
+ gobs_settings_dict['sql_host'] = get_sql_host.rstrip('\n') |
5086 |
+ gobs_settings_dict['sql_user'] = get_sql_user.rstrip('\n') |
5087 |
+ gobs_settings_dict['sql_passwd'] = get_sql_passwd.rstrip('\n') |
5088 |
+ gobs_settings_dict['gobs_root'] = get_gobs_root.rstrip('\n') |
5089 |
+ gobs_settings_dict['gobs_config'] = get_gobs_config.rstrip('\n') |
5090 |
+ return gobs_settings_dict |
5091 |
|
5092 |
diff --git a/gobs/pym/repoman_gobs.py~ b/gobs/pym/repoman_gobs.py~ |
5093 |
new file mode 100644 |
5094 |
index 0000000..2141342 |
5095 |
--- /dev/null |
5096 |
+++ b/gobs/pym/repoman_gobs.py~ |
5097 |
@@ -0,0 +1,48 @@ |
5098 |
+import sys |
5099 |
+import os |
5100 |
+import portage |
5101 |
+from portage import os, _encodings, _unicode_decode |
5102 |
+from portage import _unicode_encode |
5103 |
+from portage.exception import DigestException, FileNotFound, ParseError, PermissionDenied |
5104 |
+from _emerge.Package import Package |
5105 |
+from _emerge.RootConfig import RootConfig |
5106 |
+from repoman.checks import run_checks |
5107 |
+import codecs |
5108 |
+ |
5109 |
+class gobs_repoman(object): |
5110 |
+ |
5111 |
+ def __init__(self, mysettings, myportdb): |
5112 |
+ self._mysettings = mysettings |
5113 |
+ self._myportdb = myportdb |
5114 |
+ |
5115 |
+ def check_repoman(self, categories, package, ebuild_version_tree, config_id): |
5116 |
+ # We run repoman run_checks on the ebuild |
5117 |
+ pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package |
5118 |
+ full_path = pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild" |
5119 |
+ cpv = categories + "/" + package + "-" + ebuild_version_tree |
5120 |
+ root = '/' |
5121 |
+ trees = { |
5122 |
+ root : {'porttree' : portage.portagetree(root, settings=self._mysettings)} |
5123 |
+ } |
5124 |
+ root_config = RootConfig(self._mysettings, trees[root], None) |
5125 |
+ allvars = set(x for x in portage.auxdbkeys if not x.startswith("UNUSED_")) |
5126 |
+ allvars.update(Package.metadata_keys) |
5127 |
+ allvars = sorted(allvars) |
5128 |
+ myaux = dict(zip(allvars, self._myportdb.aux_get(cpv, allvars))) |
5129 |
+ pkg = Package(cpv=cpv, metadata=myaux, root_config=root_config) |
5130 |
+ fails = [] |
5131 |
+ try: |
5132 |
+ # All ebuilds should have utf_8 encoding. |
5133 |
+ f = codecs.open(_unicode_encode(full_path, |
5134 |
+ encoding = _encodings['fs'], errors = 'strict'), |
5135 |
+ mode = 'r', encoding = _encodings['repo.content']) |
5136 |
+ try: |
5137 |
+ for check_name, e in run_checks(f, pkg): |
5138 |
+ fails.append(check_name + ": " + e) |
5139 |
+ finally: |
5140 |
+ f.close() |
5141 |
+ except UnicodeDecodeError: |
5142 |
+ # A file.UTF8 failure will have already been recorded above. |
5143 |
+ pass |
5144 |
+ # fails will have a list with repoman errors |
5145 |
+ return fails |
5146 |
\ No newline at end of file |
5147 |
|
5148 |
diff --git a/gobs/pym/sync.py b/gobs/pym/sync.py |
5149 |
new file mode 100644 |
5150 |
index 0000000..35833f9 |
5151 |
--- /dev/null |
5152 |
+++ b/gobs/pym/sync.py |
5153 |
@@ -0,0 +1,23 @@ |
5154 |
+from __future__ import print_function |
5155 |
+import portage |
5156 |
+import os |
5157 |
+import errno |
5158 |
+from git import * |
5159 |
+ |
5160 |
+def git_pull(): |
5161 |
+ repo = Repo("/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/") |
5162 |
+ repo_remote = repo.remotes.origin |
5163 |
+ repo_remote.pull() |
5164 |
+ master = repo.head.reference |
5165 |
+ print(master.log()) |
5166 |
+ |
5167 |
+def sync_tree(): |
5168 |
+ settings, trees, mtimedb = load_emerge_config() |
5169 |
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi |
5170 |
+ tmpcmdline = [] |
5171 |
+ tmpcmdline.append("--sync") |
5172 |
+ tmpcmdline.append("--quiet") |
5173 |
+ myaction, myopts, myfiles = parse_opts(tmpcmdline) |
5174 |
+ fail_sync = action_sync(settings, trees, mtimedb, myopts, myaction) |
5175 |
+ print("fail_sync", fail_sync) |
5176 |
+ return fail_sync |
5177 |
\ No newline at end of file |
5178 |
|
5179 |
diff --git a/gobs/pym/sync.py~ b/gobs/pym/sync.py~ |
5180 |
new file mode 100644 |
5181 |
index 0000000..35833f9 |
5182 |
--- /dev/null |
5183 |
+++ b/gobs/pym/sync.py~ |
5184 |
@@ -0,0 +1,23 @@ |
5185 |
+from __future__ import print_function |
5186 |
+import portage |
5187 |
+import os |
5188 |
+import errno |
5189 |
+from git import * |
5190 |
+ |
5191 |
+def git_pull(): |
5192 |
+ repo = Repo("/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/") |
5193 |
+ repo_remote = repo.remotes.origin |
5194 |
+ repo_remote.pull() |
5195 |
+ master = repo.head.reference |
5196 |
+ print(master.log()) |
5197 |
+ |
5198 |
+def sync_tree(): |
5199 |
+ settings, trees, mtimedb = load_emerge_config() |
5200 |
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi |
5201 |
+ tmpcmdline = [] |
5202 |
+ tmpcmdline.append("--sync") |
5203 |
+ tmpcmdline.append("--quiet") |
5204 |
+ myaction, myopts, myfiles = parse_opts(tmpcmdline) |
5205 |
+ fail_sync = action_sync(settings, trees, mtimedb, myopts, myaction) |
5206 |
+ print("fail_sync", fail_sync) |
5207 |
+ return fail_sync |
5208 |
\ No newline at end of file |
5209 |
|
5210 |
diff --git a/gobs/pym/text.py~ b/gobs/pym/text.py~ |
5211 |
new file mode 100644 |
5212 |
index 0000000..7523015 |
5213 |
--- /dev/null |
5214 |
+++ b/gobs/pym/text.py~ |
5215 |
@@ -0,0 +1,48 @@ |
5216 |
+import sys |
5217 |
+import re |
5218 |
+import os |
5219 |
+import errno |
5220 |
+ |
5221 |
+def get_file_text(filename): |
5222 |
+ # Return the filename contents |
5223 |
+ try: |
5224 |
+ textfile = open(filename) |
5225 |
+ except: |
5226 |
+ return "No file", filename |
5227 |
+ text = "" |
5228 |
+ for line in textfile: |
5229 |
+ text += unicode(line, 'utf-8') |
5230 |
+ textfile.close() |
5231 |
+ return text |
5232 |
+ |
5233 |
+def get_ebuild_text(filename): |
5234 |
+ """Return the ebuild contents""" |
5235 |
+ try: |
5236 |
+ ebuildfile = open(filename) |
5237 |
+ except: |
5238 |
+ return "No Ebuild file there" |
5239 |
+ text = "" |
5240 |
+ dataLines = ebuildfile.readlines() |
5241 |
+ for i in dataLines: |
5242 |
+ text = text + i + " " |
5243 |
+ line2 = dataLines[2] |
5244 |
+ field = line2.split(" ") |
5245 |
+ ebuildfile.close() |
5246 |
+ try: |
5247 |
+ cvs_revision = field[3] |
5248 |
+ except: |
5249 |
+ cvs_revision = '' |
5250 |
+ return text, cvs_revision |
5251 |
+ |
5252 |
+def get_log_text_list(filename): |
5253 |
+ """Return the log contents as a list""" |
5254 |
+ print "filename", filename |
5255 |
+ try: |
5256 |
+ logfile = open(filename) |
5257 |
+ except: |
5258 |
+ return None |
5259 |
+ text = [] |
5260 |
+ dataLines = logfile.readlines() |
5261 |
+ for i in dataLines: |
5262 |
+ text.append(i) |
5263 |
+ return text |