Gentoo Archives: gentoo-commits

From: Magnus Granberg <zorry@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] dev/zorry:master commit in: gobs/bin/, ebuild/dev-python/gobs/, gobs/doc/portage/all/, gobs/pym/, gobs/doc/
Date: Fri, 27 Apr 2012 17:50:21
Message-Id: 1335548934.d6c34d2aec0cdaac88c1dd6ed2866db7c5afebb5.zorry@gentoo
1 commit: d6c34d2aec0cdaac88c1dd6ed2866db7c5afebb5
2 Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
3 AuthorDate: Fri Apr 27 17:48:54 2012 +0000
4 Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
5 CommitDate: Fri Apr 27 17:48:54 2012 +0000
6 URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=d6c34d2a
7
8 major update part2
9
10 ---
11 ebuild/dev-python/gobs/gobs-9999.ebuild~ | 50 -
12 gobs/bin/gobs_buildquerys~ | 38 -
13 gobs/bin/gobs_portage_hooks~ | 79 --
14 gobs/bin/gobs_setup_profile~ | 12 -
15 gobs/bin/gobs_updatedb~ | 123 --
16 gobs/doc/Setup.txt~ | 7 -
17 gobs/doc/portage/all/bashrc~ | 7 -
18 gobs/pym/ConnectionManager.py~ | 56 -
19 gobs/pym/Scheduler.py~ | 1994 ------------------------------
20 gobs/pym/arch.py~ | 25 -
21 gobs/pym/build_log.py~ | 572 ---------
22 gobs/pym/build_queru.py~ | 708 -----------
23 gobs/pym/categories.py~ | 30 -
24 gobs/pym/check_setup.py~ | 102 --
25 gobs/pym/depclean.py~ | 632 ----------
26 gobs/pym/init_setup_profile.py~ | 86 --
27 gobs/pym/manifest.py~ | 124 --
28 gobs/pym/old_cpv.py~ | 89 --
29 gobs/pym/package.py~ | 306 -----
30 gobs/pym/pgsql.py~ | 638 ----------
31 gobs/pym/readconf.py~ | 46 -
32 gobs/pym/repoman_gobs.py~ | 48 -
33 gobs/pym/sync.py~ | 23 -
34 gobs/pym/text.py~ | 48 -
35 24 files changed, 0 insertions(+), 5843 deletions(-)
36
37 diff --git a/ebuild/dev-python/gobs/gobs-9999.ebuild~ b/ebuild/dev-python/gobs/gobs-9999.ebuild~
38 deleted file mode 100644
39 index 22fa1b7..0000000
40 --- a/ebuild/dev-python/gobs/gobs-9999.ebuild~
41 +++ /dev/null
42 @@ -1,50 +0,0 @@
43 -# Copyright 1999-2010 Gentoo Foundation
44 -# Distributed under the terms of the GNU General Public License v2
45 -# $Header: $
46 -
47 -EAPI="2"
48 -PYTHON_DEPEND="*:2.7"
49 -SUPPORT_PYTHON_ABIS="1"
50 -
51 -inherit distutils git-2
52 -
53 -DESCRIPTION="Gobs"
54 -HOMEPAGE="http://git.overlays.gentoo.org/gitroot/dev/zorry.git"
55 -SRC_URI=""
56 -LICENSE="GPL-2"
57 -KEYWORDS="~amd64"
58 -SLOT="0"
59 -IUSE="+postgresql"
60 -
61 -RDEPEND="sys-apps/portage
62 - >=dev-python/git-python-0.3.2_rc1
63 - postgresql? ( dev-python/psycopg )"
64 -
65 -DEPEND="${RDEPEND}
66 - dev-python/setuptools"
67 -
68 -# RESTRICT_PYTHON_ABIS="3.*"
69 -
70 -EGIT_REPO_URI="http://git.overlays.gentoo.org/gitroot/dev/zorry.git"
71 -#EGIT_FETCH_CMD="git clone"
72 -##EGIT_BRANCH="master"
73 -##EGIT_COMMIT=${EGIT_BRANCH}
74 -# The eclass is based on subversion eclass.
75 -# If you use this eclass, the ${S} is ${WORKDIR}/${P}.
76 -# It is necessary to define the EGIT_REPO_URI variable at least.
77 -
78 -PYTHON_MODNAME="gobs"
79 -
80 -src_install() {
81 - dodir /var/lib/gobs || die
82 - dodir etc/gobs || die
83 - insinto /etc/gobs
84 - doins ${FILESDIR}/gobs.conf || die
85 - dobin ${S}/gobs/bin/gobs_updatedb || die
86 - dobin ${S}/gobs/bin/gobs_portage_hooks || die
87 - dosbin ${S}/gobs/bin/gobs_buildquerys || die
88 - dodoc ${S}/gobs/sql/pgdump.sql.gz || die
89 - dodoc ${S}/gobs/doc/Setup.txt || die
90 -
91 - distutils_src_install
92 -}
93 \ No newline at end of file
94
95 diff --git a/gobs/bin/gobs_buildquerys~ b/gobs/bin/gobs_buildquerys~
96 deleted file mode 100755
97 index 842b324..0000000
98 --- a/gobs/bin/gobs_buildquerys~
99 +++ /dev/null
100 @@ -1,38 +0,0 @@
101 -#!/usr/bin/python
102 -
103 -# Get the options from the config file set in gobs.readconf
104 -from gobs.readconf import get_conf_settings
105 -reader=get_conf_settings()
106 -gobs_settings_dict=reader.read_gobs_settings_all()
107 -# make a CM
108 -from gobs.ConnectionManager import connectionManager
109 -
110 -from gobs.check_setup import check_configure_guest
111 -from gobs.sync import git_pull
112 -from gobs.build_queru import queruaction
113 -import portage
114 -import sys
115 -import os
116 -
117 -def main_loop(config_profile):
118 - repeat = True
119 - #get a connection from the pool
120 - init_queru = queruaction(config_profile)
121 - while repeat:
122 - git_pull()
123 - if check_configure_guest(config_profile) is not True:
124 - # time.sleep(60)
125 - continue # retunr to the start of the function
126 - else:
127 - init_queru.procces_qureru()
128 - # time.sleep(60)
129 -
130 -def main():
131 - # Main
132 - config_profile = gobs_settings_dict['gobs_config']
133 - #we provide the main_loop with the ConnectionManager so we can hand out connections from within the loop
134 - main_loop(config_profile)
135 - connectionManager.closeAllConnections()
136 -
137 -if __name__ == "__main__":
138 - main()
139
140 diff --git a/gobs/bin/gobs_portage_hooks~ b/gobs/bin/gobs_portage_hooks~
141 deleted file mode 100755
142 index 5432545..0000000
143 --- a/gobs/bin/gobs_portage_hooks~
144 +++ /dev/null
145 @@ -1,79 +0,0 @@
146 -#!/usr/bin/python
147 -from __future__ import print_function
148 -import os
149 -import sys
150 -# Get the options from the config file set in gobs.readconf
151 -from gobs.readconf import get_conf_settings
152 -reader=get_conf_settings()
153 -gobs_settings_dict=reader.read_gobs_settings_all()
154 -# make a CM
155 -from gobs.ConnectionManager import connectionManager
156 -CM=connectionManager(gobs_settings_dict)
157 -#selectively import the pgsql/mysql querys
158 -if CM.getName()=='pgsql':
159 - from gobs.pgsql import *
160 -
161 -from gobs.package import gobs_package
162 -from gobs.build_log import gobs_buildlog
163 -from gobs.flags import gobs_use_flags
164 -from portage.util import writemsg, writemsg_level, writemsg_stdout
165 -import portage
166 -
167 -def get_build_dict_db(mysettings, config_profile, gobs_settings_dict):
168 - conn=CM.getConnection()
169 - myportdb = portage.portdbapi(mysettings=mysettings)
170 - categories = os.environ['CATEGORY']
171 - package = os.environ['PN']
172 - ebuild_version = os.environ['PVR']
173 - cpv = categories + "/" + package + "-" + ebuild_version
174 - init_package = gobs_package(mysettings, myportdb)
175 - print("cpv", cpv)
176 - package_id = have_package_db(conn, categories, package)
177 - # print("package_id %s" % package_id, file=sys.stdout)
178 - build_dict = {}
179 - mybuild_dict = {}
180 - build_dict['ebuild_version'] = ebuild_version
181 - build_dict['package_id'] = package_id
182 - build_dict['cpv'] = cpv
183 - build_dict['categories'] = categories
184 - build_dict['package'] = package
185 - build_dict['config_profile'] = config_profile
186 - init_useflags = gobs_use_flags(mysettings, myportdb, cpv)
187 - iuse_flags_list, final_use_list = init_useflags.get_flags_looked()
188 - #print 'final_use_list', final_use_list
189 - if final_use_list != []:
190 - build_dict['build_useflags'] = final_use_list
191 - else:
192 - build_dict['build_useflags'] = None
193 - #print "build_dict['build_useflags']", build_dict['build_useflags']
194 - pkgdir = os.path.join(mysettings['PORTDIR'], categories + "/" + package)
195 - ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir+ "/" + package + "-" + ebuild_version + ".ebuild")[0]
196 - build_dict['checksum'] = ebuild_version_checksum_tree
197 - print('checksum' ,ebuild_version_checksum_tree)
198 - ebuild_id = get_ebuild_id_db_checksum(conn, build_dict)
199 - print('ebuild_id in db', ebuild_id)
200 - if ebuild_id is None:
201 - #print 'have any ebuild', get_ebuild_checksum(conn, package_id, ebuild_version)
202 - init_package.update_ebuild_db(build_dict)
203 - ebuild_id = get_ebuild_id_db_checksum(conn, build_dict)
204 - build_dict['ebuild_id'] = ebuild_id
205 - queue_id = check_revision(conn, build_dict)
206 - print("queue_id in db", queue_id)
207 - if queue_id is None:
208 - build_dict['queue_id'] = None
209 - else:
210 - build_dict['queue_id'] = queue_id
211 - return build_dict
212 -
213 -def main():
214 - # Main
215 - config_profile = gobs_settings_dict['gobs_config']
216 - #we provide the main_loop with the ConnectionManager so we can hand out connections from within the loop
217 - mysettings = portage.settings
218 - build_dict = get_build_dict_db( mysettings, config_profile, gobs_settings_dict)
219 - init_buildlog = gobs_buildlog(mysettings, build_dict)
220 - init_buildlog.add_buildlog_main()
221 - #connectionManager.closeAllConnections()
222 -
223 -if __name__ == "__main__":
224 - main()
225 \ No newline at end of file
226
227 diff --git a/gobs/bin/gobs_setup_profile~ b/gobs/bin/gobs_setup_profile~
228 deleted file mode 100755
229 index cf926fa..0000000
230 --- a/gobs/bin/gobs_setup_profile~
231 +++ /dev/null
232 @@ -1,12 +0,0 @@
233 -#!/usr/bin/python
234 -# Copyright 2006-2011 Gentoo Foundation
235 -# Distributed under the terms of the GNU General Public License v2
236 -
237 -from gobs.init_setup_profile import setup_profile_main
238 -
239 -def main():
240 - # Main
241 - setup_profile_main(args=None):
242 -
243 -if __name__ == "__main__":
244 - main()
245 \ No newline at end of file
246
247 diff --git a/gobs/bin/gobs_updatedb~ b/gobs/bin/gobs_updatedb~
248 deleted file mode 100755
249 index 15271e4..0000000
250 --- a/gobs/bin/gobs_updatedb~
251 +++ /dev/null
252 @@ -1,123 +0,0 @@
253 -#!/usr/bin/python
254 -# Copyright 2006-2011 Gentoo Foundation
255 -# Distributed under the terms of the GNU General Public License v2
256 -
257 -""" This code will update the sql backend with needed info for
258 - the Frontend and the Guest deamon. """
259 -
260 -import sys
261 -import os
262 -import multiprocessing
263 -
264 -
265 -# Get the options from the config file set in gobs.readconf
266 -from gobs.readconf import get_conf_settings
267 -reader=get_conf_settings()
268 -gobs_settings_dict=reader.read_gobs_settings_all()
269 -# make a CM
270 -from gobs.ConnectionManager import connectionManager
271 -CM=connectionManager(gobs_settings_dict)
272 -#selectively import the pgsql/mysql querys
273 -if CM.getName()=='pgsql':
274 - from gobs.pgsql import *
275 -
276 -from gobs.check_setup import check_make_conf
277 -from gobs.arch import gobs_arch
278 -from gobs.package import gobs_package
279 -from gobs.categories import gobs_categories
280 -from gobs.old_cpv import gobs_old_cpv
281 -from gobs.categories import gobs_categories
282 -from gobs.sync import git_pull, sync_tree
283 -import portage
284 -
285 -def init_portage_settings():
286 -
287 - """ Get the BASE Setup/Config for portage.settings
288 - @type: module
289 - @module: The SQL Backend
290 - @type: dict
291 - @parms: config options from the config file (host_setup_root)
292 - @rtype: settings
293 - @returns new settings
294 - """
295 - # check config setup
296 - #git stuff
297 - conn=CM.getConnection()
298 - check_make_conf()
299 - print "Check configs done"
300 - # Get default config from the configs table and default_config=1
301 - config_id = get_default_config(conn) # HostConfigDir = table configs id
302 - CM.putConnection(conn);
303 - default_config_root = "/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id[0] + "/"
304 - # Set config_root (PORTAGE_CONFIGROOT) to default_config_root
305 - mysettings = portage.config(config_root = default_config_root)
306 - print "Setting default config to:", config_id[0]
307 - return mysettings
308 -
309 -def update_cpv_db_pool(mysettings, package_line):
310 - conn=CM.getConnection()
311 - # Setup portdb, gobs_categories, gobs_old_cpv, package
312 - myportdb = portage.portdbapi(mysettings=mysettings)
313 - init_categories = gobs_categories(mysettings)
314 - init_package = gobs_package(mysettings, myportdb)
315 - # split the cp to categories and package
316 - element = package_line.split('/')
317 - categories = element[0]
318 - package = element[1]
319 - # Check if we don't have the cp in the package table
320 - package_id = have_package_db(conn,categories, package)
321 - if package_id is None:
322 - # Add new package with ebuilds
323 - init_package.add_new_package_db(categories, package)
324 - # Ceck if we have the cp in the package table
325 - elif package_id is not None:
326 - # Update the packages with ebuilds
327 - init_package.update_package_db(categories, package, package_id)
328 - # Update the metadata for categories
329 - init_categories.update_categories_db(categories)
330 - CM.putConnection(conn)
331 -
332 -def update_cpv_db(mysettings):
333 - """Code to update the cpv in the database.
334 - @type:settings
335 - @parms: portage.settings
336 - @type: module
337 - @module: The SQL Backend
338 - @type: dict
339 - @parms: config options from the config file
340 - """
341 - print "Checking categories, package, ebuilds"
342 - # Setup portdb, gobs_categories, gobs_old_cpv, package
343 - myportdb = portage.portdbapi(mysettings=mysettings)
344 - package_id_list_tree = []
345 - # Will run some update checks and update package if needed
346 - # Get categories/package list from portage
347 - package_list_tree = myportdb.cp_all()
348 - pool_cores= multiprocessing.cpu_count()
349 - if pool_cores > "3":
350 - use_pool_cores = pool_cores - 2
351 - else
352 - use_pool_cores = 1
353 - pool = multiprocessing.Pool(processes=use_pool_cores)
354 - # Run the update package for all package in the list in
355 - # a multiprocessing pool
356 - for package_line in sorted(package_list_tree):
357 - pool.apply_async(update_cpv_db_pool, (mysettings, package_line,))
358 - pool.close()
359 - pool.join()
360 - print "Checking categories, package and ebuilds done"
361 -
362 -def main():
363 - # Main
364 - # Init settings for the default config
365 - git_pull
366 - if sync_tree():
367 - mysettings = init_portage_settings()
368 - init_arch = gobs_arch()
369 - init_arch.update_arch_db()
370 - # Update the cpv db
371 - update_cpv_db(mysettings)
372 - CM.closeAllConnections()
373 -
374 -if __name__ == "__main__":
375 - main()
376 \ No newline at end of file
377
378 diff --git a/gobs/doc/Setup.txt~ b/gobs/doc/Setup.txt~
379 deleted file mode 100644
380 index 956990f..0000000
381 --- a/gobs/doc/Setup.txt~
382 +++ /dev/null
383 @@ -1,7 +0,0 @@
384 -1. Setup the Backend
385 -Setup the gobs.conf for the db.
386 -Change GOBSGITREPONAME to point to the git repo with your configs for the profiles/setups.
387 -Import the *dump.sql.gz to your sql.
388 -The portage/base/make.conf should be in the base profile/setup
389 -The portage/all/bashrc should be in all the guest profiles/setups
390 -The porfiles dir need a dir call config with a parent file that point to base profile
391
392 diff --git a/gobs/doc/portage/all/bashrc~ b/gobs/doc/portage/all/bashrc~
393 deleted file mode 100644
394 index 092548e..0000000
395 --- a/gobs/doc/portage/all/bashrc~
396 +++ /dev/null
397 @@ -1,7 +0,0 @@
398 -pre_pkg_setup() {
399 - register_die_hook gobs_portage_hook
400 - register_success_hook gobs_portage_hook
401 -}
402 -gobs_portage_hook() {
403 - /home/buildhost/portage_hooks
404 -}
405
406 diff --git a/gobs/pym/ConnectionManager.py~ b/gobs/pym/ConnectionManager.py~
407 deleted file mode 100644
408 index 1bbeb35..0000000
409 --- a/gobs/pym/ConnectionManager.py~
410 +++ /dev/null
411 @@ -1,56 +0,0 @@
412 -#a simple CM build around sie singleton so there can only be 1 CM but you can call the class in different place with out caring about it.
413 -#when the first object is created of this class, the SQL settings are read from the file and stored in the class for later reuse by the next object and so on.
414 -#(maybe later add support for connection pools)
415 -from __future__ import print_function
416 -
417 -class connectionManager(object):
418 - _instance = None
419 -
420 - #size of the connection Pool
421 - def __new__(cls, settings_dict, numberOfconnections=20, *args, **kwargs):
422 - if not cls._instance:
423 - cls._instance = super(connectionManager, cls).__new__(cls, *args, **kwargs)
424 - #read the sql user/host etc and store it in the local object
425 - print(settings_dict['sql_host'])
426 - cls._host=settings_dict['sql_host']
427 - cls._user=settings_dict['sql_user']
428 - cls._password=settings_dict['sql_passwd']
429 - cls._database=settings_dict['sql_db']
430 - #shouldnt we include port also?
431 - try:
432 - from psycopg2 import pool
433 - cls._connectionNumber=numberOfconnections
434 - #always create 1 connection
435 - cls._pool=pool.ThreadedConnectionPool(1,cls._connectionNumber,host=cls._host,database=cls._database,user=cls._user,password=cls._password)
436 - cls._name='pgsql'
437 - except ImportError:
438 - print("Please install a recent version of dev-python/psycopg for Python")
439 - sys.exit(1)
440 - #setup connection pool
441 - return cls._instance
442 -
443 - ## returns the name of the database pgsql/mysql etc
444 - def getName(self):
445 - return self._name
446 -
447 - def getConnection(self):
448 - return self._pool.getconn()
449 -
450 - def putConnection(self, connection):
451 - self._pool.putconn(connection)
452 -
453 - def closeAllConnections(self):
454 - self._pool.closeall()
455 -
456 -##how to use this class
457 -#get a instance of the class (there can only be 1 instance but many pointers (single ton))
458 -#get the connection
459 -#conn=cm.getConnection()
460 -#get a cursor
461 -#cur=conn.cursor()
462 -#do stuff
463 -#cur.execute(stuff)
464 -#"close a connection" temporarily put it away for reuse
465 -#cm.putConnection(conn)
466 -#kill all connections, should only be used just before the program terminates
467 -#cm.closeAllConnections()
468
469 diff --git a/gobs/pym/Scheduler.py~ b/gobs/pym/Scheduler.py~
470 deleted file mode 100644
471 index 005f861..0000000
472 --- a/gobs/pym/Scheduler.py~
473 +++ /dev/null
474 @@ -1,1994 +0,0 @@
475 -# Copyright 1999-2011 Gentoo Foundation
476 -# Distributed under the terms of the GNU General Public License v2
477 -
478 -from __future__ import print_function
479 -
480 -from collections import deque
481 -import gc
482 -import gzip
483 -import logging
484 -import shutil
485 -import signal
486 -import sys
487 -import tempfile
488 -import textwrap
489 -import time
490 -import warnings
491 -import weakref
492 -import zlib
493 -
494 -import portage
495 -from portage import os
496 -from portage import _encodings
497 -from portage import _unicode_decode, _unicode_encode
498 -from portage.cache.mappings import slot_dict_class
499 -from portage.elog.messages import eerror
500 -from portage.localization import _
501 -from portage.output import colorize, create_color_func, red
502 -bad = create_color_func("BAD")
503 -from portage._sets import SETPREFIX
504 -from portage._sets.base import InternalPackageSet
505 -from portage.util import writemsg, writemsg_level
506 -from portage.package.ebuild.digestcheck import digestcheck
507 -from portage.package.ebuild.digestgen import digestgen
508 -from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
509 -
510 -import _emerge
511 -from _emerge.BinpkgFetcher import BinpkgFetcher
512 -from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
513 -from _emerge.BinpkgVerifier import BinpkgVerifier
514 -from _emerge.Blocker import Blocker
515 -from _emerge.BlockerDB import BlockerDB
516 -from _emerge.clear_caches import clear_caches
517 -from _emerge.create_depgraph_params import create_depgraph_params
518 -from _emerge.create_world_atom import create_world_atom
519 -from _emerge.DepPriority import DepPriority
520 -from _emerge.depgraph import depgraph, resume_depgraph
521 -from _emerge.EbuildFetcher import EbuildFetcher
522 -from _emerge.EbuildPhase import EbuildPhase
523 -from _emerge.emergelog import emergelog
524 -from _emerge.FakeVartree import FakeVartree
525 -from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
526 -from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
527 -from _emerge.JobStatusDisplay import JobStatusDisplay
528 -from _emerge.MergeListItem import MergeListItem
529 -from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
530 -from _emerge.Package import Package
531 -from _emerge.PackageMerge import PackageMerge
532 -from _emerge.PollScheduler import PollScheduler
533 -from _emerge.RootConfig import RootConfig
534 -from _emerge.SlotObject import SlotObject
535 -from _emerge.SequentialTaskQueue import SequentialTaskQueue
536 -
537 -from gobs.build_log import gobs_buildlog
538 -
539 -if sys.hexversion >= 0x3000000:
540 - basestring = str
541 -
542 -class Scheduler(PollScheduler):
543 -
544 - # max time between display status updates (milliseconds)
545 - _max_display_latency = 3000
546 -
547 - _opts_ignore_blockers = \
548 - frozenset(["--buildpkgonly",
549 - "--fetchonly", "--fetch-all-uri",
550 - "--nodeps", "--pretend"])
551 -
552 - _opts_no_background = \
553 - frozenset(["--pretend",
554 - "--fetchonly", "--fetch-all-uri"])
555 -
556 - _opts_no_restart = frozenset(["--buildpkgonly",
557 - "--fetchonly", "--fetch-all-uri", "--pretend"])
558 -
559 - _bad_resume_opts = set(["--ask", "--changelog",
560 - "--resume", "--skipfirst"])
561 -
562 - class _iface_class(SlotObject):
563 - __slots__ = ("fetch",
564 - "output", "register", "schedule",
565 - "scheduleSetup", "scheduleUnpack", "scheduleYield",
566 - "unregister")
567 -
568 - class _fetch_iface_class(SlotObject):
569 - __slots__ = ("log_file", "schedule")
570 -
571 - _task_queues_class = slot_dict_class(
572 - ("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")
573 -
574 - class _build_opts_class(SlotObject):
575 - __slots__ = ("buildpkg", "buildpkg_exclude", "buildpkgonly",
576 - "fetch_all_uri", "fetchonly", "pretend")
577 -
578 - class _binpkg_opts_class(SlotObject):
579 - __slots__ = ("fetchonly", "getbinpkg", "pretend")
580 -
581 - class _pkg_count_class(SlotObject):
582 - __slots__ = ("curval", "maxval")
583 -
584 - class _emerge_log_class(SlotObject):
585 - __slots__ = ("xterm_titles",)
586 -
587 - def log(self, *pargs, **kwargs):
588 - if not self.xterm_titles:
589 - # Avoid interference with the scheduler's status display.
590 - kwargs.pop("short_msg", None)
591 - emergelog(self.xterm_titles, *pargs, **kwargs)
592 -
593 - class _failed_pkg(SlotObject):
594 - __slots__ = ("build_dir", "build_log", "pkg", "returncode")
595 -
596 - class _ConfigPool(object):
597 - """Interface for a task to temporarily allocate a config
598 - instance from a pool. This allows a task to be constructed
599 - long before the config instance actually becomes needed, like
600 - when prefetchers are constructed for the whole merge list."""
601 - __slots__ = ("_root", "_allocate", "_deallocate")
602 - def __init__(self, root, allocate, deallocate):
603 - self._root = root
604 - self._allocate = allocate
605 - self._deallocate = deallocate
606 - def allocate(self):
607 - return self._allocate(self._root)
608 - def deallocate(self, settings):
609 - self._deallocate(settings)
610 -
611 - class _unknown_internal_error(portage.exception.PortageException):
612 - """
613 - Used internally to terminate scheduling. The specific reason for
614 - the failure should have been dumped to stderr.
615 - """
616 - def __init__(self, value=""):
617 - portage.exception.PortageException.__init__(self, value)
618 -
619 - def __init__(self, settings, trees, mtimedb, myopts,
620 - spinner, mergelist=None, favorites=None, graph_config=None):
621 - PollScheduler.__init__(self)
622 -
623 - if mergelist is not None:
624 - warnings.warn("The mergelist parameter of the " + \
625 - "_emerge.Scheduler constructor is now unused. Use " + \
626 - "the graph_config parameter instead.",
627 - DeprecationWarning, stacklevel=2)
628 -
629 - self.settings = settings
630 - self.target_root = settings["ROOT"]
631 - self.trees = trees
632 - self.myopts = myopts
633 - self._spinner = spinner
634 - self._mtimedb = mtimedb
635 - self._favorites = favorites
636 - self._args_set = InternalPackageSet(favorites, allow_repo=True)
637 - self._build_opts = self._build_opts_class()
638 -
639 - for k in self._build_opts.__slots__:
640 - setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
641 - self._build_opts.buildpkg_exclude = InternalPackageSet( \
642 - initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
643 - allow_wildcard=True, allow_repo=True)
644 -
645 - self._binpkg_opts = self._binpkg_opts_class()
646 - for k in self._binpkg_opts.__slots__:
647 - setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
648 -
649 - self.curval = 0
650 - self._logger = self._emerge_log_class()
651 - self._task_queues = self._task_queues_class()
652 - for k in self._task_queues.allowed_keys:
653 - setattr(self._task_queues, k,
654 - SequentialTaskQueue())
655 -
656 - # Holds merges that will wait to be executed when no builds are
657 - # executing. This is useful for system packages since dependencies
658 - # on system packages are frequently unspecified. For example, see
659 - # bug #256616.
660 - self._merge_wait_queue = deque()
661 - # Holds merges that have been transfered from the merge_wait_queue to
662 - # the actual merge queue. They are removed from this list upon
663 - # completion. Other packages can start building only when this list is
664 - # empty.
665 - self._merge_wait_scheduled = []
666 -
667 - # Holds system packages and their deep runtime dependencies. Before
668 - # being merged, these packages go to merge_wait_queue, to be merged
669 - # when no other packages are building.
670 - self._deep_system_deps = set()
671 -
672 - # Holds packages to merge which will satisfy currently unsatisfied
673 - # deep runtime dependencies of system packages. If this is not empty
674 - # then no parallel builds will be spawned until it is empty. This
675 - # minimizes the possibility that a build will fail due to the system
676 - # being in a fragile state. For example, see bug #259954.
677 - self._unsatisfied_system_deps = set()
678 -
679 - self._status_display = JobStatusDisplay(
680 - xterm_titles=('notitles' not in settings.features))
681 - self._max_load = myopts.get("--load-average")
682 - max_jobs = myopts.get("--jobs")
683 - if max_jobs is None:
684 - max_jobs = 1
685 - self._set_max_jobs(max_jobs)
686 -
687 - # The root where the currently running
688 - # portage instance is installed.
689 - self._running_root = trees["/"]["root_config"]
690 - self.edebug = 0
691 - if settings.get("PORTAGE_DEBUG", "") == "1":
692 - self.edebug = 1
693 - self.pkgsettings = {}
694 - self._config_pool = {}
695 - for root in self.trees:
696 - self._config_pool[root] = []
697 -
698 - self._fetch_log = os.path.join(_emerge.emergelog._emerge_log_dir,
699 - 'emerge-fetch.log')
700 - fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
701 - schedule=self._schedule_fetch)
702 - self._sched_iface = self._iface_class(
703 - fetch=fetch_iface, output=self._task_output,
704 - register=self._register,
705 - schedule=self._schedule_wait,
706 - scheduleSetup=self._schedule_setup,
707 - scheduleUnpack=self._schedule_unpack,
708 - scheduleYield=self._schedule_yield,
709 - unregister=self._unregister)
710 -
711 - self._prefetchers = weakref.WeakValueDictionary()
712 - self._pkg_queue = []
713 - self._running_tasks = {}
714 - self._completed_tasks = set()
715 -
716 - self._failed_pkgs = []
717 - self._failed_pkgs_all = []
718 - self._failed_pkgs_die_msgs = []
719 - self._post_mod_echo_msgs = []
720 - self._parallel_fetch = False
721 - self._init_graph(graph_config)
722 - merge_count = len([x for x in self._mergelist \
723 - if isinstance(x, Package) and x.operation == "merge"])
724 - self._pkg_count = self._pkg_count_class(
725 - curval=0, maxval=merge_count)
726 - self._status_display.maxval = self._pkg_count.maxval
727 -
728 - # The load average takes some time to respond when new
729 - # jobs are added, so we need to limit the rate of adding
730 - # new jobs.
731 - self._job_delay_max = 10
732 - self._job_delay_factor = 1.0
733 - self._job_delay_exp = 1.5
734 - self._previous_job_start_time = None
735 -
736 - # This is used to memoize the _choose_pkg() result when
737 - # no packages can be chosen until one of the existing
738 - # jobs completes.
739 - self._choose_pkg_return_early = False
740 -
741 - features = self.settings.features
742 - if "parallel-fetch" in features and \
743 - not ("--pretend" in self.myopts or \
744 - "--fetch-all-uri" in self.myopts or \
745 - "--fetchonly" in self.myopts):
746 - if "distlocks" not in features:
747 - portage.writemsg(red("!!!")+"\n", noiselevel=-1)
748 - portage.writemsg(red("!!!")+" parallel-fetching " + \
749 - "requires the distlocks feature enabled"+"\n",
750 - noiselevel=-1)
751 - portage.writemsg(red("!!!")+" you have it disabled, " + \
752 - "thus parallel-fetching is being disabled"+"\n",
753 - noiselevel=-1)
754 - portage.writemsg(red("!!!")+"\n", noiselevel=-1)
755 - elif merge_count > 1:
756 - self._parallel_fetch = True
757 -
758 - if self._parallel_fetch:
759 - # clear out existing fetch log if it exists
760 - try:
761 - open(self._fetch_log, 'w').close()
762 - except EnvironmentError:
763 - pass
764 -
765 - self._running_portage = None
766 - portage_match = self._running_root.trees["vartree"].dbapi.match(
767 - portage.const.PORTAGE_PACKAGE_ATOM)
768 - if portage_match:
769 - cpv = portage_match.pop()
770 - self._running_portage = self._pkg(cpv, "installed",
771 - self._running_root, installed=True)
772 -
773 - def _terminate_tasks(self):
774 - self._status_display.quiet = True
775 - while self._running_tasks:
776 - task_id, task = self._running_tasks.popitem()
777 - task.cancel()
778 - for q in self._task_queues.values():
779 - q.clear()
780 -
781 - def _init_graph(self, graph_config):
782 - """
783 - Initialization structures used for dependency calculations
784 - involving currently installed packages.
785 - """
786 - self._set_graph_config(graph_config)
787 - self._blocker_db = {}
788 - for root in self.trees:
789 - if graph_config is None:
790 - fake_vartree = FakeVartree(self.trees[root]["root_config"],
791 - pkg_cache=self._pkg_cache)
792 - fake_vartree.sync()
793 - else:
794 - fake_vartree = graph_config.trees[root]['vartree']
795 - self._blocker_db[root] = BlockerDB(fake_vartree)
796 -
797 - def _destroy_graph(self):
798 - """
799 - Use this to free memory at the beginning of _calc_resume_list().
800 - After _calc_resume_list(), the _init_graph() method
801 - must to be called in order to re-generate the structures that
802 - this method destroys.
803 - """
804 - self._blocker_db = None
805 - self._set_graph_config(None)
806 - gc.collect()
807 -
808 - def _poll(self, timeout=None):
809 -
810 - self._schedule()
811 -
812 - if timeout is None:
813 - while True:
814 - if not self._poll_event_handlers:
815 - self._schedule()
816 - if not self._poll_event_handlers:
817 - raise StopIteration(
818 - "timeout is None and there are no poll() event handlers")
819 - previous_count = len(self._poll_event_queue)
820 - PollScheduler._poll(self, timeout=self._max_display_latency)
821 - self._status_display.display()
822 - if previous_count != len(self._poll_event_queue):
823 - break
824 -
825 - elif timeout <= self._max_display_latency:
826 - PollScheduler._poll(self, timeout=timeout)
827 - if timeout == 0:
828 - # The display is updated by _schedule() above, so it would be
829 - # redundant to update it here when timeout is 0.
830 - pass
831 - else:
832 - self._status_display.display()
833 -
834 - else:
835 - remaining_timeout = timeout
836 - start_time = time.time()
837 - while True:
838 - previous_count = len(self._poll_event_queue)
839 - PollScheduler._poll(self,
840 - timeout=min(self._max_display_latency, remaining_timeout))
841 - self._status_display.display()
842 - if previous_count != len(self._poll_event_queue):
843 - break
844 - elapsed_time = time.time() - start_time
845 - if elapsed_time < 0:
846 - # The system clock has changed such that start_time
847 - # is now in the future, so just assume that the
848 - # timeout has already elapsed.
849 - break
850 - remaining_timeout = timeout - 1000 * elapsed_time
851 - if remaining_timeout <= 0:
852 - break
853 -
854 - def _set_max_jobs(self, max_jobs):
855 - self._max_jobs = max_jobs
856 - self._task_queues.jobs.max_jobs = max_jobs
857 - if "parallel-install" in self.settings.features:
858 - self._task_queues.merge.max_jobs = max_jobs
859 -
def _background_mode(self):
    """
    Check if background mode is enabled and adjust states as necessary.

    @rtype: bool
    @returns: True if background mode is enabled, False otherwise.
    """
    # Background mode applies when running parallel jobs or a quiet
    # option, unless an option in _opts_no_background (e.g. --pretend)
    # forces foreground output.
    background = (self._max_jobs is True or \
        self._max_jobs > 1 or "--quiet" in self.myopts \
        or "--quiet-build" in self.myopts) and \
        not bool(self._opts_no_background.intersection(self.myopts))

    if background:
        interactive_tasks = self._get_interactive_tasks()
        if interactive_tasks:
            # Interactive packages need the terminal, so fall back to
            # foreground mode and serialize the jobs.
            background = False
            writemsg_level(">>> Sending package output to stdio due " + \
                "to interactive package(s):\n",
                level=logging.INFO, noiselevel=-1)
            msg = [""]
            for pkg in interactive_tasks:
                pkg_str = " " + colorize("INFORM", str(pkg.cpv))
                if pkg.root != "/":
                    pkg_str += " for " + pkg.root
                msg.append(pkg_str)
            msg.append("")
            writemsg_level("".join("%s\n" % (l,) for l in msg),
                level=logging.INFO, noiselevel=-1)
            if self._max_jobs is True or self._max_jobs > 1:
                self._set_max_jobs(1)
                writemsg_level(">>> Setting --jobs=1 due " + \
                    "to the above interactive package(s)\n",
                    level=logging.INFO, noiselevel=-1)
                writemsg_level(">>> In order to temporarily mask " + \
                    "interactive updates, you may\n" + \
                    ">>> specify --accept-properties=-interactive\n",
                    level=logging.INFO, noiselevel=-1)
    # The status display is quieted in foreground mode, or when --quiet
    # is given without --verbose.
    self._status_display.quiet = \
        not background or \
        ("--quiet" in self.myopts and \
        "--verbose" not in self.myopts)

    self._logger.xterm_titles = \
        "notitles" not in self.settings.features and \
        self._status_display.quiet

    return background
908 - def _get_interactive_tasks(self):
909 - interactive_tasks = []
910 - for task in self._mergelist:
911 - if not (isinstance(task, Package) and \
912 - task.operation == "merge"):
913 - continue
914 - if 'interactive' in task.metadata.properties:
915 - interactive_tasks.append(task)
916 - return interactive_tasks
917 -
def _set_graph_config(self, graph_config):
    """
    Install (or, when graph_config is None, tear down) the dependency
    graph state: package cache, digraph and merge list.  When running
    without dependency tracking, graph data is discarded to save memory.
    """
    if graph_config is None:
        # Reset to an empty state.
        self._graph_config = None
        self._pkg_cache = {}
        self._digraph = None
        self._mergelist = []
        self._deep_system_deps.clear()
        return

    self._graph_config = graph_config
    self._pkg_cache = graph_config.pkg_cache
    self._digraph = graph_config.graph
    self._mergelist = graph_config.mergelist

    if "--nodeps" in self.myopts or \
        (self._max_jobs is not True and self._max_jobs < 2):
        # save some memory
        self._digraph = None
        graph_config.graph = None
        graph_config.pkg_cache.clear()
        self._deep_system_deps.clear()
        # Keep only the packages actually being merged in the cache.
        for pkg in self._mergelist:
            self._pkg_cache[pkg] = pkg
        return

    # Full graph mode: compute system deps, prune irrelevant roots and
    # serialize same-cpv builds before any tasks are started.
    self._find_system_deps()
    self._prune_digraph()
    self._prevent_builddir_collisions()
    if '--debug' in self.myopts:
        writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
        self._digraph.debug_print()
        writemsg("\n", noiselevel=-1)
def _find_system_deps(self):
    """
    Find system packages and their deep runtime dependencies. Before being
    merged, these packages go to merge_wait_queue, to be merged when no
    other packages are building.
    NOTE: This can only find deep system deps if the system set has been
    added to the graph and traversed deeply (the depgraph "complete"
    parameter will do this, triggered by emerge --complete-graph option).
    """
    deep_system_deps = self._deep_system_deps
    deep_system_deps.clear()
    # Keep only the packages that are actually scheduled to be merged.
    for pkg in _find_deep_system_runtime_deps(self._digraph):
        if pkg.operation == "merge":
            deep_system_deps.add(pkg)
def _prune_digraph(self):
    """
    Prune any root nodes that are irrelevant.
    """

    graph = self._digraph
    completed_tasks = self._completed_tasks
    removed_nodes = set()
    # Removing a root can expose new irrelevant roots, so iterate to a
    # fixed point: stop when a pass removes nothing.
    while True:
        for node in graph.root_nodes():
            if not isinstance(node, Package) or \
                (node.installed and node.operation == "nomerge") or \
                node.onlydeps or \
                node in completed_tasks:
                removed_nodes.add(node)
        if removed_nodes:
            graph.difference_update(removed_nodes)
        if not removed_nodes:
            break
        removed_nodes.clear()
def _prevent_builddir_collisions(self):
    """
    When building stages, sometimes the same exact cpv needs to be merged
    to both $ROOTs. Add edges to the digraph in order to avoid collisions
    in the builddir. Currently, normal file locks would be inappropriate
    for this purpose since emerge holds all of its build dir locks from
    the main process.
    """
    # Map each cpv to the packages (one per $ROOT) that will build it.
    cpv_map = {}
    for pkg in self._mergelist:
        if not isinstance(pkg, Package):
            # a satisfied blocker
            continue
        if pkg.installed:
            continue
        if pkg.cpv not in cpv_map:
            cpv_map[pkg.cpv] = [pkg]
            continue
        # Same cpv already queued for another $ROOT: add buildtime edges
        # so the builds are serialized rather than sharing a builddir.
        for earlier_pkg in cpv_map[pkg.cpv]:
            self._digraph.add(earlier_pkg, pkg,
                priority=DepPriority(buildtime=True))
        cpv_map[pkg.cpv].append(pkg)
class _pkg_failure(portage.exception.PortageException):
    """
    An instance of this class is raised by unmerge() when
    an uninstallation fails.
    """
    # Default exit status used when no explicit status is supplied.
    status = 1
    def __init__(self, *pargs):
        portage.exception.PortageException.__init__(self, pargs)
        if pargs:
            # The first positional argument overrides the default status.
            self.status = pargs[0]
1023 - def _schedule_fetch(self, fetcher):
1024 - """
1025 - Schedule a fetcher, in order to control the number of concurrent
1026 - fetchers. If self._max_jobs is greater than 1 then the fetch
1027 - queue is bypassed and the fetcher is started immediately,
1028 - otherwise it is added to the front of the parallel-fetch queue.
1029 - NOTE: The parallel-fetch queue is currently used to serialize
1030 - access to the parallel-fetch log, so changes in the log handling
1031 - would be required before it would be possible to enable
1032 - concurrent fetching within the parallel-fetch queue.
1033 - """
1034 - if self._max_jobs > 1:
1035 - fetcher.start()
1036 - else:
1037 - self._task_queues.fetch.addFront(fetcher)
1038 -
1039 - def _schedule_setup(self, setup_phase):
1040 - """
1041 - Schedule a setup phase on the merge queue, in order to
1042 - serialize unsandboxed access to the live filesystem.
1043 - """
1044 - if self._task_queues.merge.max_jobs > 1 and \
1045 - "ebuild-locks" in self.settings.features:
1046 - # Use a separate queue for ebuild-locks when the merge
1047 - # queue allows more than 1 job (due to parallel-install),
1048 - # since the portage.locks module does not behave as desired
1049 - # if we try to lock the same file multiple times
1050 - # concurrently from the same process.
1051 - self._task_queues.ebuild_locks.add(setup_phase)
1052 - else:
1053 - self._task_queues.merge.add(setup_phase)
1054 - self._schedule()
1055 -
1056 - def _schedule_unpack(self, unpack_phase):
1057 - """
1058 - Schedule an unpack phase on the unpack queue, in order
1059 - to serialize $DISTDIR access for live ebuilds.
1060 - """
1061 - self._task_queues.unpack.add(unpack_phase)
1062 -
1063 - def _find_blockers(self, new_pkg):
1064 - """
1065 - Returns a callable.
1066 - """
1067 - def get_blockers():
1068 - return self._find_blockers_impl(new_pkg)
1069 - return get_blockers
1070 -
def _find_blockers_impl(self, new_pkg):
    """
    Build dblink instances for the installed packages that new_pkg
    blocks.  Returns None when blocker checks are disabled by an option
    in _opts_ignore_blockers, otherwise a (possibly empty) list.
    """
    if self._opts_ignore_blockers.intersection(self.myopts):
        return None

    blocker_db = self._blocker_db[new_pkg.root]

    blocker_dblinks = []
    for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
        # A package does not block its own slot/version replacement.
        if new_pkg.slot_atom == blocking_pkg.slot_atom:
            continue
        if new_pkg.cpv == blocking_pkg.cpv:
            continue
        blocker_dblinks.append(portage.dblink(
            blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
            self.pkgsettings[blocking_pkg.root], treetype="vartree",
            vartree=self.trees[blocking_pkg.root]["vartree"]))

    return blocker_dblinks
def _generate_digests(self):
    """
    Generate digests if necessary for --digests or FEATURES=digest.
    In order to avoid interference, this must be done before parallel
    tasks are started.  Returns os.EX_OK on success, 1 on failure.
    """

    if '--fetchonly' in self.myopts:
        return os.EX_OK

    digest = '--digest' in self.myopts
    if not digest:
        # Digest generation may also be enabled per-root via FEATURES.
        for pkgsettings in self.pkgsettings.values():
            if pkgsettings.mycpv is not None:
                # ensure that we are using global features
                # settings rather than those from package.env
                pkgsettings.reset()
            if 'digest' in pkgsettings.features:
                digest = True
                break

    if not digest:
        return os.EX_OK

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != 'ebuild' or \
            x.operation != 'merge':
            continue
        pkgsettings = self.pkgsettings[x.root]
        if pkgsettings.mycpv is not None:
            # ensure that we are using global features
            # settings rather than those from package.env
            pkgsettings.reset()
        if '--digest' not in self.myopts and \
            'digest' not in pkgsettings.features:
            continue
        portdb = x.root_config.trees['porttree'].dbapi
        ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
        if ebuild_path is None:
            raise AssertionError("ebuild not found for '%s'" % x.cpv)
        # digestgen() operates on the ebuild's directory via ${O}.
        pkgsettings['O'] = os.path.dirname(ebuild_path)
        if not digestgen(mysettings=pkgsettings, myportdb=portdb):
            writemsg_level(
                "!!! Unable to generate manifest for '%s'.\n" \
                % x.cpv, level=logging.ERROR, noiselevel=-1)
            return 1

    return os.EX_OK
def _env_sanity_check(self):
    """
    Verify a sane environment before trying to build anything from source.
    Returns os.EX_OK when no source builds are scheduled or the
    environment looks sane; 1 otherwise.
    """
    have_src_pkg = False
    for x in self._mergelist:
        if isinstance(x, Package) and not x.built:
            have_src_pkg = True
            break

    if not have_src_pkg:
        return os.EX_OK

    # An unset ARCH usually indicates a missing/broken make.profile link.
    for settings in self.pkgsettings.values():
        for var in ("ARCH", ):
            value = settings.get(var)
            if value and value.strip():
                continue
            msg = _("%(var)s is not set... "
                "Are you missing the '%(configroot)setc/make.profile' symlink? "
                "Is the symlink correct? "
                "Is your portage tree complete?") % \
                {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"]}

            out = portage.output.EOutput()
            for line in textwrap.wrap(msg, 70):
                out.eerror(line)
            return 1

    return os.EX_OK
def _check_manifests(self):
    # Verify all the manifests now so that the user is notified of failure
    # as soon as possible.  Returns os.EX_OK, or 1 if any digest check
    # failed.  Skipped entirely unless FEATURES=strict applies.
    if "strict" not in self.settings.features or \
        "--fetchonly" in self.myopts or \
        "--fetch-all-uri" in self.myopts:
        return os.EX_OK

    shown_verifying_msg = False
    # Clone per-root settings with PORTAGE_QUIET so digestcheck() does
    # not spam the output for every package.
    quiet_settings = {}
    for myroot, pkgsettings in self.pkgsettings.items():
        quiet_config = portage.config(clone=pkgsettings)
        quiet_config["PORTAGE_QUIET"] = "1"
        quiet_config.backup_changes("PORTAGE_QUIET")
        quiet_settings[myroot] = quiet_config
        del quiet_config

    failures = 0

    for x in self._mergelist:
        if not isinstance(x, Package) or \
            x.type_name != "ebuild":
            continue

        if x.operation == "uninstall":
            continue

        if not shown_verifying_msg:
            shown_verifying_msg = True
            self._status_msg("Verifying ebuild manifests")

        root_config = x.root_config
        portdb = root_config.trees["porttree"].dbapi
        quiet_config = quiet_settings[root_config.root]
        ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
        if ebuild_path is None:
            raise AssertionError("ebuild not found for '%s'" % x.cpv)
        quiet_config["O"] = os.path.dirname(ebuild_path)
        if not digestcheck([], quiet_config, strict=True):
            failures |= 1

    if failures:
        return 1
    return os.EX_OK
1216 - def _add_prefetchers(self):
1217 -
1218 - if not self._parallel_fetch:
1219 - return
1220 -
1221 - if self._parallel_fetch:
1222 - self._status_msg("Starting parallel fetch")
1223 -
1224 - prefetchers = self._prefetchers
1225 - getbinpkg = "--getbinpkg" in self.myopts
1226 -
1227 - for pkg in self._mergelist:
1228 - # mergelist can contain solved Blocker instances
1229 - if not isinstance(pkg, Package) or pkg.operation == "uninstall":
1230 - continue
1231 - prefetcher = self._create_prefetcher(pkg)
1232 - if prefetcher is not None:
1233 - self._task_queues.fetch.add(prefetcher)
1234 - prefetchers[pkg] = prefetcher
1235 -
1236 - # Start the first prefetcher immediately so that self._task()
1237 - # won't discard it. This avoids a case where the first
1238 - # prefetcher is discarded, causing the second prefetcher to
1239 - # occupy the fetch queue before the first fetcher has an
1240 - # opportunity to execute.
1241 - self._task_queues.fetch.schedule()
1242 -
def _create_prefetcher(self, pkg):
    """
    @return: a prefetcher, or None if not applicable
    """
    prefetcher = None

    if not isinstance(pkg, Package):
        # Blockers and other non-package entries have nothing to fetch.
        pass

    elif pkg.type_name == "ebuild":

        # Background source fetch; config objects are borrowed from the
        # shared pool and returned when the task exits.
        prefetcher = EbuildFetcher(background=True,
            config_pool=self._ConfigPool(pkg.root,
            self._allocate_config, self._deallocate_config),
            fetchonly=1, logfile=self._fetch_log,
            pkg=pkg, prefetch=True, scheduler=self._sched_iface)

    elif pkg.type_name == "binary" and \
        "--getbinpkg" in self.myopts and \
        pkg.root_config.trees["bintree"].isremote(pkg.cpv):

        # Remote binary package: download it ahead of time.
        prefetcher = BinpkgPrefetcher(background=True,
            pkg=pkg, scheduler=self._sched_iface)

    return prefetcher
1269 - def _is_restart_scheduled(self):
1270 - """
1271 - Check if the merge list contains a replacement
1272 - for the current running instance, that will result
1273 - in restart after merge.
1274 - @rtype: bool
1275 - @returns: True if a restart is scheduled, False otherwise.
1276 - """
1277 - if self._opts_no_restart.intersection(self.myopts):
1278 - return False
1279 -
1280 - mergelist = self._mergelist
1281 -
1282 - for i, pkg in enumerate(mergelist):
1283 - if self._is_restart_necessary(pkg) and \
1284 - i != len(mergelist) - 1:
1285 - return True
1286 -
1287 - return False
1288 -
def _is_restart_necessary(self, pkg):
    """
    @return: True if merging the given package
    requires restart, False otherwise.
    """

    # Figure out if we need a restart.
    if pkg.root == self._running_root.root and \
        portage.match_from_list(
        portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
        # pkg is a portage replacement for the running root.
        if self._running_portage is None:
            return True
        elif pkg.cpv != self._running_portage.cpv or \
            '9999' in pkg.cpv or \
            'git' in pkg.inherited or \
            'git-2' in pkg.inherited:
            # Live (9999/git) versions may differ even with equal cpv,
            # so always restart for them.
            return True
    return False
def _restart_if_necessary(self, pkg):
    """
    Use execv() to restart emerge. This happens
    if portage upgrades itself and there are
    remaining packages in the list.
    """

    if self._opts_no_restart.intersection(self.myopts):
        return

    if not self._is_restart_necessary(pkg):
        return

    if pkg == self._mergelist[-1]:
        # Nothing left to merge after this one, so no restart needed.
        return

    self._main_loop_cleanup()

    logger = self._logger
    pkg_count = self._pkg_count
    mtimedb = self._mtimedb
    bad_resume_opts = self._bad_resume_opts

    logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
        (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))

    logger.log(" *** RESTARTING " + \
        "emerge via exec() after change of " + \
        "portage version.")

    # Persist the resume list (minus this package) before exec().
    mtimedb["resume"]["mergelist"].remove(list(pkg))
    mtimedb.commit()
    portage.run_exitfuncs()
    # Don't trust sys.argv[0] here because eselect-python may modify it.
    emerge_binary = os.path.join(portage.const.PORTAGE_BIN_PATH, 'emerge')
    mynewargv = [emerge_binary, "--resume"]
    resume_opts = self.myopts.copy()
    # For automatic resume, we need to prevent
    # any of bad_resume_opts from leaking in
    # via EMERGE_DEFAULT_OPTS.
    resume_opts["--ignore-default-opts"] = True
    for myopt, myarg in resume_opts.items():
        if myopt not in bad_resume_opts:
            if myarg is True:
                mynewargv.append(myopt)
            elif isinstance(myarg, list):
                # arguments like --exclude that use 'append' action
                for x in myarg:
                    mynewargv.append("%s=%s" % (myopt, x))
            else:
                mynewargv.append("%s=%s" % (myopt, myarg))
    # priority only needs to be adjusted on the first run
    os.environ["PORTAGE_NICENESS"] = "0"
    # Replace the current process; this call does not return.
    os.execv(mynewargv[0], mynewargv)
def _run_pkg_pretend(self):
    """
    Since pkg_pretend output may be important, this method sends all
    output directly to stdout (regardless of options like --quiet or
    --jobs).  Returns os.EX_OK, or 1 if any pkg_pretend phase failed.
    """

    failures = 0

    # Use a local PollScheduler instance here, since we don't
    # want tasks here to trigger the usual Scheduler callbacks
    # that handle job scheduling and status display.
    sched_iface = PollScheduler().sched_iface

    for x in self._mergelist:
        if not isinstance(x, Package):
            continue

        if x.operation == "uninstall":
            continue

        # pkg_pretend only exists in EAPI 4 and later.
        if x.metadata["EAPI"] in ("0", "1", "2", "3"):
            continue

        if "pretend" not in x.metadata.defined_phases:
            continue

        out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
        portage.util.writemsg_stdout(out_str, noiselevel=-1)

        root_config = x.root_config
        settings = self.pkgsettings[root_config.root]
        settings.setcpv(x)
        # Run the phase in a private temp dir; restored in the finally
        # block below.
        tmpdir = tempfile.mkdtemp()
        tmpdir_orig = settings["PORTAGE_TMPDIR"]
        settings["PORTAGE_TMPDIR"] = tmpdir

        try:
            if x.built:
                tree = "bintree"
                bintree = root_config.trees["bintree"].dbapi.bintree
                fetched = False

                # Display fetch on stdout, so that it's always clear what
                # is consuming time here.
                if bintree.isremote(x.cpv):
                    fetcher = BinpkgFetcher(pkg=x,
                        scheduler=sched_iface)
                    fetcher.start()
                    if fetcher.wait() != os.EX_OK:
                        failures += 1
                        continue
                    fetched = fetcher.pkg_path

                verifier = BinpkgVerifier(pkg=x,
                    scheduler=sched_iface)
                verifier.start()
                if verifier.wait() != os.EX_OK:
                    failures += 1
                    continue

                if fetched:
                    bintree.inject(x.cpv, filename=fetched)
                # Unpack build-info from the tbz2 so the ebuild and
                # environment are available for the pretend phase.
                tbz2_file = bintree.getname(x.cpv)
                infloc = os.path.join(tmpdir, x.category, x.pf, "build-info")
                os.makedirs(infloc)
                portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
                ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
                settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
                settings.configdict["pkg"]["MERGE_TYPE"] = "binary"

            else:
                tree = "porttree"
                portdb = root_config.trees["porttree"].dbapi
                ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
                if ebuild_path is None:
                    raise AssertionError("ebuild not found for '%s'" % x.cpv)
                settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
                if self._build_opts.buildpkgonly:
                    settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
                else:
                    settings.configdict["pkg"]["MERGE_TYPE"] = "source"

            portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
                "pretend", settings=settings,
                db=self.trees[settings["ROOT"]][tree].dbapi)
            prepare_build_dirs(root_config.root, settings, cleanup=0)

            vardb = root_config.trees['vartree'].dbapi
            settings["REPLACING_VERSIONS"] = " ".join(
                set(portage.versions.cpv_getversion(match) \
                for match in vardb.match(x.slot_atom) + \
                vardb.match('='+x.cpv)))
            pretend_phase = EbuildPhase(
                phase="pretend", scheduler=sched_iface,
                settings=settings)

            pretend_phase.start()
            ret = pretend_phase.wait()
            if ret != os.EX_OK:
                failures += 1
            portage.elog.elog_process(x.cpv, settings)
        finally:
            shutil.rmtree(tmpdir)
            settings["PORTAGE_TMPDIR"] = tmpdir_orig

    if failures:
        return 1
    return os.EX_OK
def merge(self):
    """
    Top-level merge driver: prepare per-root settings, run the
    pre-merge checks (digests, environment, manifests, pkg_pretend),
    run the main merge loop with --keep-going retries, then print the
    failure summary.  Returns os.EX_OK or a non-zero exit status.
    """
    if "--resume" in self.myopts:
        # We're resuming.
        portage.writemsg_stdout(
            colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
        self._logger.log(" *** Resuming merge...")

    self._save_resume_list()

    try:
        self._background = self._background_mode()
    except self._unknown_internal_error:
        return 1

    for root in self.trees:
        root_config = self.trees[root]["root_config"]

        # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
        # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
        # for ensuring sane $PWD (bug #239560) and storing elog messages.
        tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
        if not tmpdir or not os.path.isdir(tmpdir):
            msg = "The directory specified in your " + \
                "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
                "does not exist. Please create this " + \
                "directory or correct your PORTAGE_TMPDIR setting."
            msg = textwrap.wrap(msg, 70)
            out = portage.output.EOutput()
            for l in msg:
                out.eerror(l)
            return 1

        if self._background:
            # Settings must be unlocked to record the background flag.
            root_config.settings.unlock()
            root_config.settings["PORTAGE_BACKGROUND"] = "1"
            root_config.settings.backup_changes("PORTAGE_BACKGROUND")
            root_config.settings.lock()

        self.pkgsettings[root] = portage.config(
            clone=root_config.settings)

    keep_going = "--keep-going" in self.myopts
    fetchonly = self._build_opts.fetchonly
    mtimedb = self._mtimedb
    failed_pkgs = self._failed_pkgs

    rval = self._generate_digests()
    if rval != os.EX_OK:
        return rval

    rval = self._env_sanity_check()
    if rval != os.EX_OK:
        return rval

    # TODO: Immediately recalculate deps here if --keep-going
    # is enabled and corrupt manifests are detected.
    rval = self._check_manifests()
    if rval != os.EX_OK and not keep_going:
        return rval

    if not fetchonly:
        rval = self._run_pkg_pretend()
        if rval != os.EX_OK:
            return rval

    # Main merge loop; iterates more than once only with --keep-going,
    # recalculating the resume list after failures.
    while True:

        received_signal = []

        def sighandler(signum, frame):
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            signal.signal(signal.SIGTERM, signal.SIG_IGN)
            portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
                {"signal":signum})
            self.terminate()
            received_signal.append(128 + signum)

        earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
        earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)

        try:
            rval = self._merge()
        finally:
            # Restore previous handlers
            if earlier_sigint_handler is not None:
                signal.signal(signal.SIGINT, earlier_sigint_handler)
            else:
                signal.signal(signal.SIGINT, signal.SIG_DFL)
            if earlier_sigterm_handler is not None:
                signal.signal(signal.SIGTERM, earlier_sigterm_handler)
            else:
                signal.signal(signal.SIGTERM, signal.SIG_DFL)

        if received_signal:
            sys.exit(received_signal[0])

        if rval == os.EX_OK or fetchonly or not keep_going:
            break
        if "resume" not in mtimedb:
            break
        mergelist = self._mtimedb["resume"].get("mergelist")
        if not mergelist:
            break

        if not failed_pkgs:
            break

        # Remove the failed packages from the resume list and retry the
        # remainder.
        for failed_pkg in failed_pkgs:
            mergelist.remove(list(failed_pkg.pkg))

        self._failed_pkgs_all.extend(failed_pkgs)
        del failed_pkgs[:]

        if not mergelist:
            break

        if not self._calc_resume_list():
            break

        clear_caches(self.trees)
        if not self._mergelist:
            break

        self._save_resume_list()
        self._pkg_count.curval = 0
        self._pkg_count.maxval = len([x for x in self._mergelist \
            if isinstance(x, Package) and x.operation == "merge"])
        self._status_display.maxval = self._pkg_count.maxval

    self._logger.log(" *** Finished. Cleaning up...")

    if failed_pkgs:
        self._failed_pkgs_all.extend(failed_pkgs)
        del failed_pkgs[:]

    printer = portage.output.EOutput()
    background = self._background
    failure_log_shown = False
    if background and len(self._failed_pkgs_all) == 1:
        # If only one package failed then just show its
        # whole log for easy viewing.
        failed_pkg = self._failed_pkgs_all[-1]
        build_dir = failed_pkg.build_dir
        log_file = None
        log_file_real = None

        log_paths = [failed_pkg.build_log]

        log_path = self._locate_failure_log(failed_pkg)
        if log_path is not None:
            try:
                log_file = open(_unicode_encode(log_path,
                    encoding=_encodings['fs'], errors='strict'), mode='rb')
            except IOError:
                pass
            else:
                if log_path.endswith('.gz'):
                    # Wrap compressed logs so they stream as plain text.
                    log_file_real = log_file
                    log_file = gzip.GzipFile(filename='',
                        mode='rb', fileobj=log_file)

        if log_file is not None:
            try:
                for line in log_file:
                    writemsg_level(line, noiselevel=-1)
            except zlib.error as e:
                writemsg_level("%s\n" % (e,), level=logging.ERROR,
                    noiselevel=-1)
            finally:
                log_file.close()
                if log_file_real is not None:
                    log_file_real.close()
            failure_log_shown = True

    # Dump mod_echo output now since it tends to flood the terminal.
    # This allows us to avoid having more important output, generated
    # later, from being swept away by the mod_echo output.
    mod_echo_output = _flush_elog_mod_echo()

    if background and not failure_log_shown and \
        self._failed_pkgs_all and \
        self._failed_pkgs_die_msgs and \
        not mod_echo_output:

        for mysettings, key, logentries in self._failed_pkgs_die_msgs:
            root_msg = ""
            if mysettings["ROOT"] != "/":
                root_msg = " merged to %s" % mysettings["ROOT"]
            print()
            printer.einfo("Error messages for package %s%s:" % \
                (colorize("INFORM", key), root_msg))
            print()
            for phase in portage.const.EBUILD_PHASES:
                if phase not in logentries:
                    continue
                for msgtype, msgcontent in logentries[phase]:
                    if isinstance(msgcontent, basestring):
                        msgcontent = [msgcontent]
                    for line in msgcontent:
                        printer.eerror(line.strip("\n"))

    if self._post_mod_echo_msgs:
        for msg in self._post_mod_echo_msgs:
            msg()

    if len(self._failed_pkgs_all) > 1 or \
        (self._failed_pkgs_all and keep_going):
        if len(self._failed_pkgs_all) > 1:
            msg = "The following %d packages have " % \
                len(self._failed_pkgs_all) + \
                "failed to build or install:"
        else:
            msg = "The following package has " + \
                "failed to build or install:"

        printer.eerror("")
        for line in textwrap.wrap(msg, 72):
            printer.eerror(line)
        printer.eerror("")
        for failed_pkg in self._failed_pkgs_all:
            # Use _unicode_decode() to force unicode format string so
            # that Package.__unicode__() is called in python2.
            msg = _unicode_decode(" %s") % (failed_pkg.pkg,)
            log_path = self._locate_failure_log(failed_pkg)
            if log_path is not None:
                msg += ", Log file:"
            printer.eerror(msg)
            if log_path is not None:
                printer.eerror(" '%s'" % colorize('INFORM', log_path))
        printer.eerror("")

    if self._failed_pkgs_all:
        return 1
    return os.EX_OK
def _elog_listener(self, mysettings, key, logentries, fulltext):
    """
    elog callback: remember ERROR-level entries so they can be replayed
    in the failure summary at the end of the merge.
    """
    error_entries = portage.elog.filter_loglevels(logentries, ["ERROR"])
    if not error_entries:
        return
    self._failed_pkgs_die_msgs.append(
        (mysettings, key, error_entries))
1714 - def _locate_failure_log(self, failed_pkg):
1715 -
1716 - build_dir = failed_pkg.build_dir
1717 - log_file = None
1718 -
1719 - log_paths = [failed_pkg.build_log]
1720 -
1721 - for log_path in log_paths:
1722 - if not log_path:
1723 - continue
1724 -
1725 - try:
1726 - log_size = os.stat(log_path).st_size
1727 - except OSError:
1728 - continue
1729 -
1730 - if log_size == 0:
1731 - continue
1732 -
1733 - return log_path
1734 -
1735 - return None
1736 -
1737 - def _add_packages(self):
1738 - pkg_queue = self._pkg_queue
1739 - for pkg in self._mergelist:
1740 - if isinstance(pkg, Package):
1741 - pkg_queue.append(pkg)
1742 - elif isinstance(pkg, Blocker):
1743 - pass
1744 -
def _system_merge_started(self, merge):
    """
    Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
    In general, this keeps track of installed system packages with
    unsatisfied RDEPEND or PDEPEND (circular dependencies). It can be
    a fragile situation, so we don't execute any unrelated builds until
    the circular dependencies are built and installed.
    """
    graph = self._digraph
    if graph is None:
        return
    pkg = merge.merge.pkg

    # Skip this if $ROOT != / since it shouldn't matter if there
    # are unsatisfied system runtime deps in this case.
    if pkg.root != '/':
        return

    completed_tasks = self._completed_tasks
    unsatisfied = self._unsatisfied_system_deps

    def ignore_non_runtime_or_satisfied(priority):
        """
        Ignore non-runtime and satisfied runtime priorities.
        """
        if isinstance(priority, DepPriority) and \
            not priority.satisfied and \
            (priority.runtime or priority.runtime_post):
            return False
        return True

    # When checking for unsatisfied runtime deps, only check
    # direct deps since indirect deps are checked when the
    # corresponding parent is merged.
    for child in graph.child_nodes(pkg,
        ignore_priority=ignore_non_runtime_or_satisfied):
        if not isinstance(child, Package) or \
            child.operation == 'uninstall':
            continue
        if child is pkg:
            continue
        if child.operation == 'merge' and \
            child not in completed_tasks:
            unsatisfied.add(child)
def _merge_wait_exit_handler(self, task):
    # The task was serialized via _merge_wait_scheduled; drop it from
    # that list before running the common merge-exit handling.
    self._merge_wait_scheduled.remove(task)
    self._merge_exit(task)
def _merge_exit(self, merge):
    """
    Common exit handling for a completed merge task: release its
    settings back to the pool, update the status display and let the
    scheduler start more work.
    """
    self._running_tasks.pop(id(merge), None)
    self._do_merge_exit(merge)
    self._deallocate_config(merge.merge.settings)
    # Only successful merges of packages that were not already installed
    # advance the progress counter.
    if merge.returncode == os.EX_OK and \
        not merge.merge.pkg.installed:
        self._status_display.curval += 1
    self._status_display.merges = len(self._task_queues.merge)
    self._schedule()
1804 - def _do_merge_exit(self, merge):
1805 - pkg = merge.merge.pkg
1806 - settings = merge.merge.settings
1807 - trees = self.trees
1808 - init_buildlog = gobs_buildlog()
1809 - if merge.returncode != os.EX_OK:
1810 - build_dir = settings.get("PORTAGE_BUILDDIR")
1811 - build_log = settings.get("PORTAGE_LOG_FILE")
1812 -
1813 - self._failed_pkgs.append(self._failed_pkg(
1814 - build_dir=build_dir, build_log=build_log,
1815 - pkg=pkg,
1816 - returncode=merge.returncode))
1817 - if not self._terminated_tasks:
1818 - self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
1819 - self._status_display.failed = len(self._failed_pkgs)
1820 - init_buildlog.add_buildlog_main(settings, pkg, trees)
1821 - return
1822 -
1823 - self._task_complete(pkg)
1824 - pkg_to_replace = merge.merge.pkg_to_replace
1825 - if pkg_to_replace is not None:
1826 - # When a package is replaced, mark it's uninstall
1827 - # task complete (if any).
1828 - if self._digraph is not None and \
1829 - pkg_to_replace in self._digraph:
1830 - try:
1831 - self._pkg_queue.remove(pkg_to_replace)
1832 - except ValueError:
1833 - pass
1834 - self._task_complete(pkg_to_replace)
1835 - else:
1836 - self._pkg_cache.pop(pkg_to_replace, None)
1837 -
1838 - if pkg.installed:
1839 - init_buildlog.add_buildlog_main(settings, pkg, trees)
1840 - return
1841 -
1842 - self._restart_if_necessary(pkg)
1843 -
1844 - # Call mtimedb.commit() after each merge so that
1845 - # --resume still works after being interrupted
1846 - # by reboot, sigkill or similar.
1847 - mtimedb = self._mtimedb
1848 - mtimedb["resume"]["mergelist"].remove(list(pkg))
1849 - if not mtimedb["resume"]["mergelist"]:
1850 - del mtimedb["resume"]
1851 - mtimedb.commit()
1852 - init_buildlog.add_buildlog_main(settings, pkg, trees)
1853 -
1854 - def _build_exit(self, build):
1855 - self._running_tasks.pop(id(build), None)
1856 - if build.returncode == os.EX_OK and self._terminated_tasks:
1857 - # We've been interrupted, so we won't
1858 - # add this to the merge queue.
1859 - self.curval += 1
1860 - self._deallocate_config(build.settings)
1861 - elif build.returncode == os.EX_OK:
1862 - self.curval += 1
1863 - merge = PackageMerge(merge=build)
1864 - self._running_tasks[id(merge)] = merge
1865 - if not build.build_opts.buildpkgonly and \
1866 - build.pkg in self._deep_system_deps:
1867 - # Since dependencies on system packages are frequently
1868 - # unspecified, merge them only when no builds are executing.
1869 - self._merge_wait_queue.append(merge)
1870 - merge.addStartListener(self._system_merge_started)
1871 - else:
1872 - merge.addExitListener(self._merge_exit)
1873 - self._task_queues.merge.add(merge)
1874 - self._status_display.merges = len(self._task_queues.merge)
1875 - else:
1876 - settings = build.settings
1877 - trees = self.trees
1878 - pkg=build.pkg
1879 - init_buildlog = gobs_buildlog()
1880 - build_dir = settings.get("PORTAGE_BUILDDIR")
1881 - build_log = settings.get("PORTAGE_LOG_FILE")
1882 -
1883 - self._failed_pkgs.append(self._failed_pkg(
1884 - build_dir=build_dir, build_log=build_log,
1885 - pkg=pkg, returncode=build.returncode))
1886 - if not self._terminated_tasks:
1887 - self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
1888 - self._status_display.failed = len(self._failed_pkgs)
1889 - self._deallocate_config(build.settings)
1890 - init_buildlog.add_buildlog_main(settings, pkg, trees)
1891 - self._jobs -= 1
1892 - self._status_display.running = self._jobs
1893 - self._schedule()
1894 -
1895 - def _extract_exit(self, build):
1896 - self._build_exit(build)
1897 -
1898 - def _task_complete(self, pkg):
1899 - self._completed_tasks.add(pkg)
1900 - self._unsatisfied_system_deps.discard(pkg)
1901 - self._choose_pkg_return_early = False
1902 - blocker_db = self._blocker_db[pkg.root]
1903 - blocker_db.discardBlocker(pkg)
1904 -
1905 - def _merge(self):
1906 -
1907 - self._add_prefetchers()
1908 - self._add_packages()
1909 - pkg_queue = self._pkg_queue
1910 - failed_pkgs = self._failed_pkgs
1911 - portage.locks._quiet = self._background
1912 - portage.elog.add_listener(self._elog_listener)
1913 - rval = os.EX_OK
1914 -
1915 - try:
1916 - self._main_loop()
1917 - finally:
1918 - self._main_loop_cleanup()
1919 - portage.locks._quiet = False
1920 - portage.elog.remove_listener(self._elog_listener)
1921 - if failed_pkgs:
1922 - rval = failed_pkgs[-1].returncode
1923 -
1924 - return rval
1925 -
1926 - def _main_loop_cleanup(self):
1927 - del self._pkg_queue[:]
1928 - self._completed_tasks.clear()
1929 - self._deep_system_deps.clear()
1930 - self._unsatisfied_system_deps.clear()
1931 - self._choose_pkg_return_early = False
1932 - self._status_display.reset()
1933 - self._digraph = None
1934 - self._task_queues.fetch.clear()
1935 - self._prefetchers.clear()
1936 -
1937 - def _choose_pkg(self):
1938 - """
1939 - Choose a task that has all its dependencies satisfied. This is used
1940 - for parallel build scheduling, and ensures that we don't build
1941 - anything with deep dependencies that have yet to be merged.
1942 - """
1943 -
1944 - if self._choose_pkg_return_early:
1945 - return None
1946 -
1947 - if self._digraph is None:
1948 - if self._is_work_scheduled() and \
1949 - not ("--nodeps" in self.myopts and \
1950 - (self._max_jobs is True or self._max_jobs > 1)):
1951 - self._choose_pkg_return_early = True
1952 - return None
1953 - return self._pkg_queue.pop(0)
1954 -
1955 - if not self._is_work_scheduled():
1956 - return self._pkg_queue.pop(0)
1957 -
1958 - self._prune_digraph()
1959 -
1960 - chosen_pkg = None
1961 -
1962 - # Prefer uninstall operations when available.
1963 - graph = self._digraph
1964 - for pkg in self._pkg_queue:
1965 - if pkg.operation == 'uninstall' and \
1966 - not graph.child_nodes(pkg):
1967 - chosen_pkg = pkg
1968 - break
1969 -
1970 - if chosen_pkg is None:
1971 - later = set(self._pkg_queue)
1972 - for pkg in self._pkg_queue:
1973 - later.remove(pkg)
1974 - if not self._dependent_on_scheduled_merges(pkg, later):
1975 - chosen_pkg = pkg
1976 - break
1977 -
1978 - if chosen_pkg is not None:
1979 - self._pkg_queue.remove(chosen_pkg)
1980 -
1981 - if chosen_pkg is None:
1982 - # There's no point in searching for a package to
1983 - # choose until at least one of the existing jobs
1984 - # completes.
1985 - self._choose_pkg_return_early = True
1986 -
1987 - return chosen_pkg
1988 -
1989 - def _dependent_on_scheduled_merges(self, pkg, later):
1990 - """
1991 - Traverse the subgraph of the given packages deep dependencies
1992 - to see if it contains any scheduled merges.
1993 - @param pkg: a package to check dependencies for
1994 - @type pkg: Package
1995 - @param later: packages for which dependence should be ignored
1996 - since they will be merged later than pkg anyway and therefore
1997 - delaying the merge of pkg will not result in a more optimal
1998 - merge order
1999 - @type later: set
2000 - @rtype: bool
2001 - @returns: True if the package is dependent, False otherwise.
2002 - """
2003 -
2004 - graph = self._digraph
2005 - completed_tasks = self._completed_tasks
2006 -
2007 - dependent = False
2008 - traversed_nodes = set([pkg])
2009 - direct_deps = graph.child_nodes(pkg)
2010 - node_stack = direct_deps
2011 - direct_deps = frozenset(direct_deps)
2012 - while node_stack:
2013 - node = node_stack.pop()
2014 - if node in traversed_nodes:
2015 - continue
2016 - traversed_nodes.add(node)
2017 - if not ((node.installed and node.operation == "nomerge") or \
2018 - (node.operation == "uninstall" and \
2019 - node not in direct_deps) or \
2020 - node in completed_tasks or \
2021 - node in later):
2022 - dependent = True
2023 - break
2024 -
2025 - # Don't traverse children of uninstall nodes since
2026 - # those aren't dependencies in the usual sense.
2027 - if node.operation != "uninstall":
2028 - node_stack.extend(graph.child_nodes(node))
2029 -
2030 - return dependent
2031 -
2032 - def _allocate_config(self, root):
2033 - """
2034 - Allocate a unique config instance for a task in order
2035 - to prevent interference between parallel tasks.
2036 - """
2037 - if self._config_pool[root]:
2038 - temp_settings = self._config_pool[root].pop()
2039 - else:
2040 - temp_settings = portage.config(clone=self.pkgsettings[root])
2041 - # Since config.setcpv() isn't guaranteed to call config.reset() due to
2042 - # performance reasons, call it here to make sure all settings from the
2043 - # previous package get flushed out (such as PORTAGE_LOG_FILE).
2044 - temp_settings.reload()
2045 - temp_settings.reset()
2046 - return temp_settings
2047 -
2048 - def _deallocate_config(self, settings):
2049 - self._config_pool[settings["ROOT"]].append(settings)
2050 -
2051 - def _main_loop(self):
2052 -
2053 - # Only allow 1 job max if a restart is scheduled
2054 - # due to portage update.
2055 - if self._is_restart_scheduled() or \
2056 - self._opts_no_background.intersection(self.myopts):
2057 - self._set_max_jobs(1)
2058 -
2059 - while self._schedule():
2060 - self._poll_loop()
2061 -
2062 - while True:
2063 - self._schedule()
2064 - if not self._is_work_scheduled():
2065 - break
2066 - self._poll_loop()
2067 -
2068 - def _keep_scheduling(self):
2069 - return bool(not self._terminated_tasks and self._pkg_queue and \
2070 - not (self._failed_pkgs and not self._build_opts.fetchonly))
2071 -
2072 - def _is_work_scheduled(self):
2073 - return bool(self._running_tasks)
2074 -
2075 - def _schedule_tasks(self):
2076 -
2077 - while True:
2078 -
2079 - # When the number of jobs and merges drops to zero,
2080 - # process a single merge from _merge_wait_queue if
2081 - # it's not empty. We only process one since these are
2082 - # special packages and we want to ensure that
2083 - # parallel-install does not cause more than one of
2084 - # them to install at the same time.
2085 - if (self._merge_wait_queue and not self._jobs and
2086 - not self._task_queues.merge):
2087 - task = self._merge_wait_queue.popleft()
2088 - task.addExitListener(self._merge_wait_exit_handler)
2089 - self._task_queues.merge.add(task)
2090 - self._status_display.merges = len(self._task_queues.merge)
2091 - self._merge_wait_scheduled.append(task)
2092 -
2093 - self._schedule_tasks_imp()
2094 - self._status_display.display()
2095 -
2096 - state_change = 0
2097 - for q in self._task_queues.values():
2098 - if q.schedule():
2099 - state_change += 1
2100 -
2101 - # Cancel prefetchers if they're the only reason
2102 - # the main poll loop is still running.
2103 - if self._failed_pkgs and not self._build_opts.fetchonly and \
2104 - not self._is_work_scheduled() and \
2105 - self._task_queues.fetch:
2106 - self._task_queues.fetch.clear()
2107 - state_change += 1
2108 -
2109 - if not (state_change or \
2110 - (self._merge_wait_queue and not self._jobs and
2111 - not self._task_queues.merge)):
2112 - break
2113 -
2114 - return self._keep_scheduling()
2115 -
2116 - def _job_delay(self):
2117 - """
2118 - @rtype: bool
2119 - @returns: True if job scheduling should be delayed, False otherwise.
2120 - """
2121 -
2122 - if self._jobs and self._max_load is not None:
2123 -
2124 - current_time = time.time()
2125 -
2126 - delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
2127 - if delay > self._job_delay_max:
2128 - delay = self._job_delay_max
2129 - if (current_time - self._previous_job_start_time) < delay:
2130 - return True
2131 -
2132 - return False
2133 -
2134 - def _schedule_tasks_imp(self):
2135 - """
2136 - @rtype: bool
2137 - @returns: True if state changed, False otherwise.
2138 - """
2139 -
2140 - state_change = 0
2141 -
2142 - while True:
2143 -
2144 - if not self._keep_scheduling():
2145 - return bool(state_change)
2146 -
2147 - if self._choose_pkg_return_early or \
2148 - self._merge_wait_scheduled or \
2149 - (self._jobs and self._unsatisfied_system_deps) or \
2150 - not self._can_add_job() or \
2151 - self._job_delay():
2152 - return bool(state_change)
2153 -
2154 - pkg = self._choose_pkg()
2155 - if pkg is None:
2156 - return bool(state_change)
2157 -
2158 - state_change += 1
2159 -
2160 - if not pkg.installed:
2161 - self._pkg_count.curval += 1
2162 -
2163 - task = self._task(pkg)
2164 -
2165 - if pkg.installed:
2166 - merge = PackageMerge(merge=task)
2167 - self._running_tasks[id(merge)] = merge
2168 - merge.addExitListener(self._merge_exit)
2169 - self._task_queues.merge.addFront(merge)
2170 -
2171 - elif pkg.built:
2172 - self._jobs += 1
2173 - self._previous_job_start_time = time.time()
2174 - self._status_display.running = self._jobs
2175 - self._running_tasks[id(task)] = task
2176 - task.addExitListener(self._extract_exit)
2177 - self._task_queues.jobs.add(task)
2178 -
2179 - else:
2180 - self._jobs += 1
2181 - self._previous_job_start_time = time.time()
2182 - self._status_display.running = self._jobs
2183 - self._running_tasks[id(task)] = task
2184 - task.addExitListener(self._build_exit)
2185 - self._task_queues.jobs.add(task)
2186 -
2187 - return bool(state_change)
2188 -
2189 - def _task(self, pkg):
2190 -
2191 - pkg_to_replace = None
2192 - if pkg.operation != "uninstall":
2193 - vardb = pkg.root_config.trees["vartree"].dbapi
2194 - previous_cpv = [x for x in vardb.match(pkg.slot_atom) \
2195 - if portage.cpv_getkey(x) == pkg.cp]
2196 - if not previous_cpv and vardb.cpv_exists(pkg.cpv):
2197 - # same cpv, different SLOT
2198 - previous_cpv = [pkg.cpv]
2199 - if previous_cpv:
2200 - previous_cpv = previous_cpv.pop()
2201 - pkg_to_replace = self._pkg(previous_cpv,
2202 - "installed", pkg.root_config, installed=True,
2203 - operation="uninstall")
2204 -
2205 - prefetcher = self._prefetchers.pop(pkg, None)
2206 - if prefetcher is not None and not prefetcher.isAlive():
2207 - try:
2208 - self._task_queues.fetch._task_queue.remove(prefetcher)
2209 - except ValueError:
2210 - pass
2211 - prefetcher = None
2212 -
2213 - task = MergeListItem(args_set=self._args_set,
2214 - background=self._background, binpkg_opts=self._binpkg_opts,
2215 - build_opts=self._build_opts,
2216 - config_pool=self._ConfigPool(pkg.root,
2217 - self._allocate_config, self._deallocate_config),
2218 - emerge_opts=self.myopts,
2219 - find_blockers=self._find_blockers(pkg), logger=self._logger,
2220 - mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
2221 - pkg_to_replace=pkg_to_replace,
2222 - prefetcher=prefetcher,
2223 - scheduler=self._sched_iface,
2224 - settings=self._allocate_config(pkg.root),
2225 - statusMessage=self._status_msg,
2226 - world_atom=self._world_atom)
2227 -
2228 - return task
2229 -
2230 - def _failed_pkg_msg(self, failed_pkg, action, preposition):
2231 - pkg = failed_pkg.pkg
2232 - msg = "%s to %s %s" % \
2233 - (bad("Failed"), action, colorize("INFORM", pkg.cpv))
2234 - if pkg.root != "/":
2235 - msg += " %s %s" % (preposition, pkg.root)
2236 -
2237 - log_path = self._locate_failure_log(failed_pkg)
2238 - if log_path is not None:
2239 - msg += ", Log file:"
2240 - self._status_msg(msg)
2241 -
2242 - if log_path is not None:
2243 - self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
2244 -
2245 - def _status_msg(self, msg):
2246 - """
2247 - Display a brief status message (no newlines) in the status display.
2248 - This is called by tasks to provide feedback to the user. This
2249 - delegates the resposibility of generating \r and \n control characters,
2250 - to guarantee that lines are created or erased when necessary and
2251 - appropriate.
2252 -
2253 - @type msg: str
2254 - @param msg: a brief status message (no newlines allowed)
2255 - """
2256 - if not self._background:
2257 - writemsg_level("\n")
2258 - self._status_display.displayMessage(msg)
2259 -
2260 - def _save_resume_list(self):
2261 - """
2262 - Do this before verifying the ebuild Manifests since it might
2263 - be possible for the user to use --resume --skipfirst get past
2264 - a non-essential package with a broken digest.
2265 - """
2266 - mtimedb = self._mtimedb
2267 -
2268 - mtimedb["resume"] = {}
2269 - # Stored as a dict starting with portage-2.1.6_rc1, and supported
2270 - # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
2271 - # a list type for options.
2272 - mtimedb["resume"]["myopts"] = self.myopts.copy()
2273 -
2274 - # Convert Atom instances to plain str.
2275 - mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
2276 - mtimedb["resume"]["mergelist"] = [list(x) \
2277 - for x in self._mergelist \
2278 - if isinstance(x, Package) and x.operation == "merge"]
2279 -
2280 - mtimedb.commit()
2281 -
2282 - def _calc_resume_list(self):
2283 - """
2284 - Use the current resume list to calculate a new one,
2285 - dropping any packages with unsatisfied deps.
2286 - @rtype: bool
2287 - @returns: True if successful, False otherwise.
2288 - """
2289 - print(colorize("GOOD", "*** Resuming merge..."))
2290 -
2291 - # free some memory before creating
2292 - # the resume depgraph
2293 - self._destroy_graph()
2294 -
2295 - myparams = create_depgraph_params(self.myopts, None)
2296 - success = False
2297 - e = None
2298 - try:
2299 - success, mydepgraph, dropped_tasks = resume_depgraph(
2300 - self.settings, self.trees, self._mtimedb, self.myopts,
2301 - myparams, self._spinner)
2302 - except depgraph.UnsatisfiedResumeDep as exc:
2303 - # rename variable to avoid python-3.0 error:
2304 - # SyntaxError: can not delete variable 'e' referenced in nested
2305 - # scope
2306 - e = exc
2307 - mydepgraph = e.depgraph
2308 - dropped_tasks = set()
2309 -
2310 - if e is not None:
2311 - def unsatisfied_resume_dep_msg():
2312 - mydepgraph.display_problems()
2313 - out = portage.output.EOutput()
2314 - out.eerror("One or more packages are either masked or " + \
2315 - "have missing dependencies:")
2316 - out.eerror("")
2317 - indent = " "
2318 - show_parents = set()
2319 - for dep in e.value:
2320 - if dep.parent in show_parents:
2321 - continue
2322 - show_parents.add(dep.parent)
2323 - if dep.atom is None:
2324 - out.eerror(indent + "Masked package:")
2325 - out.eerror(2 * indent + str(dep.parent))
2326 - out.eerror("")
2327 - else:
2328 - out.eerror(indent + str(dep.atom) + " pulled in by:")
2329 - out.eerror(2 * indent + str(dep.parent))
2330 - out.eerror("")
2331 - msg = "The resume list contains packages " + \
2332 - "that are either masked or have " + \
2333 - "unsatisfied dependencies. " + \
2334 - "Please restart/continue " + \
2335 - "the operation manually, or use --skipfirst " + \
2336 - "to skip the first package in the list and " + \
2337 - "any other packages that may be " + \
2338 - "masked or have missing dependencies."
2339 - for line in textwrap.wrap(msg, 72):
2340 - out.eerror(line)
2341 - self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
2342 - return False
2343 -
2344 - if success and self._show_list():
2345 - mylist = mydepgraph.altlist()
2346 - if mylist:
2347 - if "--tree" in self.myopts:
2348 - mylist.reverse()
2349 - mydepgraph.display(mylist, favorites=self._favorites)
2350 -
2351 - if not success:
2352 - self._post_mod_echo_msgs.append(mydepgraph.display_problems)
2353 - return False
2354 - mydepgraph.display_problems()
2355 - self._init_graph(mydepgraph.schedulerGraph())
2356 -
2357 - msg_width = 75
2358 - for task in dropped_tasks:
2359 - if not (isinstance(task, Package) and task.operation == "merge"):
2360 - continue
2361 - pkg = task
2362 - msg = "emerge --keep-going:" + \
2363 - " %s" % (pkg.cpv,)
2364 - if pkg.root != "/":
2365 - msg += " for %s" % (pkg.root,)
2366 - msg += " dropped due to unsatisfied dependency."
2367 - for line in textwrap.wrap(msg, msg_width):
2368 - eerror(line, phase="other", key=pkg.cpv)
2369 - settings = self.pkgsettings[pkg.root]
2370 - # Ensure that log collection from $T is disabled inside
2371 - # elog_process(), since any logs that might exist are
2372 - # not valid here.
2373 - settings.pop("T", None)
2374 - portage.elog.elog_process(pkg.cpv, settings)
2375 - self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
2376 -
2377 - return True
2378 -
2379 - def _show_list(self):
2380 - myopts = self.myopts
2381 - if "--quiet" not in myopts and \
2382 - ("--ask" in myopts or "--tree" in myopts or \
2383 - "--verbose" in myopts):
2384 - return True
2385 - return False
2386 -
2387 - def _world_atom(self, pkg):
2388 - """
2389 - Add or remove the package to the world file, but only if
2390 - it's supposed to be added or removed. Otherwise, do nothing.
2391 - """
2392 -
2393 - if set(("--buildpkgonly", "--fetchonly",
2394 - "--fetch-all-uri",
2395 - "--oneshot", "--onlydeps",
2396 - "--pretend")).intersection(self.myopts):
2397 - return
2398 -
2399 - if pkg.root != self.target_root:
2400 - return
2401 -
2402 - args_set = self._args_set
2403 - if not args_set.findAtomForPackage(pkg):
2404 - return
2405 -
2406 - logger = self._logger
2407 - pkg_count = self._pkg_count
2408 - root_config = pkg.root_config
2409 - world_set = root_config.sets["selected"]
2410 - world_locked = False
2411 - if hasattr(world_set, "lock"):
2412 - world_set.lock()
2413 - world_locked = True
2414 -
2415 - try:
2416 - if hasattr(world_set, "load"):
2417 - world_set.load() # maybe it's changed on disk
2418 -
2419 - if pkg.operation == "uninstall":
2420 - if hasattr(world_set, "cleanPackage"):
2421 - world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
2422 - pkg.cpv)
2423 - if hasattr(world_set, "remove"):
2424 - for s in pkg.root_config.setconfig.active:
2425 - world_set.remove(SETPREFIX+s)
2426 - else:
2427 - atom = create_world_atom(pkg, args_set, root_config)
2428 - if atom:
2429 - if hasattr(world_set, "add"):
2430 - self._status_msg(('Recording %s in "world" ' + \
2431 - 'favorites file...') % atom)
2432 - logger.log(" === (%s of %s) Updating world file (%s)" % \
2433 - (pkg_count.curval, pkg_count.maxval, pkg.cpv))
2434 - world_set.add(atom)
2435 - else:
2436 - writemsg_level('\n!!! Unable to record %s in "world"\n' % \
2437 - (atom,), level=logging.WARN, noiselevel=-1)
2438 - finally:
2439 - if world_locked:
2440 - world_set.unlock()
2441 -
2442 - def _pkg(self, cpv, type_name, root_config, installed=False,
2443 - operation=None, myrepo=None):
2444 - """
2445 - Get a package instance from the cache, or create a new
2446 - one if necessary. Raises KeyError from aux_get if it
2447 - failures for some reason (package does not exist or is
2448 - corrupt).
2449 - """
2450 -
2451 - # Reuse existing instance when available.
2452 - pkg = self._pkg_cache.get(Package._gen_hash_key(cpv=cpv,
2453 - type_name=type_name, repo_name=myrepo, root_config=root_config,
2454 - installed=installed, operation=operation))
2455 -
2456 - if pkg is not None:
2457 - return pkg
2458 -
2459 - tree_type = depgraph.pkg_tree_map[type_name]
2460 - db = root_config.trees[tree_type].dbapi
2461 - db_keys = list(self.trees[root_config.root][
2462 - tree_type].dbapi._aux_cache_keys)
2463 - metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
2464 - pkg = Package(built=(type_name != "ebuild"),
2465 - cpv=cpv, installed=installed, metadata=metadata,
2466 - root_config=root_config, type_name=type_name)
2467 - self._pkg_cache[pkg] = pkg
2468 - return pkg
2469
2470 diff --git a/gobs/pym/arch.py~ b/gobs/pym/arch.py~
2471 deleted file mode 100644
2472 index ebd0017..0000000
2473 --- a/gobs/pym/arch.py~
2474 +++ /dev/null
2475 @@ -1,25 +0,0 @@
2476 -import portage
2477 -from gobs.readconf import get_conf_settings
2478 -reader=get_conf_settings()
2479 -gobs_settings_dict=reader.read_gobs_settings_all()
2480 -# make a CM
2481 -from gobs.ConnectionManager import connectionManager
2482 -CM=connectionManager(gobs_settings_dict)
2483 -#selectively import the pgsql/mysql querys
2484 -if CM.getName()=='pgsql':
2485 - from gobs.pgsql import *
2486 -
2487 -class gobs_arch(object):
2488 -
2489 - def update_arch_db(self):
2490 - conn = CM.getConnection()
2491 - # FIXME: check for new keyword
2492 - # Add arch db (keywords)
2493 - if get_arch_db(conn) is None:
2494 - arch_list = portage.archlist
2495 - for arch in arch_list:
2496 - if arch[0] not in ["~","-"]:
2497 - arch_list.append("-" + arch)
2498 - arch_list.append("-*")
2499 - add_new_arch_db(conn,arch_list)
2500 - CM.putConnection(conn)
2501 \ No newline at end of file
2502
2503 diff --git a/gobs/pym/build_log.py~ b/gobs/pym/build_log.py~
2504 deleted file mode 100644
2505 index 80a186a..0000000
2506 --- a/gobs/pym/build_log.py~
2507 +++ /dev/null
2508 @@ -1,572 +0,0 @@
2509 -from __future__ import print_function
2510 -import re
2511 -import os
2512 -import platform
2513 -try:
2514 - from subprocess import getstatusoutput as subprocess_getstatusoutput
2515 -except ImportError:
2516 - from commands import getstatusoutput as subprocess_getstatusoutput
2517 -from gobs.text import get_log_text_list
2518 -from _emerge.main import parse_opts, load_emerge_config, \
2519 - getportageversion
2520 -from portage.util import writemsg, \
2521 - writemsg_level, writemsg_stdout
2522 -from _emerge.actions import _info_pkgs_ver
2523 -from portage.exception import InvalidAtom
2524 -from portage.dep import Atom
2525 -from portage.dbapi._expand_new_virt import expand_new_virt
2526 -from portage.const import GLOBAL_CONFIG_PATH, NEWS_LIB_PATH
2527 -from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_SET_CONFIG
2528 -from portage.versions import catpkgsplit, cpv_getversion
2529 -from portage import _encodings
2530 -from portage import _unicode_encode
2531 -from gobs.repoman_gobs import gobs_repoman
2532 -import portage
2533 -from gobs.package import gobs_package
2534 -from gobs.readconf import get_conf_settings
2535 -from gobs.flags import gobs_use_flags
2536 -reader=get_conf_settings()
2537 -gobs_settings_dict=reader.read_gobs_settings_all()
2538 -# make a CM
2539 -from gobs.ConnectionManager import connectionManager
2540 -CM=connectionManager(gobs_settings_dict)
2541 -#selectively import the pgsql/mysql querys
2542 -if CM.getName()=='pgsql':
2543 - from gobs.pgsql import *
2544 -
2545 -class gobs_buildlog(object):
2546 -
2547 - def __init__(self):
2548 - self._config_profile = gobs_settings_dict['gobs_config']
2549 -
2550 - def get_build_dict_db(self, settings, pkg):
2551 - conn=CM.getConnection()
2552 - myportdb = portage.portdbapi(mysettings=settings)
2553 - cpvr_list = catpkgsplit(pkg.cpv, silent=1)
2554 - categories = cpvr_list[0]
2555 - package = cpvr_list[1]
2556 - ebuild_version = cpv_getversion(pkg.cpv)
2557 - print('cpv: ' + pkg.cpv)
2558 - init_package = gobs_package(settings, myportdb)
2559 - package_id = have_package_db(conn, categories, package)
2560 - # print("package_id %s" % package_id, file=sys.stdout)
2561 - build_dict = {}
2562 - mybuild_dict = {}
2563 - build_dict['ebuild_version'] = ebuild_version
2564 - build_dict['package_id'] = package_id
2565 - build_dict['cpv'] = pkg.cpv
2566 - build_dict['categories'] = categories
2567 - build_dict['package'] = package
2568 - build_dict['config_profile'] = self._config_profile
2569 - init_useflags = gobs_use_flags(settings, myportdb, pkg.cpv)
2570 - iuse_flags_list, final_use_list = init_useflags.get_flags_pkg(pkg, settings)
2571 - #print 'final_use_list', final_use_list
2572 - if final_use_list != []:
2573 - build_dict['build_useflags'] = sorted(final_use_list)
2574 - else:
2575 - build_dict['build_useflags'] = None
2576 - #print "build_dict['build_useflags']", build_dict['build_useflags']
2577 - pkgdir = os.path.join(settings['PORTDIR'], categories + "/" + package)
2578 - ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir+ "/" + package + "-" + ebuild_version + ".ebuild")[0]
2579 - build_dict['checksum'] = ebuild_version_checksum_tree
2580 - ebuild_id = get_ebuild_id_db_checksum(conn, build_dict)
2581 - if ebuild_id is None:
2582 - #print 'have any ebuild', get_ebuild_checksum(conn, package_id, ebuild_version)
2583 - init_package.update_ebuild_db(build_dict)
2584 - ebuild_id = get_ebuild_id_db_checksum(conn, build_dict)
2585 - build_dict['ebuild_id'] = ebuild_id
2586 - queue_id = check_revision(conn, build_dict)
2587 - if queue_id is None:
2588 - build_dict['queue_id'] = None
2589 - else:
2590 - build_dict['queue_id'] = queue_id
2591 - CM.putConnection(conn)
2592 - return build_dict
2593 -
2594 - def add_new_ebuild_buildlog(self, settings, pkg, build_dict, build_error, summary_error, build_log_dict):
2595 - conn=CM.getConnection()
2596 - portdb = portage.portdbapi(mysettings=settings)
2597 - init_useflags = gobs_use_flags(settings, portdb, build_dict['cpv'])
2598 - iuse_flags_list, final_use_list = init_useflags.get_flags_pkg(pkg, settings)
2599 - iuse = []
2600 - use_flags_list = []
2601 - use_enable_list = []
2602 - for iuse_line in iuse_flags_list:
2603 - iuse.append(init_useflags.reduce_flag(iuse_line))
2604 - iuse_flags_list2 = list(set(iuse))
2605 - use_enable = final_use_list
2606 - use_disable = list(set(iuse_flags_list2).difference(set(use_enable)))
2607 - use_flagsDict = {}
2608 - for x in use_enable:
2609 - use_flagsDict[x] = True
2610 - for x in use_disable:
2611 - use_flagsDict[x] = False
2612 - for u, s in use_flagsDict.iteritems():
2613 - use_flags_list.append(u)
2614 - use_enable_list.append(s)
2615 - build_id = add_new_buildlog(conn, build_dict, use_flags_list, use_enable_list, build_error, summary_error, build_log_dict)
2616 - CM.putConnection(conn)
2617 - return build_id
2618 -
2619 - def search_info(self, textline, error_log_list):
2620 - if re.search(" * Package:", textline):
2621 - error_log_list.append(textline)
2622 - if re.search(" * Repository:", textline):
2623 - error_log_list.append(textline)
2624 - if re.search(" * Maintainer:", textline):
2625 - error_log_list.append(textline)
2626 - if re.search(" * USE:", textline):
2627 - error_log_list.append(textline)
2628 - if re.search(" * FEATURES:", textline):
2629 - error_log_list.append(textline)
2630 - return error_log_list
2631 -
2632 - def search_error(self, logfile_text, textline, error_log_list, sum_build_log_list, i):
2633 - if re.search("Error 1", textline):
2634 - x = i - 20
2635 - endline = True
2636 - error_log_list.append(".....\n")
2637 - while x != i + 3 and endline:
2638 - try:
2639 - error_log_list.append(logfile_text[x])
2640 - except:
2641 - endline = False
2642 - else:
2643 - x = x +1
2644 - if re.search(" * ERROR:", textline):
2645 - x = i
2646 - endline= True
2647 - field = textline.split(" ")
2648 - sum_build_log_list.append("fail")
2649 - error_log_list.append(".....\n")
2650 - while x != i + 10 and endline:
2651 - try:
2652 - error_log_list.append(logfile_text[x])
2653 - except:
2654 - endline = False
2655 - else:
2656 - x = x +1
2657 - if re.search("configure: error:", textline):
2658 - x = i - 4
2659 - endline = True
2660 - error_log_list.append(".....\n")
2661 - while x != i + 3 and endline:
2662 - try:
2663 - error_log_list.append(logfile_text[x])
2664 - except:
2665 - endline = False
2666 - else:
2667 - x = x +1
2668 - return error_log_list, sum_build_log_list
2669 -
2670 - def search_qa(self, logfile_text, textline, qa_error_list, error_log_list,i):
2671 - if re.search(" * QA Notice:", textline):
2672 - x = i
2673 - qa_error_list.append(logfile_text[x])
2674 - endline= True
2675 - error_log_list.append(".....\n")
2676 - while x != i + 3 and endline:
2677 - try:
2678 - error_log_list.append(logfile_text[x])
2679 - except:
2680 - endline = False
2681 - else:
2682 - x = x +1
2683 - return qa_error_list, error_log_list
2684 -
2685 - def get_buildlog_info(self, settings, build_dict):
2686 - myportdb = portage.portdbapi(mysettings=settings)
2687 - init_repoman = gobs_repoman(settings, myportdb)
2688 - logfile_text = get_log_text_list(settings.get("PORTAGE_LOG_FILE"))
2689 - # FIXME to support more errors and stuff
2690 - i = 0
2691 - build_log_dict = {}
2692 - error_log_list = []
2693 - qa_error_list = []
2694 - repoman_error_list = []
2695 - sum_build_log_list = []
2696 - for textline in logfile_text:
2697 - error_log_list = self.search_info(textline, error_log_list)
2698 - error_log_list, sum_build_log_list = self.search_error(logfile_text, textline, error_log_list, sum_build_log_list, i)
2699 - qa_error_list, error_log_list = self.search_qa(logfile_text, textline, qa_error_list, error_log_list, i)
2700 - i = i +1
2701 - # Run repoman check_repoman()
2702 - repoman_error_list = init_repoman.check_repoman(build_dict['categories'], build_dict['package'], build_dict['ebuild_version'], build_dict['config_profile'])
2703 - if repoman_error_list != []:
2704 - sum_build_log_list.append("repoman")
2705 - if qa_error_list != []:
2706 - sum_build_log_list.append("qa")
2707 - build_log_dict['repoman_error_list'] = repoman_error_list
2708 - build_log_dict['qa_error_list'] = qa_error_list
2709 - build_log_dict['error_log_list'] = error_log_list
2710 - build_log_dict['summary_error_list'] = sum_build_log_list
2711 - return build_log_dict
2712 -
2713 - # Copy of the portage action_info but fixed so it post info to a list.
2714 - def action_info(self, settings, trees):
2715 - argscmd = []
2716 - myaction, myopts, myfiles = parse_opts(argscmd, silent=True)
2717 - msg = []
2718 - root = '/'
2719 - root_config = root
2720 - # root_config = trees[settings['ROOT']]['root_config']
2721 - msg.append(getportageversion(settings["PORTDIR"], settings["ROOT"],
2722 - settings.profile_path, settings["CHOST"],
2723 - trees[settings["ROOT"]]["vartree"].dbapi) + "\n")
2724 -
2725 - header_width = 65
2726 - header_title = "System Settings"
2727 - if myfiles:
2728 - msg.append(header_width * "=" + "\n")
2729 - msg.append(header_title.rjust(int(header_width/2 + len(header_title)/2)) + "\n")
2730 - msg.append(header_width * "=" + "\n")
2731 - msg.append("System uname: "+platform.platform(aliased=1) + "\n")
2732 -
2733 - lastSync = portage.grabfile(os.path.join(
2734 - settings["PORTDIR"], "metadata", "timestamp.chk"))
2735 - if lastSync:
2736 - msg.append("Timestamp of tree:" + lastSync[0] + "\n")
2737 - else:
2738 - msg.append("Timestamp of tree: Unknown" + "\n")
2739 -
2740 - output=subprocess_getstatusoutput("distcc --version")
2741 - if not output[0]:
2742 - msg.append(str(output[1].split("\n",1)[0]))
2743 - if "distcc" in settings.features:
2744 - msg.append("[enabled]")
2745 - else:
2746 - msg.append("[disabled]")
2747 -
2748 - output=subprocess_getstatusoutput("ccache -V")
2749 - if not output[0]:
2750 - msg.append(str(output[1].split("\n",1)[0]), end=' ')
2751 - if "ccache" in settings.features:
2752 - msg.append("[enabled]")
2753 - else:
2754 - msg.append("[disabled]")
2755 -
2756 - myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
2757 - "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
2758 - myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
2759 - atoms = []
2760 - vardb = trees["/"]["vartree"].dbapi
2761 - for x in myvars:
2762 - try:
2763 - x = Atom(x)
2764 - except InvalidAtom:
2765 - writemsg_stdout("%-20s %s\n" % (x+":", "[NOT VALID]"),
2766 - noiselevel=-1)
2767 - else:
2768 - for atom in expand_new_virt(vardb, x):
2769 - if not atom.blocker:
2770 - atoms.append((x, atom))
2771 -
2772 - myvars = sorted(set(atoms))
2773 -
2774 - portdb = trees["/"]["porttree"].dbapi
2775 - main_repo = portdb.getRepositoryName(portdb.porttree_root)
2776 - cp_map = {}
2777 - cp_max_len = 0
2778 -
2779 - for orig_atom, x in myvars:
2780 - pkg_matches = vardb.match(x)
2781 -
2782 - versions = []
2783 - for cpv in pkg_matches:
2784 - matched_cp = portage.versions.cpv_getkey(cpv)
2785 - ver = portage.versions.cpv_getversion(cpv)
2786 - ver_map = cp_map.setdefault(matched_cp, {})
2787 - prev_match = ver_map.get(ver)
2788 - if prev_match is not None:
2789 - if prev_match.provide_suffix:
2790 - # prefer duplicate matches that include
2791 - # additional virtual provider info
2792 - continue
2793 -
2794 - if len(matched_cp) > cp_max_len:
2795 - cp_max_len = len(matched_cp)
2796 - repo = vardb.aux_get(cpv, ["repository"])[0]
2797 - if repo == main_repo:
2798 - repo_suffix = ""
2799 - elif not repo:
2800 - repo_suffix = "::<unknown repository>"
2801 - else:
2802 - repo_suffix = "::" + repo
2803 -
2804 - if matched_cp == orig_atom.cp:
2805 - provide_suffix = ""
2806 - else:
2807 - provide_suffix = " (%s)" % (orig_atom,)
2808 -
2809 - ver_map[ver] = _info_pkgs_ver(ver, repo_suffix, provide_suffix)
2810 -
2811 - for cp in sorted(cp_map):
2812 - versions = sorted(cp_map[cp].values())
2813 - versions = ", ".join(ver.toString() for ver in versions)
2814 - msg_extra = "%s %s\n" % \
2815 - ((cp + ":").ljust(cp_max_len + 1), versions)
2816 - msg.append(msg_extra)
2817 -
2818 - libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
2819 -
2820 - repos = portdb.settings.repositories
2821 - msg_extra = "Repositories: %s\n" % \
2822 - " ".join(repo.name for repo in repos)
2823 - msg.append(msg_extra)
2824 -
2825 - if _ENABLE_SET_CONFIG:
2826 - sets_line = "Installed sets: "
2827 - sets_line += ", ".join(s for s in \
2828 - sorted(root_config.sets['selected'].getNonAtoms()) \
2829 - if s.startswith(SETPREFIX))
2830 - sets_line += "\n"
2831 - msg.append(sets_line)
2832 -
2833 - myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
2834 - 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
2835 - 'PORTDIR_OVERLAY', 'PORTAGE_BUNZIP2_COMMAND',
2836 - 'PORTAGE_BZIP2_COMMAND',
2837 - 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
2838 - 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'SYNC', 'FEATURES',
2839 - 'EMERGE_DEFAULT_OPTS']
2840 - myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
2841 -
2842 - myvars_ignore_defaults = {
2843 - 'PORTAGE_BZIP2_COMMAND' : 'bzip2',
2844 - }
2845 -
2846 - myvars = portage.util.unique_array(myvars)
2847 - use_expand = settings.get('USE_EXPAND', '').split()
2848 - use_expand.sort()
2849 - use_expand_hidden = set(
2850 - settings.get('USE_EXPAND_HIDDEN', '').upper().split())
2851 - alphabetical_use = '--alphabetical' in myopts
2852 - unset_vars = []
2853 - myvars.sort()
2854 - for x in myvars:
2855 - if x in settings:
2856 - if x != "USE":
2857 - default = myvars_ignore_defaults.get(x)
2858 - if default is not None and \
2859 - default == settings[x]:
2860 - continue
2861 - msg_extra = '%s="%s"\n' % (x, settings[x])
2862 - msg.append(msg_extra)
2863 - else:
2864 - use = set(settings["USE"].split())
2865 - for varname in use_expand:
2866 - flag_prefix = varname.lower() + "_"
2867 - for f in list(use):
2868 - if f.startswith(flag_prefix):
2869 - use.remove(f)
2870 - use = list(use)
2871 - use.sort()
2872 - msg_extra = 'USE=%s' % " ".join(use)
2873 - msg.append(msg_extra + "\n")
2874 - for varname in use_expand:
2875 - myval = settings.get(varname)
2876 - if myval:
2877 - msg.append(varname + '=' + myval + "\n")
2878 - else:
2879 - unset_vars.append(x)
2880 - if unset_vars:
2881 - msg_extra = "Unset: "+", ".join(unset_vars)
2882 - msg.append(msg_extra + "\n")
2883 -
2884 - # See if we can find any packages installed matching the strings
2885 - # passed on the command line
2886 - mypkgs = []
2887 - vardb = trees[settings["ROOT"]]["vartree"].dbapi
2888 - portdb = trees[settings["ROOT"]]["porttree"].dbapi
2889 - bindb = trees[settings["ROOT"]]["bintree"].dbapi
2890 - for x in myfiles:
2891 - match_found = False
2892 - installed_match = vardb.match(x)
2893 - for installed in installed_match:
2894 - mypkgs.append((installed, "installed"))
2895 - match_found = True
2896 -
2897 - if match_found:
2898 - continue
2899 -
2900 - for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
2901 - if pkg_type == "binary" and "--usepkg" not in myopts:
2902 - continue
2903 -
2904 - matches = db.match(x)
2905 - matches.reverse()
2906 - for match in matches:
2907 - if pkg_type == "binary":
2908 - if db.bintree.isremote(match):
2909 - continue
2910 - auxkeys = ["EAPI", "DEFINED_PHASES"]
2911 - metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
2912 - if metadata["EAPI"] not in ("0", "1", "2", "3") and \
2913 - "info" in metadata["DEFINED_PHASES"].split():
2914 - mypkgs.append((match, pkg_type))
2915 - break
2916 -
2917 - # If some packages were found...
2918 - if mypkgs:
2919 - # Get our global settings (we only print stuff if it varies from
2920 - # the current config)
2921 - mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
2922 - auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
2923 - auxkeys.append('DEFINED_PHASES')
2924 - global_vals = {}
2925 - pkgsettings = portage.config(clone=settings)
2926 -
2927 - # Loop through each package
2928 - # Only print settings if they differ from global settings
2929 - header_title = "Package Settings"
2930 - msg.append(header_width * "=")
2931 - msg.append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
2932 - msg.append(header_width * "=")
2933 - from portage.output import EOutput
2934 - out = EOutput()
2935 - for mypkg in mypkgs:
2936 - cpv = mypkg[0]
2937 - pkg_type = mypkg[1]
2938 - # Get all package specific variables
2939 - if pkg_type == "installed":
2940 - metadata = dict(zip(auxkeys, vardb.aux_get(cpv, auxkeys)))
2941 - elif pkg_type == "ebuild":
2942 - metadata = dict(zip(auxkeys, portdb.aux_get(cpv, auxkeys)))
2943 - elif pkg_type == "binary":
2944 - metadata = dict(zip(auxkeys, bindb.aux_get(cpv, auxkeys)))
2945 -
2946 - pkg = Package(built=(pkg_type!="ebuild"), cpv=cpv,
2947 - installed=(pkg_type=="installed"), metadata=zip(Package.metadata_keys,
2948 - (metadata.get(x, '') for x in Package.metadata_keys)),
2949 - root_config=root_config, type_name=pkg_type)
2950 -
2951 - if pkg_type == "installed":
2952 - msg.append("\n%s was built with the following:" % \
2953 - colorize("INFORM", str(pkg.cpv)))
2954 - elif pkg_type == "ebuild":
2955 - msg.append("\n%s would be build with the following:" % \
2956 - colorize("INFORM", str(pkg.cpv)))
2957 - elif pkg_type == "binary":
2958 - msg.append("\n%s (non-installed binary) was built with the following:" % \
2959 - colorize("INFORM", str(pkg.cpv)))
2960 -
2961 - writemsg_stdout('%s\n' % pkg_use_display(pkg, myopts),
2962 - noiselevel=-1)
2963 - if pkg_type == "installed":
2964 - for myvar in mydesiredvars:
2965 - if metadata[myvar].split() != settings.get(myvar, '').split():
2966 - msg.append("%s=\"%s\"" % (myvar, metadata[myvar]))
2967 -
2968 - if metadata['DEFINED_PHASES']:
2969 - if 'info' not in metadata['DEFINED_PHASES'].split():
2970 - continue
2971 -
2972 - msg.append(">>> Attempting to run pkg_info() for '%s'" % pkg.cpv)
2973 -
2974 - if pkg_type == "installed":
2975 - ebuildpath = vardb.findname(pkg.cpv)
2976 - elif pkg_type == "ebuild":
2977 - ebuildpath = portdb.findname(pkg.cpv, myrepo=pkg.repo)
2978 - elif pkg_type == "binary":
2979 - tbz2_file = bindb.bintree.getname(pkg.cpv)
2980 - ebuild_file_name = pkg.cpv.split("/")[1] + ".ebuild"
2981 - ebuild_file_contents = portage.xpak.tbz2(tbz2_file).getfile(ebuild_file_name)
2982 - tmpdir = tempfile.mkdtemp()
2983 - ebuildpath = os.path.join(tmpdir, ebuild_file_name)
2984 - file = open(ebuildpath, 'w')
2985 - file.write(ebuild_file_contents)
2986 - file.close()
2987 -
2988 - if not ebuildpath or not os.path.exists(ebuildpath):
2989 - out.ewarn("No ebuild found for '%s'" % pkg.cpv)
2990 - continue
2991 -
2992 - if pkg_type == "installed":
2993 - portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
2994 - pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
2995 - mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
2996 - tree="vartree")
2997 - elif pkg_type == "ebuild":
2998 - portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
2999 - pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
3000 - mydbapi=trees[settings["ROOT"]]["porttree"].dbapi,
3001 - tree="porttree")
3002 - elif pkg_type == "binary":
3003 - portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
3004 - pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
3005 - mydbapi=trees[settings["ROOT"]]["bintree"].dbapi,
3006 - tree="bintree")
3007 - shutil.rmtree(tmpdir)
3008 - return msg
3009 -
3010 - def write_msg_file(self, msg, log_path):
3011 - """
3012 - Output msg to stdout if not self._background. If log_path
3013 - is not None then append msg to the log (appends with
3014 - compression if the filename extension of log_path
3015 - corresponds to a supported compression type).
3016 - """
3017 - msg_shown = False
3018 - if log_path is not None:
3019 - try:
3020 - f = open(_unicode_encode(log_path,
3021 - encoding=_encodings['fs'], errors='strict'),
3022 - mode='ab')
3023 - f_real = f
3024 - except IOError as e:
3025 - if e.errno not in (errno.ENOENT, errno.ESTALE):
3026 - raise
3027 - if not msg_shown:
3028 - writemsg_level(msg, level=level, noiselevel=noiselevel)
3029 - else:
3030 -
3031 - if log_path.endswith('.gz'):
3032 - # NOTE: The empty filename argument prevents us from
3033 - # triggering a bug in python3 which causes GzipFile
3034 - # to raise AttributeError if fileobj.name is bytes
3035 - # instead of unicode.
3036 - f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
3037 -
3038 - f.write(_unicode_encode(msg))
3039 - f.close()
3040 - if f_real is not f:
3041 - f_real.close()
3042 -
3043 - def add_buildlog_main(self, settings, pkg, trees):
3044 - conn=CM.getConnection()
3045 - build_dict = self.get_build_dict_db(settings, pkg)
3046 - build_log_dict = {}
3047 - build_log_dict = self.get_buildlog_info(settings, build_dict)
3048 - sum_build_log_list = build_log_dict['summary_error_list']
3049 - error_log_list = build_log_dict['error_log_list']
3050 - build_error = ""
3051 - if error_log_list != []:
3052 - for log_line in error_log_list:
3053 - build_error = build_error + log_line
3054 - summary_error = ""
3055 - if sum_build_log_list != []:
3056 - for sum_log_line in sum_build_log_list:
3057 - summary_error = summary_error + " " + sum_log_line
3058 - build_log_dict['logfilename'] = re.sub("\/var\/log\/portage\/", "", settings.get("PORTAGE_LOG_FILE"))
3059 - if build_dict['queue_id'] is None:
3060 - build_id = self.add_new_ebuild_buildlog(settings, pkg, build_dict, build_error, summary_error, build_log_dict)
3061 - else:
3062 - build_id = move_queru_buildlog(conn, build_dict['queue_id'], build_error, summary_error, build_log_dict)
3063 - # update_qa_repoman(conn, build_id, build_log_dict)
3064 - msg = self.action_info(settings, trees)
3065 - emerge_info_logfilename = settings.get("PORTAGE_LOG_FILE")[:-3] + "emerge_log.log"
3066 - if build_id is not None:
3067 - for msg_line in msg:
3068 - self.write_msg_file(msg_line, emerge_info_logfilename)
3069 - os.chmod(settings.get("PORTAGE_LOG_FILE"), 0664)
3070 - os.chmod(emerge_info_logfilename, 0664)
3071 - print("Package: ", pkg.cpv, "logged to db.")
3072 - else:
3073 - # FIXME Remove the log some way so
3074 - # mergetask._locate_failure_log(x) works in action_build()
3075 - #try:
3076 - # os.remove(settings.get("PORTAGE_LOG_FILE"))
3077 - #except:
3078 - # pass
3079 - print("Package: ", pkg.cpv, "NOT logged to db.")
3080 - CM.putConnection(conn)
3081
3082 diff --git a/gobs/pym/build_queru.py~ b/gobs/pym/build_queru.py~
3083 deleted file mode 100644
3084 index 3f0bde8..0000000
3085 --- a/gobs/pym/build_queru.py~
3086 +++ /dev/null
3087 @@ -1,708 +0,0 @@
3088 -# Get the options from the config file set in gobs.readconf
3089 -from __future__ import print_function
3090 -from gobs.readconf import get_conf_settings
3091 -reader=get_conf_settings()
3092 -gobs_settings_dict=reader.read_gobs_settings_all()
3093 -# make a CM
3094 -from gobs.ConnectionManager import connectionManager
3095 -CM=connectionManager(gobs_settings_dict)
3096 -#selectively import the pgsql/mysql querys
3097 -if CM.getName()=='pgsql':
3098 - from gobs.pgsql import *
3099 -
3100 -import portage
3101 -import os
3102 -import re
3103 -import sys
3104 -import signal
3105 -from gobs.manifest import gobs_manifest
3106 -from gobs.depclean import main_depclean
3107 -from gobs.flags import gobs_use_flags
3108 -from portage import _encodings
3109 -from portage import _unicode_decode
3110 -from portage.versions import cpv_getkey
3111 -import portage.xpak, errno, re, time
3112 -from _emerge.main import parse_opts, profile_check, apply_priorities, repo_name_duplicate_check, \
3113 - config_protect_check, check_procfs, ensure_required_sets, expand_set_arguments, \
3114 - validate_ebuild_environment, chk_updated_info_files, display_preserved_libs
3115 -from _emerge.actions import action_config, action_sync, action_metadata, \
3116 - action_regen, action_search, action_uninstall, \
3117 - adjust_configs, chk_updated_cfg_files, display_missing_pkg_set, \
3118 - display_news_notification, getportageversion, load_emerge_config
3119 -from portage.util import cmp_sort_key, writemsg, \
3120 - writemsg_level, writemsg_stdout, shlex_split
3121 -from _emerge.sync.old_tree_timestamp import old_tree_timestamp_warn
3122 -from _emerge.create_depgraph_params import create_depgraph_params
3123 -from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
3124 -from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
3125 -from gobs.Scheduler import Scheduler
3126 -from _emerge.clear_caches import clear_caches
3127 -from _emerge.unmerge import unmerge
3128 -from _emerge.emergelog import emergelog
3129 -from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
3130 -from portage._global_updates import _global_updates
3131 -from portage._sets import SETPREFIX
3132 -from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
3133 -from _emerge.is_valid_package_atom import is_valid_package_atom
3134 -from _emerge.stdout_spinner import stdout_spinner
3135 -from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
3136 - red, yellow, colorize, xtermTitle, xtermTitleReset
3137 -good = create_color_func("GOOD")
3138 -bad = create_color_func("BAD")
3139 -
3140 -class queruaction(object):
3141 -
3142 - def __init__(self, config_profile):
3143 - self._mysettings = portage.config(config_root = "/")
3144 - self._config_profile = config_profile
3145 - self._myportdb = portage.portdb
3146 -
3147 - def log_fail_queru(self, build_dict, settings):
3148 - conn=CM.getConnection()
3149 - print('build_dict', build_dict)
3150 - fail_querue_dict = get_fail_querue_dict(conn, build_dict)
3151 - print('fail_querue_dict', fail_querue_dict)
3152 - if fail_querue_dict is None:
3153 - fail_querue_dict = {}
3154 - fail_querue_dict['querue_id'] = build_dict['queue_id']
3155 - fail_querue_dict['fail_type'] = build_dict['type_fail']
3156 - fail_querue_dict['fail_times'] = 1
3157 - print('fail_querue_dict', fail_querue_dict)
3158 - add_fail_querue_dict(conn, fail_querue_dict)
3159 - else:
3160 - if fail_querue_dict['fail_times'][0] < 6:
3161 - fail_querue_dict['fail_times'] = fail_querue_dict['fail_times'][0] + 1
3162 - fail_querue_dict['querue_id'] = build_dict['queue_id']
3163 - fail_querue_dict['fail_type'] = build_dict['type_fail']
3164 - update_fail_times(conn, fail_querue_dict)
3165 - return
3166 - else:
3167 - build_log_dict = {}
3168 - error_log_list = []
3169 - qa_error_list = []
3170 - repoman_error_list = []
3171 - sum_build_log_list = []
3172 - sum_build_log_list.append("fail")
3173 - error_log_list.append(build_dict['type_fail'])
3174 - build_log_dict['repoman_error_list'] = repoman_error_list
3175 - build_log_dict['qa_error_list'] = qa_error_list
3176 - build_log_dict['summary_error_list'] = sum_build_log_list
3177 - if build_dict['type_fail'] == 'merge fail':
3178 - error_log_list = []
3179 - for k, v in build_dict['failed_merge'].iteritems():
3180 - error_log_list.append(v['fail_msg'])
3181 - build_log_dict['error_log_list'] = error_log_list
3182 - build_error = ""
3183 - if error_log_list != []:
3184 - for log_line in error_log_list:
3185 - build_error = build_error + log_line
3186 - summary_error = ""
3187 - if sum_build_log_list != []:
3188 - for sum_log_line in sum_build_log_list:
3189 - summary_error = summary_error + " " + sum_log_line
3190 - if settings.get("PORTAGE_LOG_FILE") is not None:
3191 - build_log_dict['logfilename'] = re.sub("\/var\/log\/portage\/", "", settings.get("PORTAGE_LOG_FILE"))
3192 - # os.chmode(settings.get("PORTAGE_LOG_FILE"), 224)
3193 - else:
3194 - build_log_dict['logfilename'] = ""
3195 - move_queru_buildlog(conn, build_dict['queue_id'], build_error, summary_error, build_log_dict)
3196 - CM.putConnection(conn)
3197 -
3198 - def action_build(self, settings, trees, mtimedb, myopts, myaction, myfiles, spinner, build_dict):
3199 -
3200 - if '--usepkgonly' not in myopts:
3201 - old_tree_timestamp_warn(settings['PORTDIR'], settings)
3202 -
3203 - # It's best for config updates in /etc/portage to be processed
3204 - # before we get here, so warn if they're not (bug #267103).
3205 - chk_updated_cfg_files(settings['EROOT'], ['/etc/portage'])
3206 -
3207 - resume = False
3208 -
3209 - ldpath_mtimes = mtimedb["ldpath"]
3210 - favorites=[]
3211 - buildpkgonly = "--buildpkgonly" in myopts
3212 - pretend = "--pretend" in myopts
3213 - fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
3214 - ask = "--ask" in myopts
3215 - enter_invalid = '--ask-enter-invalid' in myopts
3216 - nodeps = "--nodeps" in myopts
3217 - oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
3218 - tree = "--tree" in myopts
3219 - if nodeps and tree:
3220 - tree = False
3221 - del myopts["--tree"]
3222 - portage.writemsg(colorize("WARN", " * ") + \
3223 - "--tree is broken with --nodeps. Disabling...\n")
3224 - debug = "--debug" in myopts
3225 - verbose = "--verbose" in myopts
3226 - quiet = "--quiet" in myopts
3227 -
3228 - myparams = create_depgraph_params(myopts, myaction)
3229 - try:
3230 - success, mydepgraph, favorites = backtrack_depgraph(
3231 - settings, trees, myopts, myparams, myaction, myfiles, spinner)
3232 - except portage.exception.PackageSetNotFound as e:
3233 - root_config = trees[settings["ROOT"]]["root_config"]
3234 - display_missing_pkg_set(root_config, e.value)
3235 - build_dict['type_fail'] = "depgraph fail"
3236 - build_dict['check_fail'] = True
3237 - use_changes = None
3238 - if mydepgraph._dynamic_config._needed_use_config_changes:
3239 - use_changes = {}
3240 - for pkg, needed_use_config_changes in mydepgraph._dynamic_config._needed_use_config_changes.items():
3241 - new_use, changes = needed_use_config_changes
3242 - use_changes[pkg.cpv] = changes
3243 - iteritems_packages = {}
3244 - for k, v in use_changes.iteritems():
3245 - k_package = portage.versions.cpv_getkey(k)
3246 - iteritems_packages[ k_package ] = v
3247 - print('iteritems_packages', iteritems_packages)
3248 - build_cpv_dict = iteritems_packages
3249 - if use_changes is not None:
3250 - for k, v in build_cpv_dict.iteritems():
3251 - build_use_flags_list = []
3252 - for x, y in v.iteritems():
3253 - if y is True:
3254 - build_use_flags_list.append(x)
3255 - if y is False:
3256 - build_use_flags_list.append("-" + x)
3257 - print(k, build_use_flags_list)
3258 - if not build_use_flags_list == []:
3259 - build_use_flags = ""
3260 - for flags in build_use_flags_list:
3261 - build_use_flags = build_use_flags + flags + ' '
3262 - filetext = k + ' ' + build_use_flags
3263 - print('filetext', filetext)
3264 - with open("/etc/portage/package.use/gobs.use", "a") as f:
3265 - f.write(filetext)
3266 - f.write('\n')
3267 -
3268 - settings, trees, mtimedb = load_emerge_config()
3269 - myparams = create_depgraph_params(myopts, myaction)
3270 - try:
3271 - success, mydepgraph, favorites = backtrack_depgraph(
3272 - settings, trees, myopts, myparams, myaction, myfiles, spinner)
3273 - except portage.exception.PackageSetNotFound as e:
3274 - root_config = trees[settings["ROOT"]]["root_config"]
3275 - display_missing_pkg_set(root_config, e.value)
3276 - build_dict['type_fail'] = "depgraph fail"
3277 - build_dict['check_fail'] = True
3278 - if not success:
3279 - mydepgraph.display_problems()
3280 - build_dict['type_fail'] = "depgraph fail"
3281 - build_dict['check_fail'] = True
3282 -
3283 - if build_dict['check_fail'] is True:
3284 - self.log_fail_queru(build_dict, settings)
3285 - return 1, settings, trees, mtimedb
3286 -
3287 - if "--buildpkgonly" in myopts:
3288 - graph_copy = mydepgraph._dynamic_config.digraph.copy()
3289 - removed_nodes = set()
3290 - for node in graph_copy:
3291 - if not isinstance(node, Package) or \
3292 - node.operation == "nomerge":
3293 - removed_nodes.add(node)
3294 - graph_copy.difference_update(removed_nodes)
3295 - if not graph_copy.hasallzeros(ignore_priority = \
3296 - DepPrioritySatisfiedRange.ignore_medium):
3297 - print("\n!!! --buildpkgonly requires all dependencies to be merged.")
3298 - print("!!! Cannot merge requested packages. Merge deps and try again.\n")
3299 - return 1, settings, trees, mtimedb
3300 -
3301 - mydepgraph.saveNomergeFavorites()
3302 -
3303 - mergetask = Scheduler(settings, trees, mtimedb, myopts,
3304 - spinner, favorites=favorites,
3305 - graph_config=mydepgraph.schedulerGraph())
3306 -
3307 - del mydepgraph
3308 - clear_caches(trees)
3309 -
3310 - retval = mergetask.merge()
3311 - print('retval', retval)
3312 - if retval:
3313 - build_dict['type_fail'] = 'merge fail'
3314 - build_dict['check_fail'] = True
3315 - attict = {}
3316 - failed_pkgs_dict = {}
3317 - for x in mergetask._failed_pkgs_all:
3318 - attict['fail_msg'] = str(x.pkg)[0] + ' ' + str(x.pkg)[1] + ' ' + re.sub("\/var\/log\/portage\/", "", mergetask._locate_failure_log(x))
3319 - failed_pkgs_dict[str(x.pkg.cpv)] = attict
3320 - build_dict['failed_merge'] = failed_pkgs_dict
3321 - self.log_fail_queru(build_dict, settings)
3322 - if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
3323 - if "yes" == settings.get("AUTOCLEAN"):
3324 - portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
3325 - unmerge(trees[settings["ROOT"]]["root_config"],
3326 - myopts, "clean", [],
3327 - ldpath_mtimes, autoclean=1)
3328 - else:
3329 - portage.writemsg_stdout(colorize("WARN", "WARNING:")
3330 - + " AUTOCLEAN is disabled. This can cause serious"
3331 - + " problems due to overlapping packages.\n")
3332 -
3333 - return retval, settings, trees, mtimedb
3334 -
3335 - def post_emerge(self, myaction, myopts, myfiles, target_root, trees, mtimedb, retval):
3336 -
3337 - root_config = trees[target_root]["root_config"]
3338 - vardbapi = trees[target_root]["vartree"].dbapi
3339 - settings = vardbapi.settings
3340 - info_mtimes = mtimedb["info"]
3341 -
3342 - # Load the most current variables from ${ROOT}/etc/profile.env
3343 - settings.unlock()
3344 - settings.reload()
3345 - settings.regenerate()
3346 - settings.lock()
3347 -
3348 - config_protect = shlex_split(settings.get("CONFIG_PROTECT", ""))
3349 - infodirs = settings.get("INFOPATH","").split(":") + \
3350 - settings.get("INFODIR","").split(":")
3351 -
3352 - os.chdir("/")
3353 -
3354 - if retval == os.EX_OK:
3355 - exit_msg = " *** exiting successfully."
3356 - else:
3357 - exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
3358 - emergelog("notitles" not in settings.features, exit_msg)
3359 -
3360 - _flush_elog_mod_echo()
3361 -
3362 - if not vardbapi._pkgs_changed:
3363 - display_news_notification(root_config, myopts)
3364 - # If vdb state has not changed then there's nothing else to do.
3365 - return
3366 -
3367 - vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
3368 - portage.util.ensure_dirs(vdb_path)
3369 - vdb_lock = None
3370 - if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
3371 - vardbapi.lock()
3372 - vdb_lock = True
3373 -
3374 - if vdb_lock:
3375 - try:
3376 - if "noinfo" not in settings.features:
3377 - chk_updated_info_files(target_root,
3378 - infodirs, info_mtimes, retval)
3379 - mtimedb.commit()
3380 - finally:
3381 - if vdb_lock:
3382 - vardbapi.unlock()
3383 -
3384 - chk_updated_cfg_files(settings['EROOT'], config_protect)
3385 -
3386 - display_news_notification(root_config, myopts)
3387 - if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
3388 - display_preserved_libs(vardbapi, myopts)
3389 -
3390 - postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
3391 - portage.USER_CONFIG_PATH, "bin", "post_emerge")
3392 - if os.access(postemerge, os.X_OK):
3393 - hook_retval = portage.process.spawn(
3394 - [postemerge], env=settings.environ())
3395 - if hook_retval != os.EX_OK:
3396 - writemsg_level(
3397 - " %s spawn failed of %s\n" % (bad("*"), postemerge,),
3398 - level=logging.ERROR, noiselevel=-1)
3399 -
3400 - if "--quiet" not in myopts and \
3401 - myaction is None and "@world" in myfiles:
3402 - show_depclean_suggestion()
3403 -
3404 - return
3405 -
3406 - def emerge_main(self, args, build_dict):
3407 -
3408 - portage._disable_legacy_globals()
3409 - portage.dep._internal_warnings = True
3410 - # Disable color until we're sure that it should be enabled (after
3411 - # EMERGE_DEFAULT_OPTS has been parsed).
3412 - portage.output.havecolor = 0
3413 - # This first pass is just for options that need to be known as early as
3414 - # possible, such as --config-root. They will be parsed again later,
3415 - # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
3416 - # the value of --config-root).
3417 - myaction, myopts, myfiles = parse_opts(args, silent=True)
3418 - if "--debug" in myopts:
3419 - os.environ["PORTAGE_DEBUG"] = "1"
3420 - if "--config-root" in myopts:
3421 - os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
3422 - if "--root" in myopts:
3423 - os.environ["ROOT"] = myopts["--root"]
3424 - if "--accept-properties" in myopts:
3425 - os.environ["ACCEPT_PROPERTIES"] = myopts["--accept-properties"]
3426 -
3427 - # Portage needs to ensure a sane umask for the files it creates.
3428 - os.umask(0o22)
3429 - settings, trees, mtimedb = load_emerge_config()
3430 - portdb = trees[settings["ROOT"]]["porttree"].dbapi
3431 - rval = profile_check(trees, myaction)
3432 - if rval != os.EX_OK:
3433 - return rval
3434 -
3435 - tmpcmdline = []
3436 - if "--ignore-default-opts" not in myopts:
3437 - tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
3438 - tmpcmdline.extend(args)
3439 - myaction, myopts, myfiles = parse_opts(tmpcmdline)
3440 -
3441 - if myaction not in ('help', 'info', 'version') and \
3442 - myopts.get('--package-moves') != 'n' and \
3443 - _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
3444 - mtimedb.commit()
3445 - # Reload the whole config from scratch.
3446 - settings, trees, mtimedb = load_emerge_config(trees=trees)
3447 - portdb = trees[settings["ROOT"]]["porttree"].dbapi
3448 -
3449 - xterm_titles = "notitles" not in settings.features
3450 - if xterm_titles:
3451 - xtermTitle("emerge")
3452 -
3453 - adjust_configs(myopts, trees)
3454 - apply_priorities(settings)
3455 -
3456 - spinner = stdout_spinner()
3457 - if "candy" in settings.features:
3458 - spinner.update = spinner.update_scroll
3459 -
3460 - if "--quiet" not in myopts:
3461 - portage.deprecated_profile_check(settings=settings)
3462 - if portage.const._ENABLE_REPO_NAME_WARN:
3463 - # Bug #248603 - Disable warnings about missing
3464 - # repo_name entries for stable branch.
3465 - repo_name_check(trees)
3466 - repo_name_duplicate_check(trees)
3467 - config_protect_check(trees)
3468 - check_procfs()
3469 -
3470 - if "getbinpkg" in settings.features:
3471 - myopts["--getbinpkg"] = True
3472 -
3473 - if "--getbinpkgonly" in myopts:
3474 - myopts["--getbinpkg"] = True
3475 -
3476 - if "--getbinpkgonly" in myopts:
3477 - myopts["--usepkgonly"] = True
3478 -
3479 - if "--getbinpkg" in myopts:
3480 - myopts["--usepkg"] = True
3481 -
3482 - if "--usepkgonly" in myopts:
3483 - myopts["--usepkg"] = True
3484 -
3485 - if "buildpkg" in settings.features or "--buildpkgonly" in myopts:
3486 - myopts["--buildpkg"] = True
3487 -
3488 - if "--buildpkgonly" in myopts:
3489 - # --buildpkgonly will not merge anything, so
3490 - # it cancels all binary package options.
3491 - for opt in ("--getbinpkg", "--getbinpkgonly",
3492 - "--usepkg", "--usepkgonly"):
3493 - myopts.pop(opt, None)
3494 -
3495 - for mytrees in trees.values():
3496 - mydb = mytrees["porttree"].dbapi
3497 - # Freeze the portdbapi for performance (memoize all xmatch results).
3498 - mydb.freeze()
3499 -
3500 - if myaction in ('search', None) and \
3501 - "--usepkg" in myopts:
3502 - # Populate the bintree with current --getbinpkg setting.
3503 - # This needs to happen before expand_set_arguments(), in case
3504 - # any sets use the bintree.
3505 - mytrees["bintree"].populate(
3506 - getbinpkgs="--getbinpkg" in myopts)
3507 -
3508 - del mytrees, mydb
3509 -
3510 - for x in myfiles:
3511 - ext = os.path.splitext(x)[1]
3512 - if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
3513 - print(colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n"))
3514 - break
3515 -
3516 - root_config = trees[settings["ROOT"]]["root_config"]
3517 - if myaction == "list-sets":
3518 - writemsg_stdout("".join("%s\n" % s for s in sorted(root_config.sets)))
3519 - return os.EX_OK
3520 -
3521 - ensure_required_sets(trees)
3522 -
3523 - # only expand sets for actions taking package arguments
3524 - oldargs = myfiles[:]
3525 - if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
3526 - myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
3527 - if retval != os.EX_OK:
3528 - return retval
3529 -
3530 - # Need to handle empty sets specially, otherwise emerge will react
3531 - # with the help message for empty argument lists
3532 - if oldargs and not myfiles:
3533 - print("emerge: no targets left after set expansion")
3534 - return 0
3535 -
3536 - if ("--tree" in myopts) and ("--columns" in myopts):
3537 - print("emerge: can't specify both of \"--tree\" and \"--columns\".")
3538 - return 1
3539 -
3540 - if '--emptytree' in myopts and '--noreplace' in myopts:
3541 - writemsg_level("emerge: can't specify both of " + \
3542 - "\"--emptytree\" and \"--noreplace\".\n",
3543 - level=logging.ERROR, noiselevel=-1)
3544 - return 1
3545 -
3546 - if ("--quiet" in myopts):
3547 - spinner.update = spinner.update_quiet
3548 - portage.util.noiselimit = -1
3549 -
3550 - if "--fetch-all-uri" in myopts:
3551 - myopts["--fetchonly"] = True
3552 -
3553 - if "--skipfirst" in myopts and "--resume" not in myopts:
3554 - myopts["--resume"] = True
3555 -
3556 - # Allow -p to remove --ask
3557 - if "--pretend" in myopts:
3558 - myopts.pop("--ask", None)
3559 -
3560 - # forbid --ask when not in a terminal
3561 - # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
3562 - if ("--ask" in myopts) and (not sys.stdin.isatty()):
3563 - portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
3564 - noiselevel=-1)
3565 - return 1
3566 -
3567 - if settings.get("PORTAGE_DEBUG", "") == "1":
3568 - spinner.update = spinner.update_quiet
3569 - portage.util.noiselimit = 0
3570 - if "python-trace" in settings.features:
3571 - import portage.debug as portage_debug
3572 - portage_debug.set_trace(True)
3573 -
3574 - if not ("--quiet" in myopts):
3575 - if '--nospinner' in myopts or \
3576 - settings.get('TERM') == 'dumb' or \
3577 - not sys.stdout.isatty():
3578 - spinner.update = spinner.update_basic
3579 -
3580 - if "--debug" in myopts:
3581 - print("myaction", myaction)
3582 - print("myopts", myopts)
3583 -
3584 - pretend = "--pretend" in myopts
3585 - fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
3586 - buildpkgonly = "--buildpkgonly" in myopts
3587 -
3588 - # check if root user is the current user for the actions where emerge needs this
3589 - if portage.secpass < 2:
3590 - # We've already allowed "--version" and "--help" above.
3591 - if "--pretend" not in myopts and myaction not in ("search","info"):
3592 - need_superuser = myaction in ('clean', 'depclean', 'deselect',
3593 - 'prune', 'unmerge') or not \
3594 - (fetchonly or \
3595 - (buildpkgonly and secpass >= 1) or \
3596 - myaction in ("metadata", "regen", "sync"))
3597 - if portage.secpass < 1 or \
3598 - need_superuser:
3599 - if need_superuser:
3600 - access_desc = "superuser"
3601 - else:
3602 - access_desc = "portage group"
3603 - # Always show portage_group_warning() when only portage group
3604 - # access is required but the user is not in the portage group.
3605 - from portage.data import portage_group_warning
3606 - if "--ask" in myopts:
3607 - myopts["--pretend"] = True
3608 - del myopts["--ask"]
3609 - print(("%s access is required... " + \
3610 - "adding --pretend to options\n") % access_desc)
3611 - if portage.secpass < 1 and not need_superuser:
3612 - portage_group_warning()
3613 - else:
3614 - sys.stderr.write(("emerge: %s access is required\n") \
3615 - % access_desc)
3616 - if portage.secpass < 1 and not need_superuser:
3617 - portage_group_warning()
3618 - return 1
3619 -
3620 - disable_emergelog = False
3621 - if disable_emergelog:
3622 - """ Disable emergelog for everything except build or unmerge
3623 - operations. This helps minimize parallel emerge.log entries that can
3624 - confuse log parsers. We especially want it disabled during
3625 - parallel-fetch, which uses --resume --fetchonly."""
3626 - _emerge.emergelog._disable = True
3627 -
3628 - else:
3629 - if 'EMERGE_LOG_DIR' in settings:
3630 - try:
3631 - # At least the parent needs to exist for the lock file.
3632 - portage.util.ensure_dirs(settings['EMERGE_LOG_DIR'])
3633 - except portage.exception.PortageException as e:
3634 - writemsg_level("!!! Error creating directory for " + \
3635 - "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
3636 - (settings['EMERGE_LOG_DIR'], e),
3637 - noiselevel=-1, level=logging.ERROR)
3638 - else:
3639 - global _emerge_log_dir
3640 - _emerge_log_dir = settings['EMERGE_LOG_DIR']
3641 -
3642 - if not "--pretend" in myopts:
3643 - emergelog(xterm_titles, "Started emerge on: "+\
3644 - _unicode_decode(
3645 - time.strftime("%b %d, %Y %H:%M:%S", time.localtime()),
3646 - encoding=_encodings['content'], errors='replace'))
3647 - myelogstr=""
3648 - if myopts:
3649 - myelogstr=" ".join(myopts)
3650 - if myaction:
3651 - myelogstr+=" "+myaction
3652 - if myfiles:
3653 - myelogstr += " " + " ".join(oldargs)
3654 - emergelog(xterm_titles, " *** emerge " + myelogstr)
3655 - del oldargs
3656 -
3657 - def emergeexitsig(signum, frame):
3658 - signal.signal(signal.SIGINT, signal.SIG_IGN)
3659 - signal.signal(signal.SIGTERM, signal.SIG_IGN)
3660 - portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
3661 - sys.exit(128 + signum)
3662 - signal.signal(signal.SIGINT, emergeexitsig)
3663 - signal.signal(signal.SIGTERM, emergeexitsig)
3664 -
3665 - def emergeexit():
3666 - """This gets out final log message in before we quit."""
3667 - if "--pretend" not in myopts:
3668 - emergelog(xterm_titles, " *** terminating.")
3669 - if xterm_titles:
3670 - xtermTitleReset()
3671 - portage.atexit_register(emergeexit)
3672 -
3673 -
3674 - # "update", "system", or just process files
3675 - validate_ebuild_environment(trees)
3676 -
3677 - for x in myfiles:
3678 - if x.startswith(SETPREFIX) or \
3679 - is_valid_package_atom(x, allow_repo=True):
3680 - continue
3681 - if x[:1] == os.sep:
3682 - continue
3683 - try:
3684 - os.lstat(x)
3685 - continue
3686 - except OSError:
3687 - pass
3688 - msg = []
3689 - msg.append("'%s' is not a valid package atom." % (x,))
3690 - msg.append("Please check ebuild(5) for full details.")
3691 - writemsg_level("".join("!!! %s\n" % line for line in msg),
3692 - level=logging.ERROR, noiselevel=-1)
3693 - return 1
3694 - if "--pretend" not in myopts:
3695 - display_news_notification(root_config, myopts)
3696 - retval, settings, trees, mtimedb = self.action_build(settings, trees, mtimedb,
3697 - myopts, myaction, myfiles, spinner, build_dict)
3698 - self.post_emerge(myaction, myopts, myfiles, settings["ROOT"],
3699 - trees, mtimedb, retval)
3700 -
3701 - return retval
3702 -
3703 - def make_build_list(self, build_dict, settings, portdb):
3704 - cpv = build_dict['category']+'/'+build_dict['package']+'-'+build_dict['ebuild_version']
3705 - pkgdir = os.path.join(settings['PORTDIR'], build_dict['category'] + "/" + build_dict['package'])
3706 - init_manifest = gobs_manifest(settings, pkgdir)
3707 - try:
3708 - ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir+ "/" + build_dict['package'] + "-" + build_dict['ebuild_version'] + ".ebuild")[0]
3709 - except:
3710 - ebuild_version_checksum_tree = None
3711 - if ebuild_version_checksum_tree == build_dict['checksum']:
3712 - init_flags = gobs_use_flags(settings, portdb, cpv)
3713 - build_use_flags_list = init_flags.comper_useflags(build_dict)
3714 - print("build_use_flags_list", build_use_flags_list)
3715 - manifest_error = init_manifest.check_file_in_manifest(portdb, cpv, build_dict, build_use_flags_list)
3716 - if manifest_error is None:
3717 - build_dict['check_fail'] = False
3718 - build_cpv_dict = {}
3719 - build_cpv_dict[cpv] = build_use_flags_list
3720 - print(build_cpv_dict)
3721 - return build_cpv_dict
3722 - else:
3723 - build_dict['type_fail'] = "Manifest error"
3724 - build_dict['check_fail'] = True
3725 - else:
3726 - build_dict['type_fail'] = "Wrong ebuild checksum"
3727 - build_dict['check_fail'] = True
3728 - if build_dict['check_fail'] is True:
3729 - self.log_fail_queru(build_dict, settings)
3730 - return None
3731 - return build_cpv_dict
3732 -
3733 - def build_procces(self, buildqueru_cpv_dict, build_dict, settings, portdb):
3734 - build_cpv_list = []
3735 - depclean_fail = True
3736 - for k, build_use_flags_list in buildqueru_cpv_dict.iteritems():
3737 - build_cpv_list.append("=" + k)
3738 - if not build_use_flags_list == None:
3739 - build_use_flags = ""
3740 - for flags in build_use_flags_list:
3741 - build_use_flags = build_use_flags + flags + " "
3742 - filetext = '=' + k + ' ' + build_use_flags
3743 - print('filetext', filetext)
3744 - with open("/etc/portage/package.use/gobs.use", "a") as f:
3745 - f.write(filetext)
3746 - f.write('\n')
3747 - print('build_cpv_list', build_cpv_list)
3748 - argscmd = []
3749 - if not "nooneshort" in build_dict['post_message']:
3750 - argscmd.append("--oneshot")
3751 - argscmd.append("--buildpkg")
3752 - argscmd.append("--usepkg")
3753 - for build_cpv in build_cpv_list:
3754 - argscmd.append(build_cpv)
3755 - print(argscmd)
3756 - # Call main_emerge to build the package in build_cpv_list
3757 - build_fail = self.emerge_main(argscmd, build_dict)
3758 - # Run depclean
3759 - print('build_fail', build_fail)
3760 - if not "noclean" in build_dict['post_message']:
3761 - depclean_fail = main_depclean()
3762 - try:
3763 - os.remove("/etc/portage/package.use/gobs.use")
3764 - except:
3765 - pass
3766 - if build_fail is False or depclean_fail is False:
3767 - return False
3768 - return True
3769 -
3770 - def procces_qureru(self):
3771 - conn=CM.getConnection()
3772 - build_dict = {}
3773 - build_dict = get_packages_to_build(conn, self._config_profile)
3774 - settings, trees, mtimedb = load_emerge_config()
3775 - portdb = trees[settings["ROOT"]]["porttree"].dbapi
3776 - if build_dict is None:
3777 - CM.putConnection(conn)
3778 - return
3779 - print("build_dict", build_dict)
3780 - if not build_dict['ebuild_id'] is None and build_dict['checksum'] is not None:
3781 - buildqueru_cpv_dict = self.make_build_list(build_dict, settings, portdb)
3782 - print('buildqueru_cpv_dict', buildqueru_cpv_dict)
3783 - if buildqueru_cpv_dict is None:
3784 - CM.putConnection(conn)
3785 - return
3786 - fail_build_procces = self.build_procces(buildqueru_cpv_dict, build_dict, settings, portdb)
3787 - CM.putConnection(conn)
3788 - return
3789 - if not build_dict['post_message'] is [] and build_dict['ebuild_id'] is None:
3790 - CM.putConnection(conn)
3791 - return
3792 - if not build_dict['ebuild_id'] is None and build_dict['checksum'] is None:
3793 - del_old_queue(conn, build_dict['queue_id'])
3794 - CM.putConnection(conn)
3795 - return
3796
3797 diff --git a/gobs/pym/categories.py~ b/gobs/pym/categories.py~
3798 deleted file mode 100644
3799 index f3b2457..0000000
3800 --- a/gobs/pym/categories.py~
3801 +++ /dev/null
3802 @@ -1,30 +0,0 @@
3803 -#from gobs.text import gobs_text
3804 -from gobs.text import get_file_text
3805 -import portage
3806 -from gobs.readconf import get_conf_settings
3807 -reader=get_conf_settings()
3808 -gobs_settings_dict=reader.read_gobs_settings_all()
3809 -# make a CM
3810 -from gobs.ConnectionManager import connectionManager
3811 -CM=connectionManager(gobs_settings_dict)
3812 -#selectively import the pgsql/mysql querys
3813 -if CM.getName()=='pgsql':
3814 - from gobs.pgsql import *
3815 -
3816 -class gobs_categories(object):
3817 -
3818 - def __init__(self, mysettings):
3819 - self._mysettings = mysettings
3820 -
3821 - def update_categories_db(self, categories):
3822 - conn=CM.getConnection()
3823 - # Update categories_meta in the db
3824 - categories_dir = self._mysettings['PORTDIR'] + "/" + categories + "/"
3825 - categories_metadata_xml_checksum_tree = portage.checksum.sha256hash(categories_dir + "metadata.xml")[0]
3826 - categories_metadata_xml_text_tree = get_file_text(categories_dir + "metadata.xml")
3827 - categories_metadata_xml_checksum_db = get_categories_checksum_db(conn, categories)
3828 - if categories_metadata_xml_checksum_db is None:
3829 - add_new_categories_meta_sql(self._conn,categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree)
3830 - elif categories_metadata_xml_checksum_db != categories_metadata_xml_checksum_tree:
3831 - update_categories_meta_sql(self._conn,categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree)
3832 - CM.putConnection(conn)
3833
3834 diff --git a/gobs/pym/check_setup.py~ b/gobs/pym/check_setup.py~
3835 deleted file mode 100644
3836 index 8b9b883..0000000
3837 --- a/gobs/pym/check_setup.py~
3838 +++ /dev/null
3839 @@ -1,102 +0,0 @@
3840 -from __future__ import print_function
3841 -import portage
3842 -import os
3843 -import errno
3844 -from git import *
3845 -from gobs.text import get_file_text
3846 -from gobs.sync import sync_tree
3847 -
3848 -from gobs.readconf import get_conf_settings
3849 -reader=get_conf_settings()
3850 -gobs_settings_dict=reader.read_gobs_settings_all()
3851 -# make a CM
3852 -from gobs.ConnectionManager import connectionManager
3853 -CM=connectionManager(gobs_settings_dict)
3854 -#selectively import the pgsql/mysql querys
3855 -if CM.getName()=='pgsql':
3856 - from gobs.pgsql import *
3857 -
3858 -def git_pull():
3859 - repo = Repo("/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/")
3860 - repo_remote = repo.remotes.origin
3861 - repo_remote.pull()
3862 - master = repo.head.reference
3863 - print(master.log())
3864 -
3865 -def check_make_conf():
3866 - # FIXME: mark any config updating true in the db when updating the configs
3867 - # Get the config list
3868 - conn=CM.getConnection()
3869 - config_list_all = get_config_list_all(conn)
3870 - print("Checking configs for changes and errors")
3871 - configsDict = {}
3872 - for config_id in config_list_all:
3873 - attDict={}
3874 - # Set the config dir
3875 - check_config_dir = "/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id[0] + "/"
3876 - make_conf_file = check_config_dir + "etc/portage/make.conf"
3877 - # Check if we can open the file and close it
3878 - # Check if we have some error in the file (portage.util.getconfig)
3879 - # Check if we envorment error with the config (settings.validate)
3880 - try:
3881 - open_make_conf = open(make_conf_file)
3882 - open_make_conf.close()
3883 - portage.util.getconfig(make_conf_file, tolerant=0, allow_sourcing=False, expand=True)
3884 - mysettings = portage.config(config_root = check_config_dir)
3885 - mysettings.validate()
3886 - # With errors we update the db on the config and disable the config
3887 - except Exception as e:
3888 - attDict['config_error'] = e
3889 - attDict['active'] = 'False'
3890 - print("Fail", config_id[0])
3891 - else:
3892 - attDict['config_error'] = ''
3893 - attDict['active'] = 'True'
3894 - print("Pass", config_id[0])
3895 - # Get the checksum of make.conf
3896 - make_conf_checksum_tree = portage.checksum.sha256hash(make_conf_file)[0]
3897 - # Check if we have change the make.conf and update the db with it
3898 - attDict['make_conf_text'] = get_file_text(make_conf_file)
3899 - attDict['make_conf_checksum_tree'] = make_conf_checksum_tree
3900 - configsDict[config_id]=attDict
3901 - update__make_conf(conn,configsDict)
3902 - CM.putConnection(conn)
3903 - print("Updated configurtions")
3904 -
3905 -def check_make_conf_guest(config_profile):
3906 - conn=CM.getConnection()
3907 - print('config_profile', config_profile)
3908 - make_conf_checksum_db = get_profile_checksum(conn,config_profile)
3909 - print('make_conf_checksum_db', make_conf_checksum_db)
3910 - if make_conf_checksum_db is None:
3911 - if get_profile_sync(conn, config_profile) is True
3912 - if sync_tree():
3913 - reset_profile_sync(conn, config_profile)
3914 - CM.putConnection(conn)
3915 - return False
3916 - make_conf_file = "/etc/portage/make.conf"
3917 - make_conf_checksum_tree = portage.checksum.sha256hash(make_conf_file)[0]
3918 - print('make_conf_checksum_tree', make_conf_checksum_tree)
3919 - if make_conf_checksum_tree != make_conf_checksum_db[0]:
3920 - CM.putConnection(conn)
3921 - return False
3922 - # Check if we can open the file and close it
3923 - # Check if we have some error in the file (portage.util.getconfig)
3924 - # Check if we envorment error with the config (settings.validate)
3925 - try:
3926 - open_make_conf = open(make_conf_file)
3927 - open_make_conf.close()
3928 - portage.util.getconfig(make_conf_file, tolerant=0, allow_sourcing=False, expand=True)
3929 - mysettings = portage.config(config_root = "/")
3930 - mysettings.validate()
3931 - # With errors we return false
3932 - except Exception as e:
3933 - CM.putConnection(conn)
3934 - return False
3935 - CM.putConnection(conn)
3936 - return True
3937 -
3938 -def check_configure_guest(config_profile):
3939 - pass_make_conf = check_make_conf_guest(config_profile)
3940 - print(pass_make_conf)
3941 - return pass_make_conf
3942 \ No newline at end of file
3943
3944 diff --git a/gobs/pym/depclean.py~ b/gobs/pym/depclean.py~
3945 deleted file mode 100644
3946 index b6096b6..0000000
3947 --- a/gobs/pym/depclean.py~
3948 +++ /dev/null
3949 @@ -1,632 +0,0 @@
3950 -from __future__ import print_function
3951 -import errno
3952 -import portage
3953 -from portage._sets.base import InternalPackageSet
3954 -from _emerge.main import parse_opts
3955 -from _emerge.create_depgraph_params import create_depgraph_params
3956 -from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
3957 -from _emerge.UnmergeDepPriority import UnmergeDepPriority
3958 -from _emerge.SetArg import SetArg
3959 -from _emerge.actions import load_emerge_config
3960 -from _emerge.Package import Package
3961 -from _emerge.unmerge import unmerge
3962 -from portage.util import cmp_sort_key, writemsg, \
3963 - writemsg_level, writemsg_stdout
3964 -from portage.util.digraph import digraph
3965 -
3966 -def main_depclean():
3967 - mysettings, mytrees, mtimedb = load_emerge_config()
3968 - myroot = mysettings["ROOT"]
3969 - root_config = mytrees[myroot]["root_config"]
3970 - psets = root_config.setconfig.psets
3971 - args_set = InternalPackageSet(allow_repo=True)
3972 - spinner=None
3973 - scheduler=None
3974 - tmpcmdline = []
3975 - tmpcmdline.append("--depclean")
3976 - tmpcmdline.append("--pretend")
3977 - print("depclean",tmpcmdline)
3978 - myaction, myopts, myfiles = parse_opts(tmpcmdline, silent=False)
3979 - if myfiles:
3980 - args_set.update(myfiles)
3981 - matched_packages = False
3982 - for x in args_set:
3983 - if vardb.match(x):
3984 - matched_packages = True
3985 - if not matched_packages:
3986 - return 0
3987 -
3988 - rval, cleanlist, ordered, req_pkg_count, unresolvable = calc_depclean(mysettings, mytrees, mtimedb["ldpath"], myopts, myaction, args_set, spinner)
3989 - print('rval, cleanlist, ordered, req_pkg_count, unresolvable', rval, cleanlist, ordered, req_pkg_count, unresolvable)
3990 - if unresolvable != []:
3991 - return True
3992 - if cleanlist != []:
3993 - conflict_package_list = []
3994 - for depclean_cpv in cleanlist:
3995 - if portage.versions.cpv_getkey(depclean_cpv) in list(psets["system"]):
3996 - conflict_package_list.append(depclean_cpv)
3997 - if portage.versions.cpv_getkey(depclean_cpv) in list(psets['selected']):
3998 - conflict_package_list.append(depclean_cpv)
3999 - print('conflict_package_list', conflict_package_list)
4000 - if conflict_package_list == []:
4001 - tmpcmdline = []
4002 - tmpcmdline.append("--depclean")
4003 - myaction, myopts, myfiles = parse_opts(tmpcmdline, silent=False)
4004 - unmerge(root_config, myopts, "unmerge", cleanlist, mtimedb["ldpath"], ordered=ordered, scheduler=scheduler)
4005 - print("Number removed: "+str(len(cleanlist)))
4006 - return True
4007 - return True
4008 -
4009 -def calc_depclean(settings, trees, ldpath_mtimes,
4010 - myopts, action, args_set, spinner):
4011 - allow_missing_deps = bool(args_set)
4012 -
4013 - debug = '--debug' in myopts
4014 - xterm_titles = "notitles" not in settings.features
4015 - myroot = settings["ROOT"]
4016 - root_config = trees[myroot]["root_config"]
4017 - psets = root_config.setconfig.psets
4018 - deselect = myopts.get('--deselect') != 'n'
4019 - required_sets = {}
4020 - required_sets['world'] = psets['world']
4021 -
4022 - # When removing packages, a temporary version of the world 'selected'
4023 - # set may be used which excludes packages that are intended to be
4024 - # eligible for removal.
4025 - selected_set = psets['selected']
4026 - required_sets['selected'] = selected_set
4027 - protected_set = InternalPackageSet()
4028 - protected_set_name = '____depclean_protected_set____'
4029 - required_sets[protected_set_name] = protected_set
4030 - system_set = psets["system"]
4031 -
4032 - if not system_set or not selected_set:
4033 -
4034 - if not system_set:
4035 - writemsg_level("!!! You have no system list.\n",
4036 - level=logging.ERROR, noiselevel=-1)
4037 -
4038 - if not selected_set:
4039 - writemsg_level("!!! You have no world file.\n",
4040 - level=logging.WARNING, noiselevel=-1)
4041 -
4042 - writemsg_level("!!! Proceeding is likely to " + \
4043 - "break your installation.\n",
4044 - level=logging.WARNING, noiselevel=-1)
4045 - if "--pretend" not in myopts:
4046 - countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
4047 -
4048 - if action == "depclean":
4049 - print(" >>> depclean")
4050 -
4051 - writemsg_level("\nCalculating dependencies ")
4052 - resolver_params = create_depgraph_params(myopts, "remove")
4053 - resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
4054 - resolver._load_vdb()
4055 - vardb = resolver._frozen_config.trees[myroot]["vartree"].dbapi
4056 - real_vardb = trees[myroot]["vartree"].dbapi
4057 -
4058 - if action == "depclean":
4059 -
4060 - if args_set:
4061 -
4062 - if deselect:
4063 - # Start with an empty set.
4064 - selected_set = InternalPackageSet()
4065 - required_sets['selected'] = selected_set
4066 - # Pull in any sets nested within the selected set.
4067 - selected_set.update(psets['selected'].getNonAtoms())
4068 -
4069 - # Pull in everything that's installed but not matched
4070 - # by an argument atom since we don't want to clean any
4071 - # package if something depends on it.
4072 - for pkg in vardb:
4073 - if spinner:
4074 - spinner.update()
4075 -
4076 - try:
4077 - if args_set.findAtomForPackage(pkg) is None:
4078 - protected_set.add("=" + pkg.cpv)
4079 - continue
4080 - except portage.exception.InvalidDependString as e:
4081 - show_invalid_depstring_notice(pkg,
4082 - pkg.metadata["PROVIDE"], str(e))
4083 - del e
4084 - protected_set.add("=" + pkg.cpv)
4085 - continue
4086 -
4087 - elif action == "prune":
4088 -
4089 - if deselect:
4090 - # Start with an empty set.
4091 - selected_set = InternalPackageSet()
4092 - required_sets['selected'] = selected_set
4093 - # Pull in any sets nested within the selected set.
4094 - selected_set.update(psets['selected'].getNonAtoms())
4095 -
4096 - # Pull in everything that's installed since we don't
4097 - # to prune a package if something depends on it.
4098 - protected_set.update(vardb.cp_all())
4099 -
4100 - if not args_set:
4101 -
4102 - # Try to prune everything that's slotted.
4103 - for cp in vardb.cp_all():
4104 - if len(vardb.cp_list(cp)) > 1:
4105 - args_set.add(cp)
4106 -
4107 - # Remove atoms from world that match installed packages
4108 - # that are also matched by argument atoms, but do not remove
4109 - # them if they match the highest installed version.
4110 - for pkg in vardb:
4111 - spinner.update()
4112 - pkgs_for_cp = vardb.match_pkgs(pkg.cp)
4113 - if not pkgs_for_cp or pkg not in pkgs_for_cp:
4114 - raise AssertionError("package expected in matches: " + \
4115 - "cp = %s, cpv = %s matches = %s" % \
4116 - (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
4117 -
4118 - highest_version = pkgs_for_cp[-1]
4119 - if pkg == highest_version:
4120 - # pkg is the highest version
4121 - protected_set.add("=" + pkg.cpv)
4122 - continue
4123 -
4124 - if len(pkgs_for_cp) <= 1:
4125 - raise AssertionError("more packages expected: " + \
4126 - "cp = %s, cpv = %s matches = %s" % \
4127 - (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
4128 -
4129 - try:
4130 - if args_set.findAtomForPackage(pkg) is None:
4131 - protected_set.add("=" + pkg.cpv)
4132 - continue
4133 - except portage.exception.InvalidDependString as e:
4134 - show_invalid_depstring_notice(pkg,
4135 - pkg.metadata["PROVIDE"], str(e))
4136 - del e
4137 - protected_set.add("=" + pkg.cpv)
4138 - continue
4139 -
4140 - if resolver._frozen_config.excluded_pkgs:
4141 - excluded_set = resolver._frozen_config.excluded_pkgs
4142 - required_sets['__excluded__'] = InternalPackageSet()
4143 -
4144 - for pkg in vardb:
4145 - if spinner:
4146 - spinner.update()
4147 -
4148 - try:
4149 - if excluded_set.findAtomForPackage(pkg):
4150 - required_sets['__excluded__'].add("=" + pkg.cpv)
4151 - except portage.exception.InvalidDependString as e:
4152 - show_invalid_depstring_notice(pkg,
4153 - pkg.metadata["PROVIDE"], str(e))
4154 - del e
4155 - required_sets['__excluded__'].add("=" + pkg.cpv)
4156 -
4157 - success = resolver._complete_graph(required_sets={myroot:required_sets})
4158 - writemsg_level("\b\b... done!\n")
4159 -
4160 - resolver.display_problems()
4161 -
4162 - if not success:
4163 - return True, [], False, 0, []
4164 -
4165 - def unresolved_deps():
4166 -
4167 - unresolvable = set()
4168 - for dep in resolver._dynamic_config._initially_unsatisfied_deps:
4169 - if isinstance(dep.parent, Package) and \
4170 - (dep.priority > UnmergeDepPriority.SOFT):
4171 - unresolvable.add((dep.atom, dep.parent.cpv))
4172 -
4173 - if not unresolvable:
4174 - return None
4175 -
4176 - if unresolvable and not allow_missing_deps:
4177 -
4178 - prefix = bad(" * ")
4179 - msg = []
4180 - msg.append("Dependencies could not be completely resolved due to")
4181 - msg.append("the following required packages not being installed:")
4182 - msg.append("")
4183 - for atom, parent in unresolvable:
4184 - msg.append(" %s pulled in by:" % (atom,))
4185 - msg.append(" %s" % (parent,))
4186 - msg.append("")
4187 - msg.extend(textwrap.wrap(
4188 - "Have you forgotten to do a complete update prior " + \
4189 - "to depclean? The most comprehensive command for this " + \
4190 - "purpose is as follows:", 65
4191 - ))
4192 - msg.append("")
4193 - msg.append(" " + \
4194 - good("emerge --update --newuse --deep --with-bdeps=y @world"))
4195 - msg.append("")
4196 - msg.extend(textwrap.wrap(
4197 - "Note that the --with-bdeps=y option is not required in " + \
4198 - "many situations. Refer to the emerge manual page " + \
4199 - "(run `man emerge`) for more information about " + \
4200 - "--with-bdeps.", 65
4201 - ))
4202 - msg.append("")
4203 - msg.extend(textwrap.wrap(
4204 - "Also, note that it may be necessary to manually uninstall " + \
4205 - "packages that no longer exist in the portage tree, since " + \
4206 - "it may not be possible to satisfy their dependencies.", 65
4207 - ))
4208 - if action == "prune":
4209 - msg.append("")
4210 - msg.append("If you would like to ignore " + \
4211 - "dependencies then use %s." % good("--nodeps"))
4212 - writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
4213 - level=logging.ERROR, noiselevel=-1)
4214 - return unresolvable
4215 - return None
4216 -
4217 - unresolvable = unresolved_deps()
4218 - if not unresolvable is None:
4219 - return False, [], False, 0, unresolvable
4220 -
4221 - graph = resolver._dynamic_config.digraph.copy()
4222 - required_pkgs_total = 0
4223 - for node in graph:
4224 - if isinstance(node, Package):
4225 - required_pkgs_total += 1
4226 -
4227 - def show_parents(child_node):
4228 - parent_nodes = graph.parent_nodes(child_node)
4229 - if not parent_nodes:
4230 - # With --prune, the highest version can be pulled in without any
4231 - # real parent since all installed packages are pulled in. In that
4232 - # case there's nothing to show here.
4233 - return
4234 - parent_strs = []
4235 - for node in parent_nodes:
4236 - parent_strs.append(str(getattr(node, "cpv", node)))
4237 - parent_strs.sort()
4238 - msg = []
4239 - msg.append(" %s pulled in by:\n" % (child_node.cpv,))
4240 - for parent_str in parent_strs:
4241 - msg.append(" %s\n" % (parent_str,))
4242 - msg.append("\n")
4243 - portage.writemsg_stdout("".join(msg), noiselevel=-1)
4244 -
4245 - def cmp_pkg_cpv(pkg1, pkg2):
4246 - """Sort Package instances by cpv."""
4247 - if pkg1.cpv > pkg2.cpv:
4248 - return 1
4249 - elif pkg1.cpv == pkg2.cpv:
4250 - return 0
4251 - else:
4252 - return -1
4253 -
4254 - def create_cleanlist():
4255 -
4256 - # Never display the special internal protected_set.
4257 - for node in graph:
4258 - if isinstance(node, SetArg) and node.name == protected_set_name:
4259 - graph.remove(node)
4260 - break
4261 -
4262 - pkgs_to_remove = []
4263 -
4264 - if action == "depclean":
4265 - if args_set:
4266 -
4267 - for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
4268 - arg_atom = None
4269 - try:
4270 - arg_atom = args_set.findAtomForPackage(pkg)
4271 - except portage.exception.InvalidDependString:
4272 - # this error has already been displayed by now
4273 - continue
4274 -
4275 - if arg_atom:
4276 - if pkg not in graph:
4277 - pkgs_to_remove.append(pkg)
4278 - elif "--verbose" in myopts:
4279 - show_parents(pkg)
4280 -
4281 - else:
4282 - for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
4283 - if pkg not in graph:
4284 - pkgs_to_remove.append(pkg)
4285 - elif "--verbose" in myopts:
4286 - show_parents(pkg)
4287 -
4288 - elif action == "prune":
4289 -
4290 - for atom in args_set:
4291 - for pkg in vardb.match_pkgs(atom):
4292 - if pkg not in graph:
4293 - pkgs_to_remove.append(pkg)
4294 - elif "--verbose" in myopts:
4295 - show_parents(pkg)
4296 -
4297 - return pkgs_to_remove
4298 -
4299 - cleanlist = create_cleanlist()
4300 - clean_set = set(cleanlist)
4301 -
4302 - if cleanlist and \
4303 - real_vardb._linkmap is not None and \
4304 - myopts.get("--depclean-lib-check") != "n" and \
4305 - "preserve-libs" not in settings.features:
4306 -
4307 - # Check if any of these packages are the sole providers of libraries
4308 - # with consumers that have not been selected for removal. If so, these
4309 - # packages and any dependencies need to be added to the graph.
4310 - linkmap = real_vardb._linkmap
4311 - consumer_cache = {}
4312 - provider_cache = {}
4313 - consumer_map = {}
4314 -
4315 - writemsg_level(">>> Checking for lib consumers...\n")
4316 -
4317 - for pkg in cleanlist:
4318 - pkg_dblink = real_vardb._dblink(pkg.cpv)
4319 - consumers = {}
4320 -
4321 - for lib in pkg_dblink.getcontents():
4322 - lib = lib[len(myroot):]
4323 - lib_key = linkmap._obj_key(lib)
4324 - lib_consumers = consumer_cache.get(lib_key)
4325 - if lib_consumers is None:
4326 - try:
4327 - lib_consumers = linkmap.findConsumers(lib_key)
4328 - except KeyError:
4329 - continue
4330 - consumer_cache[lib_key] = lib_consumers
4331 - if lib_consumers:
4332 - consumers[lib_key] = lib_consumers
4333 -
4334 - if not consumers:
4335 - continue
4336 -
4337 - for lib, lib_consumers in list(consumers.items()):
4338 - for consumer_file in list(lib_consumers):
4339 - if pkg_dblink.isowner(consumer_file):
4340 - lib_consumers.remove(consumer_file)
4341 - if not lib_consumers:
4342 - del consumers[lib]
4343 -
4344 - if not consumers:
4345 - continue
4346 -
4347 - for lib, lib_consumers in consumers.items():
4348 -
4349 - soname = linkmap.getSoname(lib)
4350 -
4351 - consumer_providers = []
4352 - for lib_consumer in lib_consumers:
4353 - providers = provider_cache.get(lib)
4354 - if providers is None:
4355 - providers = linkmap.findProviders(lib_consumer)
4356 - provider_cache[lib_consumer] = providers
4357 - if soname not in providers:
4358 - # Why does this happen?
4359 - continue
4360 - consumer_providers.append(
4361 - (lib_consumer, providers[soname]))
4362 -
4363 - consumers[lib] = consumer_providers
4364 -
4365 - consumer_map[pkg] = consumers
4366 -
4367 - if consumer_map:
4368 -
4369 - search_files = set()
4370 - for consumers in consumer_map.values():
4371 - for lib, consumer_providers in consumers.items():
4372 - for lib_consumer, providers in consumer_providers:
4373 - search_files.add(lib_consumer)
4374 - search_files.update(providers)
4375 -
4376 - writemsg_level(">>> Assigning files to packages...\n")
4377 - file_owners = real_vardb._owners.getFileOwnerMap(search_files)
4378 -
4379 - for pkg, consumers in list(consumer_map.items()):
4380 - for lib, consumer_providers in list(consumers.items()):
4381 - lib_consumers = set()
4382 -
4383 - for lib_consumer, providers in consumer_providers:
4384 - owner_set = file_owners.get(lib_consumer)
4385 - provider_dblinks = set()
4386 - provider_pkgs = set()
4387 -
4388 - if len(providers) > 1:
4389 - for provider in providers:
4390 - provider_set = file_owners.get(provider)
4391 - if provider_set is not None:
4392 - provider_dblinks.update(provider_set)
4393 -
4394 - if len(provider_dblinks) > 1:
4395 - for provider_dblink in provider_dblinks:
4396 - provider_pkg = resolver._pkg(
4397 - provider_dblink.mycpv, "installed",
4398 - root_config, installed=True)
4399 - if provider_pkg not in clean_set:
4400 - provider_pkgs.add(provider_pkg)
4401 -
4402 - if provider_pkgs:
4403 - continue
4404 -
4405 - if owner_set is not None:
4406 - lib_consumers.update(owner_set)
4407 -
4408 - for consumer_dblink in list(lib_consumers):
4409 - if resolver._pkg(consumer_dblink.mycpv, "installed",
4410 - root_config, installed=True) in clean_set:
4411 - lib_consumers.remove(consumer_dblink)
4412 - continue
4413 -
4414 - if lib_consumers:
4415 - consumers[lib] = lib_consumers
4416 - else:
4417 - del consumers[lib]
4418 - if not consumers:
4419 - del consumer_map[pkg]
4420 -
4421 - if consumer_map:
4422 - # TODO: Implement a package set for rebuilding consumer packages.
4423 -
4424 - msg = "In order to avoid breakage of link level " + \
4425 - "dependencies, one or more packages will not be removed. " + \
4426 - "This can be solved by rebuilding " + \
4427 - "the packages that pulled them in."
4428 -
4429 - prefix = bad(" * ")
4430 - from textwrap import wrap
4431 - writemsg_level("".join(prefix + "%s\n" % line for \
4432 - line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
4433 -
4434 - msg = []
4435 - for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
4436 - consumers = consumer_map[pkg]
4437 - consumer_libs = {}
4438 - for lib, lib_consumers in consumers.items():
4439 - for consumer in lib_consumers:
4440 - consumer_libs.setdefault(
4441 - consumer.mycpv, set()).add(linkmap.getSoname(lib))
4442 - unique_consumers = set(chain(*consumers.values()))
4443 - unique_consumers = sorted(consumer.mycpv \
4444 - for consumer in unique_consumers)
4445 - msg.append("")
4446 - msg.append(" %s pulled in by:" % (pkg.cpv,))
4447 - for consumer in unique_consumers:
4448 - libs = consumer_libs[consumer]
4449 - msg.append(" %s needs %s" % \
4450 - (consumer, ', '.join(sorted(libs))))
4451 - msg.append("")
4452 - writemsg_level("".join(prefix + "%s\n" % line for line in msg),
4453 - level=logging.WARNING, noiselevel=-1)
4454 -
4455 - # Add lib providers to the graph as children of lib consumers,
4456 - # and also add any dependencies pulled in by the provider.
4457 - writemsg_level(">>> Adding lib providers to graph...\n")
4458 -
4459 - for pkg, consumers in consumer_map.items():
4460 - for consumer_dblink in set(chain(*consumers.values())):
4461 - consumer_pkg = resolver._pkg(consumer_dblink.mycpv,
4462 - "installed", root_config, installed=True)
4463 - if not resolver._add_pkg(pkg,
4464 - Dependency(parent=consumer_pkg,
4465 - priority=UnmergeDepPriority(runtime=True),
4466 - root=pkg.root)):
4467 - resolver.display_problems()
4468 - return True, [], False, 0, []
4469 -
4470 - writemsg_level("\nCalculating dependencies ")
4471 - success = resolver._complete_graph(
4472 - required_sets={myroot:required_sets})
4473 - writemsg_level("\b\b... done!\n")
4474 - resolver.display_problems()
4475 - if not success:
4476 - return True, [], False, 0, []
4477 - unresolvable = unresolved_deps()
4478 - if not unresolvable is None:
4479 - return False, [], False, 0, unresolvable
4480 -
4481 - graph = resolver._dynamic_config.digraph.copy()
4482 - required_pkgs_total = 0
4483 - for node in graph:
4484 - if isinstance(node, Package):
4485 - required_pkgs_total += 1
4486 - cleanlist = create_cleanlist()
4487 - if not cleanlist:
4488 - return 0, [], False, required_pkgs_total, unresolvable
4489 - clean_set = set(cleanlist)
4490 -
4491 - if clean_set:
4492 - writemsg_level(">>> Calculating removal order...\n")
4493 - # Use a topological sort to create an unmerge order such that
4494 - # each package is unmerged before it's dependencies. This is
4495 - # necessary to avoid breaking things that may need to run
4496 - # during pkg_prerm or pkg_postrm phases.
4497 -
4498 - # Create a new graph to account for dependencies between the
4499 - # packages being unmerged.
4500 - graph = digraph()
4501 - del cleanlist[:]
4502 -
4503 - dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4504 - runtime = UnmergeDepPriority(runtime=True)
4505 - runtime_post = UnmergeDepPriority(runtime_post=True)
4506 - buildtime = UnmergeDepPriority(buildtime=True)
4507 - priority_map = {
4508 - "RDEPEND": runtime,
4509 - "PDEPEND": runtime_post,
4510 - "DEPEND": buildtime,
4511 - }
4512 -
4513 - for node in clean_set:
4514 - graph.add(node, None)
4515 - mydeps = []
4516 - for dep_type in dep_keys:
4517 - depstr = node.metadata[dep_type]
4518 - if not depstr:
4519 - continue
4520 - priority = priority_map[dep_type]
4521 -
4522 - try:
4523 - atoms = resolver._select_atoms(myroot, depstr,
4524 - myuse=node.use.enabled, parent=node,
4525 - priority=priority)[node]
4526 - except portage.exception.InvalidDependString:
4527 - # Ignore invalid deps of packages that will
4528 - # be uninstalled anyway.
4529 - continue
4530 -
4531 - for atom in atoms:
4532 - if not isinstance(atom, portage.dep.Atom):
4533 - # Ignore invalid atoms returned from dep_check().
4534 - continue
4535 - if atom.blocker:
4536 - continue
4537 - matches = vardb.match_pkgs(atom)
4538 - if not matches:
4539 - continue
4540 - for child_node in matches:
4541 - if child_node in clean_set:
4542 - graph.add(child_node, node, priority=priority)
4543 -
4544 - ordered = True
4545 - if len(graph.order) == len(graph.root_nodes()):
4546 - # If there are no dependencies between packages
4547 - # let unmerge() group them by cat/pn.
4548 - ordered = False
4549 - cleanlist = [pkg.cpv for pkg in graph.order]
4550 - else:
4551 - # Order nodes from lowest to highest overall reference count for
4552 - # optimal root node selection (this can help minimize issues
4553 - # with unaccounted implicit dependencies).
4554 - node_refcounts = {}
4555 - for node in graph.order:
4556 - node_refcounts[node] = len(graph.parent_nodes(node))
4557 - def cmp_reference_count(node1, node2):
4558 - return node_refcounts[node1] - node_refcounts[node2]
4559 - graph.order.sort(key=cmp_sort_key(cmp_reference_count))
4560 -
4561 - ignore_priority_range = [None]
4562 - ignore_priority_range.extend(
4563 - range(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
4564 - while graph:
4565 - for ignore_priority in ignore_priority_range:
4566 - nodes = graph.root_nodes(ignore_priority=ignore_priority)
4567 - if nodes:
4568 - break
4569 - if not nodes:
4570 - raise AssertionError("no root nodes")
4571 - if ignore_priority is not None:
4572 - # Some deps have been dropped due to circular dependencies,
4573 - # so only pop one node in order to minimize the number that
4574 - # are dropped.
4575 - del nodes[1:]
4576 - for node in nodes:
4577 - graph.remove(node)
4578 - cleanlist.append(node.cpv)
4579 -
4580 - return True, cleanlist, ordered, required_pkgs_total, []
4581 - return True, [], False, required_pkgs_total, []
4582
4583 diff --git a/gobs/pym/init_setup_profile.py~ b/gobs/pym/init_setup_profile.py~
4584 deleted file mode 100644
4585 index e647e1f..0000000
4586 --- a/gobs/pym/init_setup_profile.py~
4587 +++ /dev/null
4588 @@ -1,86 +0,0 @@
4589 -#!/usr/bin/python
4590 -# Copyright 2006-2011 Gentoo Foundation
4591 -# Distributed under the terms of the GNU General Public License v2
4592 -
4593 -""" This code will update the sql backend with needed info for
4594 - the Frontend and the Guest deamon. """
4595 -
4596 -import sys
4597 -import os
4598 -
4599 -# Get the options from the config file set in gobs.readconf
4600 -from gobs.readconf import get_conf_settings
4601 -reader=get_conf_settings()
4602 -gobs_settings_dict=reader.read_gobs_settings_all()
4603 -# make a CM
4604 -from gobs.ConnectionManager import connectionManager
4605 -CM=connectionManager(gobs_settings_dict)
4606 -#selectively import the pgsql/mysql querys
4607 -if CM.getName()=='pgsql':
4608 - from gobs.pgsql import *
4609 -
4610 -from gobs.check_setup import check_make_conf, git_pull
4611 -from gobs.package import gobs_package
4612 -import portage
4613 -
4614 -def setup_profile_main(args=None):
4615 - """
4616 - @param args: command arguments (default: sys.argv[1:])
4617 - @type args: list
4618 - """
4619 - conn=CM.getConnection()
4620 - if args is None:
4621 - args = sys.argv[1:]
4622 - if args[0] == "-add":
4623 - git_pull()
4624 - check_make_conf()
4625 - print "Check configs done"
4626 - # Get default config from the configs table and default_config=1
4627 - config_id = args[1]
4628 - default_config_root = "/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id + "/"
4629 - # Set config_root (PORTAGE_CONFIGROOT) to default_config_root
4630 - mysettings = portage.config(config_root = default_config_root)
4631 - myportdb = portage.portdbapi(mysettings=mysettings)
4632 - init_package = gobs_package(mysettings, myportdb)
4633 - # get the cp list
4634 - package_list_tree = package_list_tree = myportdb.cp_all()
4635 - print "Setting default config to:", config_id
4636 - config_id_list = []
4637 - config_id_list.append(config_id)
4638 - for package_line in sorted(package_list_tree):
4639 - # FIXME: remove the check for gobs when in tree
4640 - if package_line != "dev-python/gobs":
4641 - build_dict = {}
4642 - packageDict = {}
4643 - ebuild_id_list = []
4644 - # split the cp to categories and package
4645 - element = package_line.split('/')
4646 - categories = element[0]
4647 - package = element[1]
4648 - print "C", categories + "/" + package # C = Checking
4649 - pkgdir = mysettings['PORTDIR'] + "/" + categories + "/" + package
4650 - config_cpv_listDict = init_package.config_match_ebuild(categories, package, config_id_list)
4651 - if config_cpv_listDict != {}:
4652 - cpv = categories + "/" + package + "-" + config_cpv_listDict[config_id]['ebuild_version']
4653 - attDict = {}
4654 - attDict['categories'] = categories
4655 - attDict['package'] = package
4656 - attDict['ebuild_version_tree'] = config_cpv_listDict[config_id]['ebuild_version']
4657 - packageDict[cpv] = attDict
4658 - build_dict['checksum'] = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + config_cpv_listDict[config_id]['ebuild_version'] + ".ebuild")[0]
4659 - build_dict['package_id'] = have_package_db(conn, categories, package)[0]
4660 - build_dict['ebuild_version'] = config_cpv_listDict[config_id]['ebuild_version']
4661 - ebuild_id = get_ebuild_id_db_checksum(conn, build_dict)
4662 - if ebuild_id is not None:
4663 - ebuild_id_list.append(ebuild_id)
4664 - init_package.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
4665 -
4666 - if args[0] == "-del":
4667 - config_id = args[1]
4668 - querue_id_list = get_queue_id_list_config(conn, config_id)
4669 - if querue_id_list is not None:
4670 - for querue_id in querue_id_list:
4671 - del_old_queue(conn, querue_id)
4672 - CM.putConnection(conn)
4673 -
4674 -
4675 \ No newline at end of file
4676
4677 diff --git a/gobs/pym/manifest.py~ b/gobs/pym/manifest.py~
4678 deleted file mode 100644
4679 index fb29f0a..0000000
4680 --- a/gobs/pym/manifest.py~
4681 +++ /dev/null
4682 @@ -1,124 +0,0 @@
4683 -import os
4684 -import warnings
4685 -from portage import os, _encodings, _unicode_decode
4686 -from portage.exception import DigestException, FileNotFound
4687 -from portage.localization import _
4688 -from portage.manifest import Manifest
4689 -import portage
4690 -
4691 -class gobs_manifest(object):
4692 -
4693 - def __init__ (self, mysettings, pkgdir):
4694 - self._mysettings = mysettings
4695 - self._pkgdir = pkgdir
4696 -
4697 - # Copy of portage.digestcheck() but without the writemsg() stuff
4698 - def digestcheck(self):
4699 - """
4700 - Verifies checksums. Assumes all files have been downloaded.
4701 - @rtype: int
4702 - @returns: None on success and error msg on failure
4703 - """
4704 -
4705 - myfiles = []
4706 - justmanifest = None
4707 - self._mysettings['PORTAGE_QUIET'] = '1'
4708 -
4709 - if self._mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
4710 - return None
4711 - manifest_path = os.path.join(self._pkgdir, "Manifest")
4712 - if not os.path.exists(manifest_path):
4713 - return ("!!! Manifest file not found: '%s'") % manifest_path
4714 - mf = Manifest(self._pkgdir, self._mysettings["DISTDIR"])
4715 - manifest_empty = True
4716 - for d in mf.fhashdict.values():
4717 - if d:
4718 - manifest_empty = False
4719 - break
4720 - if manifest_empty:
4721 - return ("!!! Manifest is empty: '%s'") % manifest_path
4722 - try:
4723 - if "PORTAGE_PARALLEL_FETCHONLY" not in self._mysettings:
4724 - mf.checkTypeHashes("EBUILD")
4725 - mf.checkTypeHashes("AUX")
4726 - mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
4727 - for f in myfiles:
4728 - ftype = mf.findFile(f)
4729 - if ftype is None:
4730 - return ("!!! Missing digest for '%s'") % (f,)
4731 - mf.checkFileHashes(ftype, f)
4732 - except FileNotFound as e:
4733 - return ("!!! A file listed in the Manifest could not be found: %s") % str(e)
4734 - except DigestException as e:
4735 - return ("!!! Digest verification failed: %s\nReason: %s\nGot: %s\nExpected: %s") \
4736 - % (e.value[0], e.value[1], e.value[2], e.value[3])
4737 - # Make sure that all of the ebuilds are actually listed in the Manifest.
4738 - for f in os.listdir(self._pkgdir):
4739 - pf = None
4740 - if f[-7:] == '.ebuild':
4741 - pf = f[:-7]
4742 - if pf is not None and not mf.hasFile("EBUILD", f):
4743 - return ("!!! A file is not listed in the Manifest: '%s'") \
4744 - % os.path.join(pkgdir, f)
4745 - """ epatch will just grab all the patches out of a directory, so we have to
4746 - make sure there aren't any foreign files that it might grab."""
4747 - filesdir = os.path.join(self._pkgdir, "files")
4748 - for parent, dirs, files in os.walk(filesdir):
4749 - try:
4750 - parent = _unicode_decode(parent,
4751 - encoding=_encodings['fs'], errors='strict')
4752 - except UnicodeDecodeError:
4753 - parent = _unicode_decode(parent, encoding=_encodings['fs'], errors='replace')
4754 - return ("!!! Path contains invalid character(s) for encoding '%s': '%s'") \
4755 - % (_encodings['fs'], parent)
4756 - for d in dirs:
4757 - d_bytes = d
4758 - try:
4759 - d = _unicode_decode(d, encoding=_encodings['fs'], errors='strict')
4760 - except UnicodeDecodeError:
4761 - d = _unicode_decode(d, encoding=_encodings['fs'], errors='replace')
4762 - return ("!!! Path contains invalid character(s) for encoding '%s': '%s'") \
4763 - % (_encodings['fs'], os.path.join(parent, d))
4764 - if d.startswith(".") or d == "CVS":
4765 - dirs.remove(d_bytes)
4766 - for f in files:
4767 - try:
4768 - f = _unicode_decode(f, encoding=_encodings['fs'], errors='strict')
4769 - except UnicodeDecodeError:
4770 - f = _unicode_decode(f, encoding=_encodings['fs'], errors='replace')
4771 - if f.startswith("."):
4772 - continue
4773 - f = os.path.join(parent, f)[len(filesdir) + 1:]
4774 - return ("!!! File name contains invalid character(s) for encoding '%s': '%s'") \
4775 - % (_encodings['fs'], f)
4776 - if f.startswith("."):
4777 - continue
4778 - f = os.path.join(parent, f)[len(filesdir) + 1:]
4779 - file_type = mf.findFile(f)
4780 - if file_type != "AUX" and not f.startswith("digest-"):
4781 - return ("!!! A file is not listed in the Manifest: '%s'") \
4782 - % os.path.join(filesdir, f)
4783 - return None
4784 -
4785 - def check_file_in_manifest(self, portdb, cpv, build_dict, build_use_flags_list):
4786 - myfetchlistdict = portage.FetchlistDict(self._pkgdir, self._mysettings, portdb)
4787 - my_manifest = portage.Manifest(self._pkgdir, self._mysettings['DISTDIR'], fetchlist_dict=myfetchlistdict, manifest1_compat=False, from_scratch=False)
4788 - if my_manifest.findFile(build_dict['package'] + "-" + build_dict['ebuild_version'] + ".ebuild") is None:
4789 - return "Ebuild file not found."
4790 - cpv_fetchmap = portdb.getFetchMap(cpv, useflags=build_use_flags_list, mytree=None)
4791 - self._mysettings.unlock()
4792 - try:
4793 - portage.fetch(cpv_fetchmap, self._mysettings, listonly=0, fetchonly=0, locks_in_subdir='.locks', use_locks=1, try_mirrors=1)
4794 - except:
4795 - self._mysettings.lock()
4796 - return "Can't fetch the file."
4797 - self._mysettings.lock()
4798 - try:
4799 - my_manifest.checkCpvHashes(cpv, checkDistfiles=True, onlyDistfiles=False, checkMiscfiles=True)
4800 - except:
4801 - return "Can't fetch the file or the hash failed."
4802 - try:
4803 - portdb.fetch_check(cpv, useflags=build_use_flags_list, mysettings=self._mysettings, all=False)
4804 - except:
4805 - return "Fetch check failed."
4806 - return None
4807 \ No newline at end of file
4808
4809 diff --git a/gobs/pym/old_cpv.py~ b/gobs/pym/old_cpv.py~
4810 deleted file mode 100644
4811 index 4923bf7..0000000
4812 --- a/gobs/pym/old_cpv.py~
4813 +++ /dev/null
4814 @@ -1,89 +0,0 @@
4815 -from __future__ import print_function
4816 -from gobs.readconf import get_conf_settings
4817 -reader=get_conf_settings()
4818 -gobs_settings_dict=reader.read_gobs_settings_all()
4819 -# make a CM
4820 -from gobs.ConnectionManager import connectionManager
4821 -CM=connectionManager(gobs_settings_dict)
4822 -#selectively import the pgsql/mysql querys
4823 -if CM.getName()=='pgsql':
4824 - from gobs.pgsql import *
4825 -
4826 -class gobs_old_cpv(object):
4827 -
4828 - def __init__(self, myportdb, mysettings):
4829 - self._mysettings = mysettings
4830 - self._myportdb = myportdb
4831 -
4832 - def mark_old_ebuild_db(self, categories, package, package_id):
4833 - conn=CM.getConnection()
4834 - ebuild_list_tree = sorted(self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None))
4835 - # Get ebuild list on categories, package in the db
4836 - ebuild_list_db = cp_list_db(conn,package_id)
4837 - # Check if don't have the ebuild in the tree
4838 - # Add it to the no active list
4839 - old_ebuild_list = []
4840 - for ebuild_line in ebuild_list_db:
4841 - ebuild_line_db = categories + "/" + package + "-" + ebuild_line[0]
4842 - if not ebuild_line_db in ebuild_list_tree:
4843 - old_ebuild_list.append(ebuild_line)
4844 - # Set no active on ebuilds in the db that no longer in tree
4845 - if old_ebuild_list != []:
4846 - for old_ebuild in old_ebuild_list:
4847 - print("O", categories + "/" + package + "-" + old_ebuild[0])
4848 - add_old_ebuild(conn,package_id, old_ebuild_list)
4849 - # Check if we have older no activ ebuilds then 60 days
4850 - ebuild_old_list_db = cp_list_old_db(conn,package_id)
4851 - # Delete older ebuilds in the db
4852 - if ebuild_old_list_db != []:
4853 - for del_ebuild_old in ebuild_old_list_db:
4854 - print("D", categories + "/" + package + "-" + del_ebuild_old[1])
4855 - del_old_ebuild(conn,ebuild_old_list_db)
4856 - CM.putConnection(conn)
4857 -
4858 - def mark_old_package_db(self, package_id_list_tree):
4859 - conn=CM.getConnection()
4860 - # Get categories/package list from db
4861 - package_list_db = cp_all_db(conn)
4862 - old_package_id_list = []
4863 - # Check if don't have the categories/package in the tree
4864 - # Add it to the no active list
4865 - for package_line in package_list_db:
4866 - if not package_line in package_id_list_tree:
4867 - old_package_id_list.append(package_line)
4868 - # Set no active on categories/package and ebuilds in the db that no longer in tree
4869 - if old_package_id_list != []:
4870 - mark_old_list = add_old_package(conn,old_package_id_list)
4871 - if mark_old_list != []:
4872 - for x in mark_old_list:
4873 - element = get_cp_from_package_id(conn,x)
4874 - print("O", element[0])
4875 - # Check if we have older no activ categories/package then 60 days
4876 - del_package_id_old_list = cp_all_old_db(conn,old_package_id_list)
4877 - # Delete older categories/package and ebuilds in the db
4878 - if del_package_id_old_list != []:
4879 - for i in del_package_id_old_list:
4880 - element = get_cp_from_package_id(conn,i)
4881 - print("D", element)
4882 - del_old_package(conn,del_package_id_old_list)
4883 - CM.putConnection(conn)
4884 -
4885 - def mark_old_categories_db(self):
4886 - conn=CM.getConnection()
4887 - # Get categories list from the tree and db
4888 - categories_list_tree = self._mysettings.categories
4889 - categories_list_db =get_categories_db(conn)
4890 - categories_old_list = []
4891 - # Check if don't have the categories in the tree
4892 - # Add it to the no active list
4893 - for categories_line in categories_list_db:
4894 - if not categories_line[0] in categories_list_tree:
4895 - old_c = get_old_categories(conn,categories_line[0])
4896 - if old_c is not None:
4897 - categories_old_list.append(categories_line)
4898 - # Delete older categories in the db
4899 - if categories_old_list != []:
4900 - for real_old_categories in categories_old_list:
4901 - del_old_categories(conn,real_old_categoriess)
4902 - print("D", real_old_categories)
4903 - CM.putConnection(conn)
4904 \ No newline at end of file
4905
4906 diff --git a/gobs/pym/package.py~ b/gobs/pym/package.py~
4907 deleted file mode 100644
4908 index 9ae6c57..0000000
4909 --- a/gobs/pym/package.py~
4910 +++ /dev/null
4911 @@ -1,306 +0,0 @@
4912 -from __future__ import print_function
4913 -import portage
4914 -from gobs.flags import gobs_use_flags
4915 -from gobs.repoman_gobs import gobs_repoman
4916 -from gobs.manifest import gobs_manifest
4917 -from gobs.text import get_file_text, get_ebuild_text
4918 -from gobs.old_cpv import gobs_old_cpv
4919 -from gobs.readconf import get_conf_settings
4920 -from gobs.flags import gobs_use_flags
4921 -reader=get_conf_settings()
4922 -gobs_settings_dict=reader.read_gobs_settings_all()
4923 -# make a CM
4924 -from gobs.ConnectionManager import connectionManager
4925 -CM=connectionManager(gobs_settings_dict)
4926 -#selectively import the pgsql/mysql querys
4927 -if CM.getName()=='pgsql':
4928 - from gobs.pgsql import *
4929 -
4930 -class gobs_package(object):
4931 -
4932 - def __init__(self, mysettings, myportdb):
4933 - self._mysettings = mysettings
4934 - self._myportdb = myportdb
4935 -
4936 - def change_config(self, config_id):
4937 - # Change config_root config_id = table configs.id
4938 - my_new_setup = "/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id + "/"
4939 - mysettings_setup = portage.config(config_root = my_new_setup)
4940 - return mysettings_setup
4941 -
4942 - def config_match_ebuild(self, categories, package, config_list):
4943 - config_cpv_listDict ={}
4944 - if config_list == []:
4945 - return config_cpv_listDict
4946 - conn=CM.getConnection()
4947 - for config_id in config_list:
4948 - # Change config/setup
4949 - mysettings_setup = self.change_config(config_id)
4950 - myportdb_setup = portage.portdbapi(mysettings=mysettings_setup)
4951 - # Get latest cpv from portage with the config
4952 - latest_ebuild = myportdb_setup.xmatch('bestmatch-visible', categories + "/" + package)
4953 - latest_ebuild_version = unicode("")
4954 - # Check if could get cpv from portage
4955 - if latest_ebuild != "":
4956 - # Get the version of cpv
4957 - latest_ebuild_version = portage.versions.cpv_getversion(latest_ebuild)
4958 - # Get the iuse and use flags for that config/setup
4959 - init_useflags = gobs_use_flags(mysettings_setup, myportdb_setup, latest_ebuild)
4960 - iuse_flags_list, final_use_list = init_useflags.get_flags()
4961 - iuse_flags_list2 = []
4962 - for iuse_line in iuse_flags_list:
4963 - iuse_flags_list2.append( init_useflags.reduce_flag(iuse_line))
4964 - # Dic the needed info
4965 - attDict = {}
4966 - attDict['ebuild_version'] = latest_ebuild_version
4967 - attDict['useflags'] = final_use_list
4968 - attDict['iuse'] = iuse_flags_list2
4969 - attDict['package'] = package
4970 - attDict['categories'] = categories
4971 - config_cpv_listDict[config_id] = attDict
4972 - # Clean some cache
4973 - myportdb_setup.close_caches()
4974 - portage.portdbapi.portdbapi_instances.remove(myportdb_setup)
4975 - CM.putConnection(conn)
4976 - return config_cpv_listDict
4977 -
4978 - def get_ebuild_metadata(self, ebuild_line):
4979 - # Get the auxdbkeys infos for the ebuild
4980 - try:
4981 - ebuild_auxdb_list = self._myportdb.aux_get(ebuild_line, portage.auxdbkeys)
4982 - except:
4983 - ebuild_auxdb_list = []
4984 - else:
4985 - for i in range(len(ebuild_auxdb_list)):
4986 - if ebuild_auxdb_list[i] == '':
4987 - ebuild_auxdb_list[i] = ''
4988 - return ebuild_auxdb_list
4989 -
4990 - def get_packageDict(self, pkgdir, ebuild_line, categories, package, config_id):
4991 - attDict = {}
4992 - ebuild_version_tree = portage.versions.cpv_getversion(ebuild_line)
4993 - ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")[0]
4994 - ebuild_version_text = get_ebuild_text(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")
4995 - init_repoman = gobs_repoman(self._mysettings, self._myportdb)
4996 - repoman_error = init_repoman.check_repoman(categories, package, ebuild_version_tree, config_id)
4997 - ebuild_version_metadata_tree = self.get_ebuild_metadata(ebuild_line)
4998 - # if there some error to get the metadata we add rubish to the
4999 - # ebuild_version_metadata_tree and set ebuild_version_checksum_tree to 0
5000 - # so it can be updated next time we update the db
5001 - if ebuild_version_metadata_tree == []:
5002 - ebuild_version_metadata_tree = ['','','','','','','','','','','','','','','','','','','','','','','','','']
5003 - ebuild_version_checksum_tree = ['0']
5004 - # add the ebuild to the dict packages
5005 - attDict['categories'] = categories
5006 - attDict['package'] = package
5007 - attDict['ebuild_version_tree'] = ebuild_version_tree
5008 - attDict['ebuild_version_checksum_tree']= ebuild_version_checksum_tree
5009 - attDict['ebuild_version_metadata_tree'] = ebuild_version_metadata_tree
5010 - attDict['ebuild_version_text'] = ebuild_version_text[0]
5011 - attDict['ebuild_version_revision'] = ebuild_version_text[1]
5012 - attDict['ebuild_error'] = repoman_error
5013 - return attDict
5014 -
5015 - def get_metadataDict(self, packageDict, ebuild_id_list):
5016 - # Make the metadataDict from packageDict
5017 - ebuild_i = 0
5018 - metadataDict ={}
5019 - for k, v in packageDict.iteritems():
5020 - attDict = {}
5021 - metadata_restrictions = []
5022 - for i in v['ebuild_version_metadata_tree'][4].split():
5023 - metadata_restrictions.append(i)
5024 - metadata_keyword = []
5025 - for i in v['ebuild_version_metadata_tree'][8].split():
5026 - metadata_keyword.append(i)
5027 - metadata_iuse = []
5028 - for i in v['ebuild_version_metadata_tree'][10].split():
5029 - metadata_iuse.append(i)
5030 - attDict['restrictions'] = metadata_restrictions
5031 - attDict['keyword'] = metadata_keyword
5032 - attDict['iuse'] = metadata_iuse
5033 - metadataDict[ebuild_id_list[ebuild_i]] = attDict
5034 - ebuild_i = ebuild_i +1
5035 - return metadataDict
5036 -
5037 - def add_new_ebuild_buildquery_db(self, ebuild_id_list, packageDict, config_cpv_listDict):
5038 - conn=CM.getConnection()
5039 - # Get the needed info from packageDict and config_cpv_listDict and put that in buildqueue
5040 - # Only add it if ebuild_version in packageDict and config_cpv_listDict match
5041 - if config_cpv_listDict is not None:
5042 - message = None
5043 - # Unpack config_cpv_listDict
5044 - for k, v in config_cpv_listDict.iteritems():
5045 - config_id = k
5046 - latest_ebuild_version = v['ebuild_version']
5047 - iuse_flags_list = list(set(v['iuse']))
5048 - use_enable= v['useflags']
5049 - use_disable = list(set(iuse_flags_list).difference(set(use_enable)))
5050 - # Make a dict with enable and disable use flags for ebuildqueuedwithuses
5051 - use_flagsDict = {}
5052 - for x in use_enable:
5053 - use_flagsDict[x] = True
5054 - for x in use_disable:
5055 - use_flagsDict[x] = False
5056 - # Unpack packageDict
5057 - i = 0
5058 - for k, v in packageDict.iteritems():
5059 - ebuild_id = ebuild_id_list[i]
5060 - use_flags_list = []
5061 - use_enable_list = []
5062 - for u, s in use_flagsDict.iteritems():
5063 - use_flags_list.append(u)
5064 - use_enable_list.append(s)
5065 - # Comper ebuild_version and add the ebuild_version to buildqueue
5066 - if portage.vercmp(v['ebuild_version_tree'], latest_ebuild_version) == 0:
5067 - add_new_package_buildqueue(conn,ebuild_id, config_id, use_flags_list, use_enable_list, message)
5068 - print("B", config_id, v['categories'] + "/" + v['package'] + "-" + latest_ebuild_version, "USE:", use_enable) # B = Build config cpv use-flags
5069 - i = i +1
5070 - CM.putConnection(conn)
5071 -
5072 - def get_package_metadataDict(self, pkgdir, package):
5073 - # Make package_metadataDict
5074 - attDict = {}
5075 - package_metadataDict = {}
5076 - changelog_checksum_tree = portage.checksum.sha256hash(pkgdir + "/ChangeLog")
5077 - changelog_text_tree = get_file_text(pkgdir + "/ChangeLog")
5078 - metadata_xml_checksum_tree = portage.checksum.sha256hash(pkgdir + "/metadata.xml")
5079 - metadata_xml_text_tree = get_file_text(pkgdir + "/metadata.xml")
5080 - attDict['changelog_checksum'] = changelog_checksum_tree[0]
5081 - attDict['changelog_text'] = changelog_text_tree
5082 - attDict['metadata_xml_checksum'] = metadata_xml_checksum_tree[0]
5083 - attDict[' metadata_xml_text'] = metadata_xml_text_tree
5084 - package_metadataDict[package] = attDict
5085 - return package_metadataDict
5086 -
5087 - def add_new_package_db(self, categories, package):
5088 - conn=CM.getConnection()
5089 - # add new categories package ebuild to tables package and ebuilds
5090 - print("C", categories + "/" + package) # C = Checking
5091 - print("N", categories + "/" + package) # N = New Package
5092 - pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR + cp
5093 - categories_dir = self._mysettings['PORTDIR'] + "/" + categories + "/"
5094 - # Get the ebuild list for cp
5095 - ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
5096 - if ebuild_list_tree == []:
5097 - CM.putConnection(conn)
5098 - return
5099 - config_list = get_config_list(conn)
5100 - config_cpv_listDict = self.config_match_ebuild(categories, package, config_list)
5101 - config_id = get_default_config(conn)
5102 - packageDict ={}
5103 - for ebuild_line in sorted(ebuild_list_tree):
5104 - # Make the needed packageDict
5105 - packageDict[ebuild_line] = self.get_packageDict(pkgdir, ebuild_line, categories, package, config_id)
5106 - # Add the ebuild to db
5107 - return_id = add_new_package_sql(conn,packageDict)
5108 - ebuild_id_list = return_id[0]
5109 - package_id_list = return_id[1]
5110 - package_id = package_id_list[0]
5111 - # Add metadataDict to db
5112 - metadataDict = self.get_metadataDict(packageDict, ebuild_id_list)
5113 - add_new_metadata(conn,metadataDict)
5114 - # Add any qa and repoman erro for the ebuild to buildlog
5115 - qa_error = []
5116 - init_manifest = gobs_manifest(self._mysettings, pkgdir)
5117 - manifest_error = init_manifest.digestcheck()
5118 - if manifest_error is not None:
5119 - qa_error.append(manifest_error)
5120 - print("QA:", categories + "/" + package, qa_error)
5121 - add_qa_repoman(conn,ebuild_id_list, qa_error, packageDict, config_id)
5122 - # Add the ebuild to the buildqueru table if needed
5123 - self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
5124 - # Add some checksum on some files
5125 - package_metadataDict = self.get_package_metadataDict(pkgdir, package)
5126 - add_new_package_metadata(conn,package_id, package_metadataDict)
5127 - # Add the manifest file to db
5128 - manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
5129 - get_manifest_text = get_file_text(pkgdir + "/Manifest")
5130 - add_new_manifest_sql(conn,package_id, get_manifest_text, manifest_checksum_tree)
5131 - CM.putConnection(conn)
5132 - print("C", categories + "/" + package + " ... Done.")
5133 -
5134 - def update_package_db(self, categories, package, package_id):
5135 - conn=CM.getConnection()
5136 - # Update the categories and package with new info
5137 - pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR with cp
5138 - # Get the checksum from the file in portage tree
5139 - manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
5140 - # Get the checksum from the db in package table
5141 - manifest_checksum_db = get_manifest_db(conn,package_id)
5142 - # if we have the same checksum return else update the package
5143 - ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
5144 - print("C", categories + "/" + package) # C = Checking
5145 - if manifest_checksum_tree != manifest_checksum_db:
5146 - print("U", categories + "/" + package) # U = Update
5147 - # Get package_metadataDict and update the db with it
5148 - package_metadataDict = self.get_package_metadataDict(pkgdir, package)
5149 - update_new_package_metadata(conn,package_id, package_metadataDict)
5150 - # Get config_cpv_listDict
5151 - config_list = get_config_list(conn)
5152 - config_cpv_listDict = self.config_match_ebuild(categories, package, config_list)
5153 - config_id = get_default_config(conn)
5154 - packageDict ={}
5155 - for ebuild_line in sorted(ebuild_list_tree):
5156 - old_ebuild_list = []
5157 - # split out ebuild version
5158 - ebuild_version_tree = portage.versions.cpv_getversion(ebuild_line)
5159 - # Get the checksum of the ebuild in tree and db
5160 - ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")[0]
5161 - ebuild_version_manifest_checksum_db = get_ebuild_checksum(conn,package_id, ebuild_version_tree)
5162 - # Check if the checksum have change
5163 - if ebuild_version_manifest_checksum_db is None or ebuild_version_checksum_tree != ebuild_version_manifest_checksum_db:
5164 - # Get packageDict for ebuild
5165 - packageDict[ebuild_line] = self.get_packageDict(pkgdir, ebuild_line, categories, package, config_id)
5166 - if ebuild_version_manifest_checksum_db is None:
5167 - print("N", categories + "/" + package + "-" + ebuild_version_tree) # N = New ebuild
5168 - else:
5169 - print("U", categories + "/" + package + "-" + ebuild_version_tree) # U = Updated ebuild
5170 - # Fix so we can use add_new_package_sql(packageDict) to update the ebuilds
5171 - old_ebuild_list.append(ebuild_version_tree)
5172 - add_old_ebuild(conn,package_id, old_ebuild_list)
5173 - update_active_ebuild(conn,package_id, ebuild_version_tree)
5174 - # Use packageDictand and metadataDict to update the db
5175 - return_id = add_new_package_sql(conn,packageDict)
5176 - ebuild_id_list = return_id[0]
5177 - metadataDict = self.get_metadataDict(packageDict, ebuild_id_list)
5178 - add_new_metadata(conn,metadataDict)
5179 - # Get the text in Manifest and update it
5180 - get_manifest_text = get_file_text(pkgdir + "/Manifest")
5181 - update_manifest_sql(conn,package_id, get_manifest_text, manifest_checksum_tree)
5182 - # Add any qa and repoman erros to buildlog
5183 - qa_error = []
5184 - init_manifest = gobs_manifest(self._mysettings, pkgdir)
5185 - manifest_error = init_manifest.digestcheck()
5186 - if manifest_error is not None:
5187 - qa_error.append(manifest_error)
5188 - print("QA:", categories + "/" + package, qa_error)
5189 - add_qa_repoman(conn,ebuild_id_list, qa_error, packageDict, config_id)
5190 - # Add the ebuild to the buildqueru table if needed
5191 - self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
5192 - # Mark or remove any old ebuilds
5193 - init_old_cpv = gobs_old_cpv(self._myportdb, self._mysettings)
5194 - init_old_cpv.mark_old_ebuild_db(categories, package, package_id)
5195 - CM.putConnection(conn)
5196 - print("C", categories + "/" + package + " ... Done.")
5197 -
5198 - def update_ebuild_db(self, build_dict):
5199 - conn=CM.getConnection()
5200 - config_id = build_dict['config_profile']
5201 - categories = build_dict['categories']
5202 - package = build_dict['package']
5203 - package_id = build_dict['package_id']
5204 - cpv = build_dict['cpv']
5205 - ebuild_version_tree = build_dict['ebuild_version']
5206 - pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR with cp
5207 - packageDict ={}
5208 - ebuild_version_manifest_checksum_db = get_ebuild_checksum(conn,package_id, ebuild_version_tree)
5209 - packageDict[cpv] = self.get_packageDict(pkgdir, cpv, categories, package, config_id)
5210 - old_ebuild_list = []
5211 - if ebuild_version_manifest_checksum_db is not None:
5212 - old_ebuild_list.append(ebuild_version_tree)
5213 - add_old_ebuild(conn,package_id, old_ebuild_list)
5214 - update_active_ebuild(conn,package_id, ebuild_version_tree)
5215 - return_id = add_new_package_sql(conn,packageDict)
5216 - print('return_id', return_id)
5217 - CM.putConnection(conn)
5218 \ No newline at end of file
5219
5220 diff --git a/gobs/pym/pgsql.py~ b/gobs/pym/pgsql.py~
5221 deleted file mode 100644
5222 index ea1f1f1..0000000
5223 --- a/gobs/pym/pgsql.py~
5224 +++ /dev/null
5225 @@ -1,638 +0,0 @@
5226 -#every function takes a connection as a parameter that is provided by the CM
5227 -from __future__ import print_function
5228 -
5229 -def get_default_config(connection):
5230 - cursor = connection.cursor()
5231 - sqlQ = 'SELECT id FROM configs WHERE default_config = True'
5232 - cursor.execute(sqlQ)
5233 - return cursor.fetchone()
5234 -
5235 -def get_profile_checksum(connection, config_profile):
5236 - cursor = connection.cursor()
5237 - sqlQ = "SELECT make_conf_checksum FROM configs WHERE active = 'True' AND id = %s AND updateing = 'False' AND sync = 'False'"
5238 - cursor.execute(sqlQ, (config_profile,))
5239 - return cursor.fetchone()
5240 -
5241 -def get_profile_sync(connection, config_profile):
5242 - cursor = connection.cursor()
5243 - sqlQ = "SELECT sync FROM configs WHERE active = 'True' AND id = %s AND updateing = 'False'"
5244 - cursor.execute(sqlQ, (config_profile,))
5245 - return cursor.fetchone()
5246 -
5247 -def set_profile_sync(connection):
5248 - cursor = connection.cursor()
5249 - sqlQ = "UPDATE configs SET sync = 'False' WHERE active = 'True'"
5250 - cursor.execute(sqlQ)
5251 - connection.commit()
5252 -
5253 -def reset_profile_sync(connection, config_profile):
5254 - cursor = connection.cursor()
5255 - sqlQ = "UPDATE configs SET sync = 'False' WHERE active = 'True' AND id = %s"
5256 - cursor.execute(sqlQ, (config_profile,))
5257 - connection.commit()
5258 -
5259 -def set_profile_updating(connection):
5260 - cursor = connection.cursor()
5261 - sqlQ = "UPDATE configs SET updating = 'True' WHERE active = 'True'"
5262 - cursor.execute(sqlQ)
5263 - connection.commit()
5264 -
5265 -def reset_profile_sync(connection, config_profile):
5266 - cursor = connection.cursor()
5267 - sqlQ = "UPDATE configs SET updating = 'False' WHERE active = 'True'"
5268 - cursor.execute(sqlQ)
5269 - connection.commit()
5270 -
5271 -def get_packages_to_build(connection, config_profile):
5272 - cursor =connection.cursor()
5273 - # no point in returning dead ebuilds, to just chuck em out later
5274 - sqlQ1 = '''SELECT post_message, queue_id, ebuild_id FROM buildqueue WHERE config = %s AND extract(epoch from (NOW()) - timestamp) > 7200 ORDER BY queue_id LIMIT 1'''
5275 -
5276 - sqlQ2 ='''SELECT ebuild_id,category,package_name,ebuild_version,ebuild_checksum FROM ebuilds,buildqueue,packages
5277 - WHERE buildqueue.ebuild_id=ebuilds.id AND ebuilds.package_id=packages.package_id AND queue_id = %s AND ebuilds.active = TRUE'''
5278 -
5279 - # get use flags to use
5280 - sqlQ3 = "SELECT useflag, enabled FROM ebuildqueuedwithuses WHERE queue_id = %s"
5281 - cursor.execute(sqlQ1, (config_profile,))
5282 - build_dict={}
5283 - entries = cursor.fetchone()
5284 - if entries is None:
5285 - return None
5286 - if entries[2] is None:
5287 - build_dict['ebuild_id'] = None
5288 - build_dict['queue_id'] = entries[1]
5289 - return build_dict
5290 - msg_list = []
5291 - if not entries[0] is None:
5292 - for msg in entries[0].split(" "):
5293 - msg_list.append(msg)
5294 - build_dict['post_message'] = msg_list
5295 - build_dict['queue_id'] = entries[1]
5296 - build_dict['ebuild_id']=entries[2]
5297 - cursor.execute(sqlQ2, (build_dict['queue_id'],))
5298 - #make a list that contains objects that haves ebuild_id and post_message +the lot as attributes
5299 - entries = cursor.fetchone()
5300 - if entries is None:
5301 - build_dict['checksum']= None
5302 - return build_dict
5303 - build_dict['ebuild_id']=entries[0]
5304 - build_dict['category']=entries[1]
5305 - build_dict['package']=entries[2]
5306 - build_dict['ebuild_version']=entries[3]
5307 - build_dict['checksum']=entries[4]
5308 -
5309 - #add a enabled and disabled list to the objects in the item list
5310 - cursor.execute(sqlQ3, (build_dict['queue_id'],))
5311 - uses={}
5312 - for row in cursor.fetchall():
5313 - uses[ row[0] ] = row[1]
5314 - build_dict['build_useflags']=uses
5315 - return build_dict
5316 -
5317 -def check_revision(connection, build_dict):
5318 - cursor = connection.cursor()
5319 - sqlQ1 = 'SELECT queue_id FROM buildqueue WHERE ebuild_id = %s AND config = %s'
5320 - sqlQ2 = "SELECT useflag FROM ebuildqueuedwithuses WHERE queue_id = %s AND enabled = 'True'"
5321 - cursor.execute(sqlQ1, (build_dict['ebuild_id'], build_dict['config_profile']))
5322 - queue_id_list = cursor.fetchall()
5323 - if queue_id_list == []:
5324 - return None
5325 - for queue_id in queue_id_list:
5326 - cursor.execute(sqlQ2, (queue_id[0],))
5327 - entries = cursor.fetchall()
5328 - queue_useflags = []
5329 - if entries == []:
5330 - queue_useflags = None
5331 - else:
5332 - for use_line in sorted(entries):
5333 - queue_useflags.append(use_line[0])
5334 - if queue_useflags == build_dict['build_useflags']:
5335 - return queue_id[0]
5336 - return None
5337 -
5338 -def get_config_list(connection):
5339 - cursor = connection.cursor()
5340 - sqlQ = 'SELECT id FROM configs WHERE default_config = False AND active = True'
5341 - cursor.execute(sqlQ)
5342 - entries = cursor.fetchall()
5343 - if entries == ():
5344 - return None
5345 - else:
5346 - config_id_list = []
5347 - for config_id in entries:
5348 - config_id_list.append(config_id[0])
5349 - return config_id_list
5350 -
5351 -def get_config_list_all(connection):
5352 - cursor = connection.cursor()
5353 - sqlQ = 'SELECT id FROM configs'
5354 - cursor.execute(sqlQ)
5355 - return cursor.fetchall()
5356 -
5357 -def update__make_conf(connection, configsDict):
5358 - cursor = connection.cursor()
5359 - sqlQ = 'UPDATE configs SET make_conf_checksum = %s, make_conf_text = %s, active = %s, config_error = %s WHERE id = %s'
5360 - for k, v in configsDict.iteritems():
5361 - params = [v['make_conf_checksum_tree'], v['make_conf_text'], v['active'], v['config_error'], k]
5362 - cursor.execute(sqlQ, params)
5363 - connection.commit()
5364 -
5365 -def have_package_db(connection, categories, package):
5366 - cursor = connection.cursor()
5367 - sqlQ ='SELECT package_id FROM packages WHERE category = %s AND package_name = %s'
5368 - params = categories, package
5369 - cursor.execute(sqlQ, params)
5370 - return cursor.fetchone()
5371 -
5372 -def have_activ_ebuild_id(connection, ebuild_id):
5373 - cursor = connection.cursor()
5374 - sqlQ = 'SELECT ebuild_checksum FROM ebuilds WHERE id = %s AND active = TRUE'
5375 - cursor.execute(sqlQ, (ebuild_id,))
5376 - entries = cursor.fetchone()
5377 - if entries is None:
5378 - return None
5379 - # If entries is not None we need [0]
5380 - return entries[0]
5381 -
5382 -def get_categories_db(connection):
5383 - cursor = connection.cursor()
5384 - sqlQ =' SELECT category FROM categories'
5385 - cursor.execute(sqlQ)
5386 - return cursor.fetchall()
5387 -
5388 -def get_categories_checksum_db(connection, categories):
5389 - cursor = connection.cursor()
5390 - sqlQ =' SELECT metadata_xml_checksum FROM categories_meta WHERE category = %s'
5391 - cursor.execute(sqlQ, (categories,))
5392 - return cursor.fetchone()
5393 -
5394 -def add_new_categories_meta_sql(connection, categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree):
5395 - cursor = connection.cursor()
5396 - sqlQ = 'INSERT INTO categories_meta (category, metadata_xml_checksum, metadata_xml_text) VALUES ( %s, %s, %s )'
5397 - params = categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree
5398 - cursor.execute(sqlQ, params)
5399 - connection.commit()
5400 -
5401 -def update_categories_meta_sql(connection, categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree):
5402 - cursor = connection.cursor()
5403 - sqlQ ='UPDATE categories_meta SET metadata_xml_checksum = %s, metadata_xml_text = %s WHERE category = %s'
5404 - params = (categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree, categories)
5405 - cursor.execute(sqlQ, params)
5406 - connection.commit()
5407 -
5408 -def add_new_manifest_sql(connection, package_id, get_manifest_text, manifest_checksum_tree):
5409 - cursor = connection.cursor()
5410 - sqlQ = 'INSERT INTO manifest (package_id, manifest, checksum) VALUES ( %s, %s, %s )'
5411 - params = package_id, get_manifest_text, manifest_checksum_tree
5412 - cursor.execute(sqlQ, params)
5413 - connection.commit()
5414 -
5415 -def add_new_package_metadata(connection, package_id, package_metadataDict):
5416 - cursor = connection.cursor()
5417 - sqlQ = 'SELECT changelog_checksum FROM packages_meta WHERE package_id = %s'
5418 - cursor.execute(sqlQ, (package_id,))
5419 - if cursor.fetchone() is None:
5420 - sqlQ = 'INSERT INTO packages_meta (package_id, changelog_text, changelog_checksum, metadata_text, metadata_checksum) VALUES ( %s, %s, %s, %s, %s )'
5421 - for k, v in package_metadataDict.iteritems():
5422 - params = package_id, v['changelog_text'], v['changelog_checksum'], v[' metadata_xml_text'], v['metadata_xml_checksum']
5423 - cursor.execute(sqlQ, params)
5424 - connection.commit()
5425 -
5426 -def update_new_package_metadata(connection, package_id, package_metadataDict):
5427 - cursor = connection.cursor()
5428 - sqlQ = 'SELECT changelog_checksum, metadata_checksum FROM packages_meta WHERE package_id = %s'
5429 - cursor.execute(sqlQ, package_id)
5430 - entries = cursor.fetchone()
5431 - if entries is None:
5432 - changelog_checksum_db = None
5433 - metadata_checksum_db = None
5434 - else:
5435 - changelog_checksum_db = entries[0]
5436 - metadata_checksum_db = entries[1]
5437 - for k, v in package_metadataDict.iteritems():
5438 - if changelog_checksum_db != v['changelog_checksum']:
5439 - sqlQ = 'UPDATE packages_meta SET changelog_text = %s, changelog_checksum = %s WHERE package_id = %s'
5440 - params = v['changelog_text'], v['changelog_checksum'], package_id
5441 - cursor.execute(sqlQ, params)
5442 - if metadata_checksum_db != v['metadata_xml_checksum']:
5443 - sqlQ = 'UPDATE packages_meta SET metadata_text = %s, metadata_checksum = %s WHERE package_id = %s'
5444 - params = v[' metadata_xml_text'], v['metadata_xml_checksum'], package_id
5445 - cursor.execute(sqlQ, params)
5446 - connection.commit()
5447 -
5448 -def get_manifest_db(connection, package_id):
5449 - cursor = connection.cursor()
5450 - sqlQ = 'SELECT checksum FROM manifest WHERE package_id = %s'
5451 - cursor.execute(sqlQ, package_id)
5452 - entries = cursor.fetchone()
5453 - if entries is None:
5454 - return None
5455 - # If entries is not None we need [0]
5456 - return entries[0]
5457 -
5458 -def update_manifest_sql(connection, package_id, get_manifest_text, manifest_checksum_tree):
5459 - cursor = connection.cursor()
5460 - sqlQ = 'UPDATE manifest SET checksum = %s, manifest = %s WHERE package_id = %s'
5461 - params = (manifest_checksum_tree, get_manifest_text, package_id)
5462 - cursor.execute(sqlQ, params)
5463 - connection.commit()
5464 -
5465 -def add_new_metadata(connection, metadataDict):
5466 - cursor = connection.cursor()
5467 - for k, v in metadataDict.iteritems():
5468 - #moved the cursor out side of the loop
5469 - sqlQ = 'SELECT updaterestrictions( %s, %s )'
5470 - params = k, v['restrictions']
5471 - cursor.execute(sqlQ, params)
5472 - sqlQ = 'SELECT updatekeywords( %s, %s )'
5473 - params = k, v['keyword']
5474 - cursor.execute(sqlQ, params)
5475 - sqlQ = 'SELECT updateiuse( %s, %s )'
5476 - params = k, v['iuse']
5477 - cursor.execute(sqlQ, params)
5478 - connection.commit()
5479 -
5480 -def add_new_package_sql(connection, packageDict):
5481 - #lets have a new cursor for each metod as per best practice
5482 - cursor = connection.cursor()
5483 - sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
5484 - ebuild_id_list = []
5485 - package_id_list = []
5486 - for k, v in packageDict.iteritems():
5487 - params = [v['categories'], v['package'], v['ebuild_version_tree'], v['ebuild_version_revision'], v['ebuild_version_checksum_tree'],
5488 - v['ebuild_version_text'], v['ebuild_version_metadata_tree'][0], v['ebuild_version_metadata_tree'][1],
5489 - v['ebuild_version_metadata_tree'][12], v['ebuild_version_metadata_tree'][2], v['ebuild_version_metadata_tree'][3],
5490 - v['ebuild_version_metadata_tree'][5],v['ebuild_version_metadata_tree'][6], v['ebuild_version_metadata_tree'][7],
5491 - v['ebuild_version_metadata_tree'][9], v['ebuild_version_metadata_tree'][11],
5492 - v['ebuild_version_metadata_tree'][13],v['ebuild_version_metadata_tree'][14], v['ebuild_version_metadata_tree'][15],
5493 - v['ebuild_version_metadata_tree'][16]]
5494 - cursor.execute(sqlQ, params)
5495 - mid = cursor.fetchone()
5496 - mid=mid[0]
5497 - ebuild_id_list.append(mid[1])
5498 - package_id_list.append(mid[0])
5499 - connection.commit()
5500 - # add_new_metadata(metadataDict)
5501 - return ebuild_id_list, package_id_list
5502 -
5503 -def add_new_ebuild_sql(connection, packageDict, new_ebuild_list):
5504 - #lets have a new cursor for each metod as per best practice
5505 - cursor = connection.cursor()
5506 - sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
5507 - ebuild_id_list = []
5508 - package_id_list = []
5509 - for k, v in packageDict.iteritems():
5510 - for x in new_ebuild_list:
5511 - if x == v['ebuild_version_tree']:
5512 - params = [v['categories'], v['package'], v['ebuild_version_tree'], v['ebuild_version_revision'], v['ebuild_version_checksum_tree'],
5513 - v['ebuild_version_text'], v['ebuild_version_metadata_tree'][0], v['ebuild_version_metadata_tree'][1],
5514 - v['ebuild_version_metadata_tree'][12], v['ebuild_version_metadata_tree'][2], v['ebuild_version_metadata_tree'][3],
5515 - v['ebuild_version_metadata_tree'][5],v['ebuild_version_metadata_tree'][6], v['ebuild_version_metadata_tree'][7],
5516 - v['ebuild_version_metadata_tree'][9], v['ebuild_version_metadata_tree'][11],
5517 - v['ebuild_version_metadata_tree'][13],v['ebuild_version_metadata_tree'][14], v['ebuild_version_metadata_tree'][15],
5518 - v['ebuild_version_metadata_tree'][16]]
5519 - cursor.execute(sqlQ, params)
5520 - mid = cursor.fetchone()
5521 - mid=mid[0]
5522 - ebuild_id_list.append(mid[1])
5523 - package_id_list.append(mid[0])
5524 - connection.commit()
5525 - # add_new_metadata(metadataDict)
5526 - return ebuild_id_list, package_id_list
5527 -
5528 -def update_active_ebuild(connection, package_id, ebuild_version_tree):
5529 - cursor = connection.cursor()
5530 - sqlQ ="UPDATE ebuilds SET active = 'False', timestamp = now() WHERE package_id = %s AND ebuild_version = %s AND active = 'True'"
5531 - cursor.execute(sqlQ, (package_id, ebuild_version_tree))
5532 - connection.commit()
5533 -
5534 -def get_ebuild_id_db(connection, categories, package, ebuild_version_tree):
5535 - cursor = connection.cursor()
5536 - sqlQ ='SELECT id FROM packages WHERE category = %s AND ebuild_name = %s AND ebuild_version = %s'
5537 - cursor.execute(sqlQ, (categories, package, ebuild_version_tree))
5538 - entries = cursor.fetchone()
5539 - return entries
5540 -
5541 -def get_ebuild_id_db_checksum(connection, build_dict):
5542 - cursor = connection.cursor()
5543 - sqlQ = 'SELECT id FROM ebuilds WHERE ebuild_version = %s AND ebuild_checksum = %s AND package_id = %s'
5544 - cursor.execute(sqlQ, (build_dict['ebuild_version'], build_dict['checksum'], build_dict['package_id']))
5545 - ebuild_id_list = sorted(cursor.fetchall())
5546 - if ebuild_id_list == []:
5547 - return None
5548 - return ebuild_id_list[0]
5549 -
5550 -def get_cpv_from_ebuild_id(connection, ebuild_id):
5551 - cursor = connection.cursor()
5552 - #wasent used
5553 - #sqlQ = 'SELECT package_id FROM ebuild WHERE id = %s'
5554 - sqlQ='SELECT category, ebuild_name, ebuild_version FROM packages WHERE id = %s'
5555 - cursor.execute(sqlQ, ebuild_id)
5556 - entries = cursor.fetchone()
5557 - return entries
5558 -
5559 -def get_cp_from_package_id(connection, package_id):
5560 - cursor =connection.cursor()
5561 - sqlQ = "SELECT ARRAY_TO_STRING(ARRAY[category, package_name] , '/') AS cp FROM packages WHERE package_id = %s"
5562 - cursor.execute(sqlQ, (package_id,))
5563 - return cursor.fetchone()
5564 -
5565 -def get_keyword_id_db(connection, arch, stable):
5566 - cursor =connection.cursor()
5567 - sqlQ ='SELECT id_keyword FROM keywords WHERE ARCH = %s AND stable = %s'
5568 - cursor.execute(sqlQ, (arch, stable))
5569 - entries = cursor.fetchone()
5570 - #why only return 1 entery? if that IS the point use top(1)
5571 - return entries
5572 -
5573 -def add_new_keywords(connection, ebuild_id, keyword_id):
5574 - cursor = connection.cursor()
5575 - sqlQ ='INSERT INTO keywordsToEbuild (ebuild_id, id_keyword) VALUES ( %s, %s )'
5576 - cursor.execute(sqlQ, (ebuild_id, keyword_id))
5577 - connection.commit()
5578 -
5579 -def have_package_buildqueue(connection, ebuild_id, config_id):
5580 - cursor = connection.cursor()
5581 - sqlQ = 'SELECT useflags FROM buildqueue WHERE ebuild_id = %s AND config_id = %s'
5582 - params = (ebuild_id[0], config_id)
5583 - cursor.execute(sqlQ, params)
5584 - entries = cursor.fetchone()
5585 - return entries
5586 -
5587 -def get_queue_id_list_config(connection, config_id):
5588 - cursor = connection.cursor()
5589 - sqlQ = 'SELECT queue_id FROM buildqueue WHERE config = %s'
5590 - cursor.execute(sqlQ, (config_id,))
5591 - entries = cursor.fetchall()
5592 - return entries
5593 -
5594 -def add_new_package_buildqueue(connection, ebuild_id, config_id, iuse_flags_list, use_enable, message):
5595 - cursor = connection.cursor()
5596 - sqlQ="SELECT insert_buildqueue( %s, %s, %s, %s, %s )"
5597 - if not iuse_flags_list:
5598 - iuse_flags_list=None
5599 - use_enable=None
5600 - params = ebuild_id, config_id, iuse_flags_list, use_enable, message
5601 - cursor.execute(sqlQ, params)
5602 - connection.commit()
5603 -
5604 -def get_ebuild_checksum(connection, package_id, ebuild_version_tree):
5605 - cursor = connection.cursor()
5606 - sqlQ = 'SELECT ebuild_checksum FROM ebuilds WHERE package_id = %s AND ebuild_version = %s AND active = TRUE'
5607 - cursor.execute(sqlQ, (package_id, ebuild_version_tree))
5608 - entries = cursor.fetchone()
5609 - if entries is None:
5610 - return None
5611 - # If entries is not None we need [0]
5612 - return entries[0]
5613 -
5614 -def cp_all_db(connection):
5615 - cursor = connection.cursor()
5616 - sqlQ = "SELECT package_id FROM packages"
5617 - cursor.execute(sqlQ)
5618 - return cursor.fetchall()
5619 -
5620 -def add_old_package(connection, old_package_list):
5621 - cursor = connection.cursor()
5622 - mark_old_list = []
5623 - sqlQ = "UPDATE ebuilds SET active = 'FALSE', timestamp = NOW() WHERE package_id = %s AND active = 'TRUE' RETURNING package_id"
5624 - for old_package in old_package_list:
5625 - cursor.execute(sqlQ, (old_package[0],))
5626 - entries = cursor.fetchone()
5627 - if entries is not None:
5628 - mark_old_list.append(entries[0])
5629 - connection.commit()
5630 - return mark_old_list
5631 -
5632 -def get_old_categories(connection, categories_line):
5633 - cursor = connection.cursor()
5634 - sqlQ = "SELECT package_name FROM packages WHERE category = %s"
5635 - cursor.execute(sqlQ (categories_line))
5636 - return cursor.fetchone()
5637 -
5638 -def del_old_categories(connection, real_old_categoriess):
5639 - cursor = connection.cursor()
5640 - sqlQ1 = 'DELETE FROM categories_meta WHERE category = %s'
5641 - sqlQ2 = 'DELETE FROM categories categories_meta WHERE category = %s'
5642 - cursor.execute(sqlQ1 (real_old_categories))
5643 - cursor.execute(sqlQ2 (real_old_categories))
5644 - connection.commit()
5645 -
5646 -def add_old_ebuild(connection, package_id, old_ebuild_list):
5647 - cursor = connection.cursor()
5648 - sqlQ1 = "UPDATE ebuilds SET active = 'FALSE' WHERE package_id = %s AND ebuild_version = %s"
5649 - sqlQ2 = "SELECT id FROM ebuilds WHERE package_id = %s AND ebuild_version = %s AND active = 'TRUE'"
5650 - sqlQ3 = "SELECT queue_id FROM buildqueue WHERE ebuild_id = %s"
5651 - sqlQ4 = 'DELETE FROM ebuildqueuedwithuses WHERE queue_id = %s'
5652 - sqlQ5 = 'DELETE FROM buildqueue WHERE queue_id = %s'
5653 - for old_ebuild in old_ebuild_list:
5654 - cursor.execute(sqlQ2, (package_id, old_ebuild[0]))
5655 - ebuild_id_list = cursor.fetchall()
5656 - if ebuild_id_list is not None:
5657 - for ebuild_id in ebuild_id_list:
5658 - cursor.execute(sqlQ3, (ebuild_id))
5659 - queue_id_list = cursor.fetchall()
5660 - if queue_id_list is not None:
5661 - for queue_id in queue_id_list:
5662 - cursor.execute(sqlQ4, (queue_id))
5663 - cursor.execute(sqlQ5, (queue_id))
5664 - cursor.execute(sqlQ1, (package_id, old_ebuild[0]))
5665 - connection.commit()
5666 -
5667 -def cp_all_old_db(connection, old_package_id_list):
5668 - cursor = connection.cursor()
5669 - old_package_list = []
5670 - for old_package in old_package_id_list:
5671 - sqlQ = "SELECT package_id FROM ebuilds WHERE package_id = %s AND active = 'FALSE' AND date_part('days', NOW() - timestamp) < 60"
5672 - cursor.execute(sqlQ, old_package)
5673 - entries = cursor.fetchone()
5674 - if entries is None:
5675 - old_package_list.append(old_package)
5676 - return old_package_list
5677 -
5678 -def del_old_queue(connection, queue_id):
5679 - cursor = connection.cursor()
5680 - sqlQ1 = 'DELETE FROM ebuildqueuedwithuses WHERE queue_id = %s'
5681 - sqlQ2 = 'DELETE FROM querue_retest WHERE querue_id = %s'
5682 - sqlQ3 = 'DELETE FROM buildqueue WHERE queue_id = %s'
5683 - cursor.execute(sqlQ1, (queue_id,))
5684 - cursor.execute(sqlQ2, (queue_id,))
5685 - cursor.execute(sqlQ3, (queue_id,))
5686 - connection.commit()
5687 -
5688 -def del_old_ebuild(connection, ebuild_old_list_db):
5689 - cursor = connection.cursor()
5690 - sqlQ1 = 'SELECT build_id FROM buildlog WHERE ebuild_id = %s'
5691 - sqlQ2 = 'DELETE FROM qa_problems WHERE build_id = %s'
5692 - sqlQ3 = 'DELETE FROM repoman_problems WHERE build_id = %s'
5693 - sqlQ4 = 'DELETE FROM ebuildbuildwithuses WHERE build_id = %s'
5694 - sqlQ5 = 'DELETE FROM ebuildhaveskeywords WHERE ebuild_id = %s'
5695 - sqlQ6 = 'DELETE FROM ebuildhavesiuses WHERE ebuild = %s'
5696 - sqlQ7 = 'DELETE FROM ebuildhavesrestrictions WHERE ebuild_id = %s'
5697 - sqlQ8 = 'DELETE FROM buildlog WHERE ebuild_id = %s'
5698 - sqlQ9 = 'SELECT queue_id FROM buildqueue WHERE ebuild_id = %s'
5699 - sqlQ10 = 'DELETE FROM ebuildqueuedwithuses WHERE queue_id = %s'
5700 - sqlQ11 = 'DELETE FROM buildqueue WHERE ebuild_id = %s'
5701 - sqlQ12 = 'DELETE FROM ebuilds WHERE id = %s'
5702 - for ebuild_id in ebuild_old_list_db:
5703 - cursor.execute(sqlQ1, (ebuild_id[0],))
5704 - build_id_list = cursor.fetchall()
5705 - if build_id_list != []:
5706 - for build_id in build_id_list:
5707 - cursor.execute(sqlQ2, (build_id[0],))
5708 - cursor.execute(sqlQ3, (build_id[0],))
5709 - cursor.execute(sqlQ4, (build_id[0],))
5710 - cursor.execute(sqlQ9, (ebuild_id[0],))
5711 - queue_id_list = cursor.fetchall()
5712 - if queue_id_list != []:
5713 - for queue_id in queue_id_list:
5714 - cursor.execute(sqlQ10, (queue_id[0],))
5715 - cursor.execute(sqlQ5, (ebuild_id[0],))
5716 - cursor.execute(sqlQ6, (ebuild_id[0],))
5717 - cursor.execute(sqlQ7, (ebuild_id[0],))
5718 - cursor.execute(sqlQ8, (ebuild_id[0],))
5719 - cursor.execute(sqlQ11, (ebuild_id[0],))
5720 - cursor.execute(sqlQ12, (ebuild_id[0],))
5721 - connection.commit()
5722 -
5723 -def del_old_package(connection, package_id_list):
5724 - cursor = connection.cursor()
5725 - sqlQ1 = 'SELECT id FROM ebuilds WHERE package_id = %s'
5726 - sqlQ2 = 'DELETE FROM ebuilds WHERE package_id = %s'
5727 - sqlQ3 = 'DELETE FROM manifest WHERE package_id = %s'
5728 - sqlQ4 = 'DELETE FROM packages_meta WHERE package_id = %s'
5729 - sqlQ5 = 'DELETE FROM packages WHERE package_id = %s'
5730 - for package_id in package_id_list:
5731 - cursor.execute(sqlQ1, package_id)
5732 - ebuild_id_list = cursor.fetchall()
5733 - del_old_ebuild(connection, ebuild_id_list)
5734 - cursor.execute(sqlQ2, (package_id,))
5735 - cursor.execute(sqlQ3, (package_id,))
5736 - cursor.execute(sqlQ4, (package_id,))
5737 - cursor.execute(sqlQ5, (package_id,))
5738 - connection.commit()
5739 -
5740 -def cp_list_db(connection, package_id):
5741 - cursor = connection.cursor()
5742 - sqlQ = "SELECT ebuild_version FROM ebuilds WHERE active = 'TRUE' AND package_id = %s"
5743 - cursor.execute(sqlQ, (package_id))
5744 - return cursor.fetchall()
5745 -
5746 -def cp_list_old_db(connection, package_id):
5747 - cursor = connection.cursor()
5748 - sqlQ ="SELECT id, ebuild_version FROM ebuilds WHERE active = 'FALSE' AND package_id = %s AND date_part('days', NOW() - timestamp) > 60"
5749 - cursor.execute(sqlQ, package_id)
5750 - return cursor.fetchall()
5751 -
5752 -def move_queru_buildlog(connection, queue_id, build_error, summary_error, build_log_dict):
5753 - cursor = connection.cursor()
5754 - repoman_error_list = build_log_dict['repoman_error_list']
5755 - qa_error_list = build_log_dict['qa_error_list']
5756 - sqlQ = 'SELECT make_buildlog( %s, %s, %s, %s, %s, %s)'
5757 - cursor.execute(sqlQ, (queue_id, summary_error, build_error, build_log_dict['logfilename'], qa_error_list, repoman_error_list))
5758 - entries = cursor.fetchone()
5759 - connection.commit()
5760 - return entries
5761 -
5762 -def add_new_buildlog(connection, build_dict, use_flags_list, use_enable_list, build_error, summary_error, build_log_dict):
5763 - cursor = connection.cursor()
5764 - repoman_error_list = build_log_dict['repoman_error_list']
5765 - qa_error_list = build_log_dict['qa_error_list']
5766 - if not use_flags_list:
5767 - use_flags_list=None
5768 - use_enable_list=None
5769 - sqlQ = 'SELECT make_deplog( %s, %s, %s, %s, %s, %s, %s, %s, %s)'
5770 - params = (build_dict['ebuild_id'], build_dict['config_profile'], use_flags_list, use_enable_list, summary_error, build_error, build_log_dict['logfilename'], qa_error_list, repoman_error_list)
5771 - cursor.execute(sqlQ, params)
5772 - entries = cursor.fetchone()
5773 - connection.commit()
5774 - if entries is None:
5775 - return None
5776 - # If entries is not None we need [0]
5777 - return entries[0]
5778 -
5779 -def add_qa_repoman(connection, ebuild_id_list, qa_error, packageDict, config_id):
5780 - cursor = connection.cursor()
5781 - ebuild_i = 0
5782 - for k, v in packageDict.iteritems():
5783 - ebuild_id = ebuild_id_list[ebuild_i]
5784 - sqlQ = "INSERT INTO buildlog (ebuild_id, config, error_summary, timestamp, hash ) VALUES ( %s, %s, %s, now(), '1' ) RETURNING build_id"
5785 - if v['ebuild_error'] != [] or qa_error != []:
5786 - if v['ebuild_error'] != [] or qa_error == []:
5787 - summary = "Repoman"
5788 - elif v['ebuild_error'] == [] or qa_error != []:
5789 - summary = "QA"
5790 - else:
5791 - summary = "QA:Repoman"
5792 - params = (ebuild_id, config_id, summary)
5793 - cursor.execute(sqlQ, params)
5794 - build_id = cursor.fetchone()
5795 - if v['ebuild_error'] != []:
5796 - sqlQ = 'INSERT INTO repoman_problems (problem, build_id ) VALUES ( %s, %s )'
5797 - for x in v['ebuild_error']:
5798 - params = (x, build_id)
5799 - cursor.execute(sqlQ, params)
5800 - if qa_error != []:
5801 - sqlQ = 'INSERT INTO qa_problems (problem, build_id ) VALUES ( %s, %s )'
5802 - for x in qa_error:
5803 - params = (x, build_id)
5804 - cursor.execute(sqlQ, params)
5805 - ebuild_i = ebuild_i +1
5806 - connection.commit()
5807 -
5808 -def update_qa_repoman(connection, build_id, build_log_dict):
5809 - cursor = connection.cursor()
5810 - sqlQ1 = 'INSERT INTO repoman_problems (problem, build_id ) VALUES ( %s, %s )'
5811 - sqlQ2 = 'INSERT INTO qa_problems (problem, build_id ) VALUES ( %s, %s )'
5812 - if build_log_dict['repoman_error_list'] != []:
5813 - for x in build_log_dict['repoman_error_list']:
5814 - params = (x, build_id)
5815 - cursor.execute(sqlQ, params)
5816 - if build_log_dict['qa_error_list'] != []:
5817 - for x in build_log_dict['qa_error_list']:
5818 - params = (x, build_id)
5819 - cursor.execute(sqlQ, params)
5820 - connection.commit()
5821 -
5822 -def get_arch_db(connection):
5823 - cursor = connection.cursor()
5824 - sqlQ = "SELECT keyword FROM keywords WHERE keyword = 'amd64'"
5825 - cursor.execute(sqlQ)
5826 - return cursor.fetchone()
5827 -
5828 -def add_new_arch_db(connection, arch_list):
5829 - cursor = connection.cursor()
5830 - sqlQ = 'INSERT INTO keywords (keyword) VALUES ( %s )'
5831 - for arch in arch_list:
5832 - cursor.execute(sqlQ, (arch,))
5833 - connection.commit()
5834 -
5835 -def update_fail_times(connection, fail_querue_dict):
5836 - cursor = connection.cursor()
5837 - sqlQ1 = 'UPDATE querue_retest SET fail_times = %s WHERE querue_id = %s AND fail_type = %s'
5838 - sqlQ2 = 'UPDATE buildqueue SET timestamp = NOW() WHERE queue_id = %s'
5839 - cursor.execute(sqlQ1, (fail_querue_dict['fail_times'], fail_querue_dict['querue_id'], fail_querue_dict['fail_type'],))
5840 - cursor.execute(sqlQ2, (fail_querue_dict['querue_id'],))
5841 - connection.commit()
5842 -
5843 -def get_fail_querue_dict(connection, build_dict):
5844 - cursor = connection.cursor()
5845 - fail_querue_dict = {}
5846 - sqlQ = 'SELECT fail_times FROM querue_retest WHERE querue_id = %s AND fail_type = %s'
5847 - cursor.execute(sqlQ, (build_dict['queue_id'], build_dict['type_fail'],))
5848 - entries = cursor.fetchone()
5849 - if entries is None:
5850 - return None
5851 - fail_querue_dict['fail_times'] = entries
5852 - return fail_querue_dict
5853 -
5854 -def add_fail_querue_dict(connection, fail_querue_dict):
5855 - cursor = connection.cursor()
5856 - sqlQ1 = 'INSERT INTO querue_retest (querue_id, fail_type, fail_times) VALUES ( %s, %s, %s)'
5857 - sqlQ2 = 'UPDATE buildqueue SET timestamp = NOW() WHERE queue_id = %s'
5858 - cursor.execute(sqlQ1, (fail_querue_dict['querue_id'],fail_querue_dict['fail_type'], fail_querue_dict['fail_times']))
5859 - cursor.execute(sqlQ2, (fail_querue_dict['querue_id'],))
5860 - connection.commit()
5861 -
5862 -def make_conf_error(connection,config_profile):
5863 - pass
5864
5865 diff --git a/gobs/pym/readconf.py~ b/gobs/pym/readconf.py~
5866 deleted file mode 100644
5867 index c017561..0000000
5868 --- a/gobs/pym/readconf.py~
5869 +++ /dev/null
5870 @@ -1,46 +0,0 @@
5871 -import os
5872 -import sys
5873 -import re
5874 -
5875 -class get_conf_settings(object):
5876 -# open the /etc/buildhost/buildhost.conf file and get the needed
5877 -# settings for gobs
5878 - def __init__(self):
5879 - self.configfile = "/etc/gobs/gobs.conf"
5880 -
5881 - def read_gobs_settings_all(self):
5882 - # It will return a dict with options from the configfile
5883 - try:
5884 - open_conffile = open(self.configfile, 'r')
5885 - except:
5886 - sys.exit("Fail to open config file:" + self.configfile)
5887 - textlines = open_conffile.readlines()
5888 - for line in textlines:
5889 - element = line.split('=')
5890 - if element[0] == 'SQLBACKEND': # Databas backend
5891 - get_sql_backend = element[1]
5892 - if element[0] == 'SQLDB': # Database
5893 - get_sql_db = element[1]
5894 - if element[0] == 'SQLHOST': # Host
5895 - get_sql_host = element[1]
5896 - if element[0] == 'SQLUSER': # User
5897 - get_sql_user = element[1]
5898 - if element[0] == 'SQLPASSWD': # Password
5899 - get_sql_passwd = element[1]
5900 - # Buildhost root (dir for host/setup on host)
5901 - if element[0] == 'GOBSROOT':
5902 - get_gobs_root = element[1]
5903 - # Buildhost setup (host/setup on guest)
5904 - if element[0] == 'GOBSCONFIG':
5905 - get_gobs_config = element[1]
5906 -
5907 - open_conffile.close()
5908 - gobs_settings_dict = {}
5909 - gobs_settings_dict['sql_backend'] = get_sql_backend.rstrip('\n')
5910 - gobs_settings_dict['sql_db'] = get_sql_db.rstrip('\n')
5911 - gobs_settings_dict['sql_host'] = get_sql_host.rstrip('\n')
5912 - gobs_settings_dict['sql_user'] = get_sql_user.rstrip('\n')
5913 - gobs_settings_dict['sql_passwd'] = get_sql_passwd.rstrip('\n')
5914 - gobs_settings_dict['gobs_root'] = get_gobs_root.rstrip('\n')
5915 - gobs_settings_dict['gobs_config'] = get_gobs_config.rstrip('\n')
5916 - return gobs_settings_dict
5917
5918 diff --git a/gobs/pym/repoman_gobs.py~ b/gobs/pym/repoman_gobs.py~
5919 deleted file mode 100644
5920 index 2141342..0000000
5921 --- a/gobs/pym/repoman_gobs.py~
5922 +++ /dev/null
5923 @@ -1,48 +0,0 @@
5924 -import sys
5925 -import os
5926 -import portage
5927 -from portage import os, _encodings, _unicode_decode
5928 -from portage import _unicode_encode
5929 -from portage.exception import DigestException, FileNotFound, ParseError, PermissionDenied
5930 -from _emerge.Package import Package
5931 -from _emerge.RootConfig import RootConfig
5932 -from repoman.checks import run_checks
5933 -import codecs
5934 -
5935 -class gobs_repoman(object):
5936 -
5937 - def __init__(self, mysettings, myportdb):
5938 - self._mysettings = mysettings
5939 - self._myportdb = myportdb
5940 -
5941 - def check_repoman(self, categories, package, ebuild_version_tree, config_id):
5942 - # We run repoman run_checks on the ebuild
5943 - pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package
5944 - full_path = pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild"
5945 - cpv = categories + "/" + package + "-" + ebuild_version_tree
5946 - root = '/'
5947 - trees = {
5948 - root : {'porttree' : portage.portagetree(root, settings=self._mysettings)}
5949 - }
5950 - root_config = RootConfig(self._mysettings, trees[root], None)
5951 - allvars = set(x for x in portage.auxdbkeys if not x.startswith("UNUSED_"))
5952 - allvars.update(Package.metadata_keys)
5953 - allvars = sorted(allvars)
5954 - myaux = dict(zip(allvars, self._myportdb.aux_get(cpv, allvars)))
5955 - pkg = Package(cpv=cpv, metadata=myaux, root_config=root_config)
5956 - fails = []
5957 - try:
5958 - # All ebuilds should have utf_8 encoding.
5959 - f = codecs.open(_unicode_encode(full_path,
5960 - encoding = _encodings['fs'], errors = 'strict'),
5961 - mode = 'r', encoding = _encodings['repo.content'])
5962 - try:
5963 - for check_name, e in run_checks(f, pkg):
5964 - fails.append(check_name + ": " + e)
5965 - finally:
5966 - f.close()
5967 - except UnicodeDecodeError:
5968 - # A file.UTF8 failure will have already been recorded above.
5969 - pass
5970 - # fails will have a list with repoman errors
5971 - return fails
5972 \ No newline at end of file
5973
5974 diff --git a/gobs/pym/sync.py~ b/gobs/pym/sync.py~
5975 deleted file mode 100644
5976 index 35833f9..0000000
5977 --- a/gobs/pym/sync.py~
5978 +++ /dev/null
5979 @@ -1,23 +0,0 @@
5980 -from __future__ import print_function
5981 -import portage
5982 -import os
5983 -import errno
5984 -from git import *
5985 -
5986 -def git_pull():
5987 - repo = Repo("/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/")
5988 - repo_remote = repo.remotes.origin
5989 - repo_remote.pull()
5990 - master = repo.head.reference
5991 - print(master.log())
5992 -
5993 -def sync_tree()
5994 - settings, trees, mtimedb = load_emerge_config()
5995 - portdb = trees[settings["ROOT"]]["porttree"].dbapi
5996 - tmpcmdline = []
5997 - tmpcmdline.append("--sync")
5998 - tmpcmdline.append("--quiet")
5999 - myaction, myopts, myfiles = parse_opts(tmpcmdline)
6000 - fail_sync = action_sync(settings, trees, mtimedb, myopts, myaction)
6001 - print("fail_sync", fail_sync)
6002 - return fail_sync
6003 \ No newline at end of file
6004
6005 diff --git a/gobs/pym/text.py~ b/gobs/pym/text.py~
6006 deleted file mode 100644
6007 index 7523015..0000000
6008 --- a/gobs/pym/text.py~
6009 +++ /dev/null
6010 @@ -1,48 +0,0 @@
6011 -import sys
6012 -import re
6013 -import os
6014 -import errno
6015 -
6016 -def get_file_text(filename):
6017 - # Return the filename contents
6018 - try:
6019 - textfile = open(filename)
6020 - except:
6021 - return "No file", filename
6022 - text = ""
6023 - for line in textfile:
6024 - text += unicode(line, 'utf-8')
6025 - textfile.close()
6026 - return text
6027 -
6028 -def get_ebuild_text(filename):
6029 - """Return the ebuild contents"""
6030 - try:
6031 - ebuildfile = open(filename)
6032 - except:
6033 - return "No Ebuild file there"
6034 - text = ""
6035 - dataLines = ebuildfile.readlines()
6036 - for i in dataLines:
6037 - text = text + i + " "
6038 - line2 = dataLines[2]
6039 - field = line2.split(" ")
6040 - ebuildfile.close()
6041 - try:
6042 - cvs_revision = field[3]
6043 - except:
6044 - cvs_revision = ''
6045 - return text, cvs_revision
6046 -
6047 -def get_log_text_list(filename):
6048 - """Return the log contents as a list"""
6049 - print "filename", filename
6050 - try:
6051 - logfile = open(filename)
6052 - except:
6053 - return None
6054 - text = []
6055 - dataLines = logfile.readlines()
6056 - for i in dataLines:
6057 - text.append(i)
6058 - return text