Gentoo Archives: gentoo-commits

From: Magnus Granberg <zorry@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] dev/zorry:master commit in: gobs/pym/
Date: Fri, 29 Jul 2011 15:31:35
Message-Id: 8554eeee41909a2df43989d6872346b5b64e4570.zorry@gentoo
1 commit: 8554eeee41909a2df43989d6872346b5b64e4570
2 Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
3 AuthorDate: Fri Jul 29 15:29:35 2011 +0000
4 Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
5 CommitDate: Fri Jul 29 15:29:35 2011 +0000
6 URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=8554eeee
7
8 Updated alot in the pym dir
9
10 ---
11 gobs/pym/ConnectionManager.py | 26 +-
12 gobs/pym/build_log.py | 99 ++++---
13 gobs/pym/build_queru.py | 158 ++++++++++
14 gobs/pym/depclean.py | 632 +++++++++++++++++++++++++++++++++++++++++
15 gobs/pym/flags.py | 27 +-
16 gobs/pym/old_cpv.py | 11 +-
17 gobs/pym/package.py | 44 +---
18 gobs/pym/text.py | 19 +-
19 8 files changed, 901 insertions(+), 115 deletions(-)
20
21 diff --git a/gobs/pym/ConnectionManager.py b/gobs/pym/ConnectionManager.py
22 index 7d87702..1bbeb35 100644
23 --- a/gobs/pym/ConnectionManager.py
24 +++ b/gobs/pym/ConnectionManager.py
25 @@ -1,31 +1,31 @@
26 #a simple CM build around sie singleton so there can only be 1 CM but you can call the class in different place with out caring about it.
27 #when the first object is created of this class, the SQL settings are read from the file and stored in the class for later reuse by the next object and so on.
28 #(maybe later add support for connection pools)
29 +from __future__ import print_function
30 +
31 class connectionManager(object):
32 _instance = None
33
34 - #size of the connection Pool
35 + #size of the connection Pool
36 def __new__(cls, settings_dict, numberOfconnections=20, *args, **kwargs):
37 if not cls._instance:
38 cls._instance = super(connectionManager, cls).__new__(cls, *args, **kwargs)
39 #read the sql user/host etc and store it in the local object
40 - print settings_dict['sql_host']
41 + print(settings_dict['sql_host'])
42 cls._host=settings_dict['sql_host']
43 cls._user=settings_dict['sql_user']
44 cls._password=settings_dict['sql_passwd']
45 cls._database=settings_dict['sql_db']
46 #shouldnt we include port also?
47 try:
48 - from psycopg2 import pool
49 - cls._connectionNumber=numberOfconnections
50 - #always create 1 connection
51 - cls._pool=pool.ThreadedConnectionPool(1,cls._connectionNumber,host=cls._host,database=cls._database,user=cls._user,password=cls._password)
52 - cls._name='pgsql'
53 -
54 -
55 - except ImportError:
56 - print "Please install a recent version of dev-python/psycopg for Python"
57 - sys.exit(1)
58 + from psycopg2 import pool
59 + cls._connectionNumber=numberOfconnections
60 + #always create 1 connection
61 + cls._pool=pool.ThreadedConnectionPool(1,cls._connectionNumber,host=cls._host,database=cls._database,user=cls._user,password=cls._password)
62 + cls._name='pgsql'
63 + except ImportError:
64 + print("Please install a recent version of dev-python/psycopg for Python")
65 + sys.exit(1)
66 #setup connection pool
67 return cls._instance
68
69 @@ -38,7 +38,7 @@ class connectionManager(object):
70
71 def putConnection(self, connection):
72 self._pool.putconn(connection)
73 -
74 +
75 def closeAllConnections(self):
76 self._pool.closeall()
77
78
79 diff --git a/gobs/pym/build_log.py b/gobs/pym/build_log.py
80 index 4f5a801..eb5fcea 100644
81 --- a/gobs/pym/build_log.py
82 +++ b/gobs/pym/build_log.py
83 @@ -1,8 +1,10 @@
84 +from __future__ import print_function
85 import re
86 from gobs.text import get_log_text_list
87 from gobs.repoman_gobs import gobs_repoman
88 import portage
89 from gobs.readconf import get_conf_settings
90 +from gobs.flags import gobs_use_flags
91 reader=get_conf_settings()
92 gobs_settings_dict=reader.read_gobs_settings_all()
93 # make a CM
94 @@ -16,35 +18,58 @@ elif CM.getName()=='mysql':
95
96 class gobs_buildlog(object):
97
98 - def __init__(self, CM, mysettings, build_dict, config_profile):
99 + def __init__(self, mysettings, build_dict):
100 self._mysettings = mysettings
101 self._myportdb = portage.portdbapi(mysettings=self._mysettings)
102 self._build_dict = build_dict
103 - self._config_profile = config_profile
104 - self._CM = CM
105 self._logfile_text = get_log_text_list(self._mysettings.get("PORTAGE_LOG_FILE"))
106 +
107 + def add_new_ebuild_buildlog(self, build_error, summary_error, build_log_dict):
108 + conn=CM.getConnection()
109 + cpv = self._build_dict['cpv']
110 + init_useflags = gobs_use_flags(self._mysettings, self._myportdb, cpv)
111 + iuse_flags_list, final_use_list = init_useflags.get_flags_looked()
112 + iuse = []
113 + use_flags_list = []
114 + use_enable_list = []
115 + for iuse_line in iuse_flags_list:
116 + iuse.append(init_useflags.reduce_flag(iuse_line))
117 + iuse_flags_list2 = list(set(iuse))
118 + use_enable = final_use_list
119 + use_disable = list(set(iuse_flags_list2).difference(set(use_enable)))
120 + use_flagsDict = {}
121 + for x in use_enable:
122 + use_flagsDict[x] = True
123 + for x in use_disable:
124 + use_flagsDict[x] = False
125 + for u, s in use_flagsDict.iteritems():
126 + use_flags_list.append(u)
127 + use_enable_list.append(s)
128 + build_id = add_new_buildlog(conn, self._build_dict, use_flags_list, use_enable_list, build_error, summary_error, build_log_dict)
129 + CM.putConnection(conn)
130 + return build_id
131
132 - def search_info(self, textline, error_log_list, i):
133 + def search_info(self, textline, error_log_list):
134 if re.search(" * Package:", textline):
135 - print 'Package'
136 + print('Package')
137 error_log_list.append(textline)
138 if re.search(" * Repository:", textline):
139 - print 'Repository'
140 + print('Repository')
141 error_log_list.append(textline)
142 if re.search(" * Maintainer:", textline):
143 error_log_list.append(textline)
144 - print 'Maintainer'
145 + print('Maintainer')
146 if re.search(" * USE:", textline):
147 error_log_list.append(textline)
148 - print 'USE'
149 + print('USE')
150 if re.search(" * FEATURES:", textline):
151 error_log_list.append(textline)
152 - print 'FEATURES'
153 + print('FEATURES')
154 return error_log_list
155
156 def search_error(self, textline, error_log_list, sum_build_log_list, i):
157 if re.search("Error 1", textline):
158 - print 'Error'
159 + print('Error')
160 x = i - 20
161 endline = True
162 error_log_list.append(".....\n")
163 @@ -56,7 +81,7 @@ class gobs_buildlog(object):
164 else:
165 x = x +1
166 if re.search(" * ERROR:", textline):
167 - print 'ERROR'
168 + print('ERROR')
169 x = i
170 endline= True
171 field = textline.split(" ")
172 @@ -69,12 +94,25 @@ class gobs_buildlog(object):
173 endline = False
174 else:
175 x = x +1
176 + if re.search("configure: error:", textline):
177 + print('configure: error:')
178 + x = i - 4
179 + endline = True
180 + error_log_list.append(".....\n")
181 + while x != i + 3 and endline:
182 + try:
183 + error_log_list.append(self._logfile_text[x])
184 + except:
185 + endline = False
186 + else:
187 + x = x +1
188 return error_log_list, sum_build_log_list
189
190 def search_qa(self, textline, qa_error_list, error_log_list,i):
191 - if re.search(" * QA Notice: Package has poor programming", textline):
192 - print 'QA Notice'
193 + if re.search(" * QA Notice:", textline):
194 + print('QA Notice')
195 x = i
196 + qa_error_list.append(self._logfile_text[x])
197 endline= True
198 error_log_list.append(".....\n")
199 while x != i + 3 and endline:
200 @@ -84,20 +122,6 @@ class gobs_buildlog(object):
201 endline = False
202 else:
203 x = x +1
204 - qa_error_list.append('QA Notice: Package has poor programming practices')
205 - if re.search(" * QA Notice: The following shared libraries lack NEEDED", textline):
206 - print 'QA Notice'
207 - x = i
208 - endline= True
209 - error_log_list.append(".....\n")
210 - while x != i + 2 and endline:
211 - try:
212 - error_log_list.append(self._logfile_text[x])
213 - except:
214 - endline = False
215 - else:
216 - x = x +1
217 - qa_error_list.append('QA Notice: The following shared libraries lack NEEDED entries')
218 return qa_error_list, error_log_list
219
220 def get_buildlog_info(self):
221 @@ -110,15 +134,12 @@ class gobs_buildlog(object):
222 repoman_error_list = []
223 sum_build_log_list = []
224 for textline in self._logfile_text:
225 - error_log_list = self.search_info(textline, error_log_list, i)
226 + error_log_list = self.search_info(textline, error_log_list)
227 error_log_list, sum_build_log_list = self.search_error(textline, error_log_list, sum_build_log_list, i)
228 qa_error_list, error_log_list = self.search_qa(textline, qa_error_list, error_log_list, i)
229 i = i +1
230 # Run repoman check_repoman()
231 - categories = self._build_dict['categories']
232 - package = self._build_dict['package']
233 - ebuild_version = self._build_dict['ebuild_version']
234 - repoman_error_list = init_repoman.check_repoman(categories, package, ebuild_version, self._config_profile)
235 + repoman_error_list = init_repoman.check_repoman(self._build_dict['categories'], self._build_dict['package'], self._build_dict['ebuild_version'], self._build_dict['config_profile'])
236 if repoman_error_list != []:
237 sum_build_log_list.append("repoman")
238 if qa_error_list != []:
239 @@ -130,7 +151,7 @@ class gobs_buildlog(object):
240 return build_log_dict
241
242 def add_buildlog_main(self):
243 - conn=self._CM.getConnection()
244 + conn=CM.getConnection()
245 build_log_dict = {}
246 build_log_dict = self.get_buildlog_info()
247 sum_build_log_list = build_log_dict['summary_error_list']
248 @@ -143,8 +164,12 @@ class gobs_buildlog(object):
249 if sum_build_log_list != []:
250 for sum_log_line in sum_build_log_list:
251 summary_error = summary_error + " " + sum_log_line
252 - print 'summary_error', summary_error
253 - logfilename = re.sub("\/var\/log\/portage\/", "", self._mysettings.get("PORTAGE_LOG_FILE"))
254 - build_id = move_queru_buildlog(conn, self._build_dict['queue_id'], build_error, summary_error, logfilename, build_log_dict)
255 + print('summary_error', summary_error)
256 + build_log_dict['logfilename'] = re.sub("\/var\/log\/portage\/", "", self._mysettings.get("PORTAGE_LOG_FILE"))
257 + print(self._build_dict['queue_id'], build_error, summary_error, build_log_dict['logfilename'], build_log_dict)
258 + if self._build_dict['queue_id'] is None:
259 + build_id = self.add_new_ebuild_buildlog(build_error, summary_error, build_log_dict)
260 + else:
261 + build_id = move_queru_buildlog(conn, self._build_dict['queue_id'], build_error, summary_error, build_log_dict)
262 # update_qa_repoman(conn, build_id, build_log_dict)
263 - print "build_id", build_id, "logged to db."
264 + print("build_id", build_id[0], "logged to db.")
265
266 diff --git a/gobs/pym/build_queru.py b/gobs/pym/build_queru.py
267 new file mode 100644
268 index 0000000..3d53a05
269 --- /dev/null
270 +++ b/gobs/pym/build_queru.py
271 @@ -0,0 +1,158 @@
272 +# Get the options from the config file set in gobs.readconf
273 +from gobs.readconf import get_conf_settings
274 +reader=get_conf_settings()
275 +gobs_settings_dict=reader.read_gobs_settings_all()
276 +# make a CM
277 +from gobs.ConnectionManager import connectionManager
278 +CM=connectionManager(gobs_settings_dict)
279 +#selectively import the pgsql/mysql querys
280 +if CM.getName()=='pgsql':
281 + from gobs.querys.pgsql import *
282 +elif CM.getName()=='mysql':
283 + from gobs.querys.mysql import *
284 +
285 +import portage
286 +import os
287 +from gobs.manifest import gobs_manifest
288 +from gobs.depclean import main_depclean
289 +from gobs.flags import gobs_use_flags
290 +from _emerge.main import emerge_main
291 +
292 +class queruaction(object):
293 +
294 + def __init__(self, config_profile):
295 + self._mysettings = portage.settings
296 + self._config_profile = config_profile
297 + self._myportdb = portage.portdb
298 +
299 + def log_fail_queru(self, build_dict, fail_querue_dict):
300 + fail_times = 0
301 + if fail_querue_dict == {}:
302 + attDict = {}
303 + attDict[build_dict['type_fail']] = 1
304 + attDict['build_info'] = build_dict
305 + fail_querue_dict[build_dict['querue_id']] = attDict
306 + return fail_querue_dict
307 + else:
308 + # FIXME:If is 5 remove fail_querue_dict[build_dict['querue_id'] from
309 + # fail_querue_dict and add log to db.
310 + if not fail_querue_dict[build_dict['querue_id']] is None:
311 + if fail_querue_dict[build_dict['querue_id']][build_dict['type_fail']] is None:
312 + fail_querue_dict[build_dict['querue_id']][build_dict['type_fail']] = 1
313 + return fail_querue_dict
314 + else:
315 + fail_times = fail_querue_dict[build_dict['querue_id']][build_dict['type_fail']]
316 + fail_times = fail_times + 1
317 + if not fail_times is 5:
318 + fail_querue_dict[build_dict['querue_id']][build_dict['type_fail']] = fail_times
319 + return fail_querue_dict
320 + else:
321 + # FIXME:If is 5 remove fail_querue_dict[build_dict['querue_id']] from
322 + # fail_querue_dict and add log to db.
323 + return fail_querue_dict
324 + else:
325 + attDict = {}
326 + attDict[build_dict['type_fail']] = 1
327 + attDict['build_info'] = build_dict
328 + fail_querue_dict[build_dict['querue_id']] = attDict
329 + return fail_querue_dict
330 +
331 + def make_build_list(self, build_dict):
332 + conn=CM.getConnection()
333 + cpv = build_dict['category']+'/'+build_dict['package']+'-'+build_dict['ebuild_version']
334 + pkgdir = os.path.join(self._mysettings['PORTDIR'], build_dict['category'] + "/" + build_dict['package'])
335 + init_manifest = gobs_manifest(self._mysettings, pkgdir)
336 + try:
337 + ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir+ "/" + build_dict['package'] + "-" + build_dict['ebuild_version'] + ".ebuild")[0]
338 + except:
339 + ebuild_version_checksum_tree = None
340 + if ebuild_version_checksum_tree == build_dict['checksum']:
341 + if portage.getmaskingstatus(cpv, settings=self._mysettings, portdb=self._myportdb) == []:
342 + init_flags = gobs_use_flags(self._mysettings, self._myportdb, cpv)
343 + build_use_flags_list = init_flags.comper_useflags(build_dict)
344 + print "build_use_flags_list", build_use_flags_list
345 + manifest_error = init_manifest.check_file_in_manifest(self._myportdb, cpv, build_dict, build_use_flags_list)
346 + if manifest_error is None:
347 + build_dict['check_fail'] = False
348 + build_cpv_dict = init_flags.get_needed_dep_useflags(build_use_flags_list)
349 + print build_cpv_dict, build_use_flags_list, cpv
350 + build_use_flags_dict = {}
351 + if build_use_flags_list is None:
352 + build_use_flags_dict['None'] = None
353 + if build_cpv_dict is None:
354 + build_cpv_dict = {}
355 + build_cpv_dict[cpv] = build_use_flags_dict
356 + else:
357 + build_cpv_dict[cpv] = build_use_flags_dict
358 + print build_cpv_dict
359 + return build_cpv_dict, build_dict
360 + else:
361 + build_dict['1'] = 1
362 + else:
363 + build_dict['2'] = 2
364 + else:
365 + build_dict['3'] = 3
366 + build_dict['check_fail'] = True
367 + return build_cpv_dict, build_dict
368 +
369 + def build_procces(self, buildqueru_cpv_dict, build_dict):
370 + build_cpv_list = []
371 + for k, v in buildqueru_cpv_dict.iteritems():
372 + build_use_flags_list = []
373 + for x, y in v.iteritems():
374 + if y is True:
375 + build_use_flags_list.append(x)
376 + if y is False:
377 + build_use_flags_list.append("-" + x)
378 + print k, build_use_flags_list
379 + if build_use_flags_list == []:
380 + build_cpv_list.append("=" + k)
381 + else:
382 + build_use_flags = ""
383 + for flags in build_use_flags_list:
384 + build_use_flags = build_use_flags + flags + ","
385 + build_cpv_list.append("=" + k + "[" + build_use_flags + "]")
386 + print 'build_cpv_list', build_cpv_list
387 + argscmd = []
388 + if not "nooneshort" in build_dict['post_message']:
389 + argscmd.append("--oneshot")
390 + argscmd.append("--buildpkg")
391 + argscmd.append("--usepkg")
392 + for build_cpv in build_cpv_list:
393 + argscmd.append(build_cpv)
394 + print argscmd
395 + # Call main_emerge to build the package in build_cpv_list
396 + try:
397 + build_fail = emerge_main(args=argscmd)
398 + except:
399 + build_fail = False
400 + # Run depclean
401 + if not "nodepclean" in build_dict['post_message']:
402 + depclean_fail = main_depclean()
403 + if build_fail is False or depclean_fail is False:
404 + return False
405 + return True
406 +
407 + def procces_qureru(self, fail_querue_dict):
408 + conn=CM.getConnection()
409 + build_dict = {}
410 + build_dict = get_packages_to_build(conn, self._config_profile)
411 + print "build_dict", build_dict
412 + if build_dict is None and fail_querue_dict == {}:
413 + return fail_querue_dict
414 + if build_dict is None and fail_querue_dict != {}:
415 + return fail_querue_dict
416 + if not build_dict['ebuild_id'] is None and build_dict['checksum'] is not None:
417 + buildqueru_cpv_dict, build_dict = self.make_build_list(build_dict)
418 + print 'buildqueru_cpv_dict', buildqueru_cpv_dict
419 + if buildqueru_cpv_dict is None:
420 + return fail_querue_dict
421 + fail_build_procces = self.build_procces(buildqueru_cpv_dict, build_dict)
422 + if build_dict['check_fail'] is True:
423 + fail_querue_dict = self.log_fail_queru(build_dict, fail_querue_dict)
424 + return fail_querue_dict
425 + if not build_dict['post_message'] is [] and build_dict['ebuild_id'] is None:
426 + return fail_querue_dict
427 + if not build_dict['ebuild_id'] is None and build_dict['checksum'] is None:
428 + del_old_queue(conn, build_dict['queue_id'])
429 + return fail_querue_dict
430
431 diff --git a/gobs/pym/depclean.py b/gobs/pym/depclean.py
432 new file mode 100644
433 index 0000000..b6096b6
434 --- /dev/null
435 +++ b/gobs/pym/depclean.py
436 @@ -0,0 +1,632 @@
437 +from __future__ import print_function
438 +import errno
439 +import portage
440 +from portage._sets.base import InternalPackageSet
441 +from _emerge.main import parse_opts
442 +from _emerge.create_depgraph_params import create_depgraph_params
443 +from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
444 +from _emerge.UnmergeDepPriority import UnmergeDepPriority
445 +from _emerge.SetArg import SetArg
446 +from _emerge.actions import load_emerge_config
447 +from _emerge.Package import Package
448 +from _emerge.unmerge import unmerge
449 +from portage.util import cmp_sort_key, writemsg, \
450 + writemsg_level, writemsg_stdout
451 +from portage.util.digraph import digraph
452 +
453 +def main_depclean():
454 + mysettings, mytrees, mtimedb = load_emerge_config()
455 + myroot = mysettings["ROOT"]
456 + root_config = mytrees[myroot]["root_config"]
457 + psets = root_config.setconfig.psets
458 + args_set = InternalPackageSet(allow_repo=True)
459 + spinner=None
460 + scheduler=None
461 + tmpcmdline = []
462 + tmpcmdline.append("--depclean")
463 + tmpcmdline.append("--pretend")
464 + print("depclean",tmpcmdline)
465 + myaction, myopts, myfiles = parse_opts(tmpcmdline, silent=False)
466 + if myfiles:
467 + args_set.update(myfiles)
468 + matched_packages = False
469 + for x in args_set:
470 + if vardb.match(x):
471 + matched_packages = True
472 + if not matched_packages:
473 + return 0
474 +
475 + rval, cleanlist, ordered, req_pkg_count, unresolvable = calc_depclean(mysettings, mytrees, mtimedb["ldpath"], myopts, myaction, args_set, spinner)
476 + print('rval, cleanlist, ordered, req_pkg_count, unresolvable', rval, cleanlist, ordered, req_pkg_count, unresolvable)
477 + if unresolvable != []:
478 + return True
479 + if cleanlist != []:
480 + conflict_package_list = []
481 + for depclean_cpv in cleanlist:
482 + if portage.versions.cpv_getkey(depclean_cpv) in list(psets["system"]):
483 + conflict_package_list.append(depclean_cpv)
484 + if portage.versions.cpv_getkey(depclean_cpv) in list(psets['selected']):
485 + conflict_package_list.append(depclean_cpv)
486 + print('conflict_package_list', conflict_package_list)
487 + if conflict_package_list == []:
488 + tmpcmdline = []
489 + tmpcmdline.append("--depclean")
490 + myaction, myopts, myfiles = parse_opts(tmpcmdline, silent=False)
491 + unmerge(root_config, myopts, "unmerge", cleanlist, mtimedb["ldpath"], ordered=ordered, scheduler=scheduler)
492 + print("Number removed: "+str(len(cleanlist)))
493 + return True
494 + return True
495 +
496 +def calc_depclean(settings, trees, ldpath_mtimes,
497 + myopts, action, args_set, spinner):
498 + allow_missing_deps = bool(args_set)
499 +
500 + debug = '--debug' in myopts
501 + xterm_titles = "notitles" not in settings.features
502 + myroot = settings["ROOT"]
503 + root_config = trees[myroot]["root_config"]
504 + psets = root_config.setconfig.psets
505 + deselect = myopts.get('--deselect') != 'n'
506 + required_sets = {}
507 + required_sets['world'] = psets['world']
508 +
509 + # When removing packages, a temporary version of the world 'selected'
510 + # set may be used which excludes packages that are intended to be
511 + # eligible for removal.
512 + selected_set = psets['selected']
513 + required_sets['selected'] = selected_set
514 + protected_set = InternalPackageSet()
515 + protected_set_name = '____depclean_protected_set____'
516 + required_sets[protected_set_name] = protected_set
517 + system_set = psets["system"]
518 +
519 + if not system_set or not selected_set:
520 +
521 + if not system_set:
522 + writemsg_level("!!! You have no system list.\n",
523 + level=logging.ERROR, noiselevel=-1)
524 +
525 + if not selected_set:
526 + writemsg_level("!!! You have no world file.\n",
527 + level=logging.WARNING, noiselevel=-1)
528 +
529 + writemsg_level("!!! Proceeding is likely to " + \
530 + "break your installation.\n",
531 + level=logging.WARNING, noiselevel=-1)
532 + if "--pretend" not in myopts:
533 + countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
534 +
535 + if action == "depclean":
536 + print(" >>> depclean")
537 +
538 + writemsg_level("\nCalculating dependencies ")
539 + resolver_params = create_depgraph_params(myopts, "remove")
540 + resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
541 + resolver._load_vdb()
542 + vardb = resolver._frozen_config.trees[myroot]["vartree"].dbapi
543 + real_vardb = trees[myroot]["vartree"].dbapi
544 +
545 + if action == "depclean":
546 +
547 + if args_set:
548 +
549 + if deselect:
550 + # Start with an empty set.
551 + selected_set = InternalPackageSet()
552 + required_sets['selected'] = selected_set
553 + # Pull in any sets nested within the selected set.
554 + selected_set.update(psets['selected'].getNonAtoms())
555 +
556 + # Pull in everything that's installed but not matched
557 + # by an argument atom since we don't want to clean any
558 + # package if something depends on it.
559 + for pkg in vardb:
560 + if spinner:
561 + spinner.update()
562 +
563 + try:
564 + if args_set.findAtomForPackage(pkg) is None:
565 + protected_set.add("=" + pkg.cpv)
566 + continue
567 + except portage.exception.InvalidDependString as e:
568 + show_invalid_depstring_notice(pkg,
569 + pkg.metadata["PROVIDE"], str(e))
570 + del e
571 + protected_set.add("=" + pkg.cpv)
572 + continue
573 +
574 + elif action == "prune":
575 +
576 + if deselect:
577 + # Start with an empty set.
578 + selected_set = InternalPackageSet()
579 + required_sets['selected'] = selected_set
580 + # Pull in any sets nested within the selected set.
581 + selected_set.update(psets['selected'].getNonAtoms())
582 +
583 + # Pull in everything that's installed since we don't
584 + # to prune a package if something depends on it.
585 + protected_set.update(vardb.cp_all())
586 +
587 + if not args_set:
588 +
589 + # Try to prune everything that's slotted.
590 + for cp in vardb.cp_all():
591 + if len(vardb.cp_list(cp)) > 1:
592 + args_set.add(cp)
593 +
594 + # Remove atoms from world that match installed packages
595 + # that are also matched by argument atoms, but do not remove
596 + # them if they match the highest installed version.
597 + for pkg in vardb:
598 + spinner.update()
599 + pkgs_for_cp = vardb.match_pkgs(pkg.cp)
600 + if not pkgs_for_cp or pkg not in pkgs_for_cp:
601 + raise AssertionError("package expected in matches: " + \
602 + "cp = %s, cpv = %s matches = %s" % \
603 + (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
604 +
605 + highest_version = pkgs_for_cp[-1]
606 + if pkg == highest_version:
607 + # pkg is the highest version
608 + protected_set.add("=" + pkg.cpv)
609 + continue
610 +
611 + if len(pkgs_for_cp) <= 1:
612 + raise AssertionError("more packages expected: " + \
613 + "cp = %s, cpv = %s matches = %s" % \
614 + (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
615 +
616 + try:
617 + if args_set.findAtomForPackage(pkg) is None:
618 + protected_set.add("=" + pkg.cpv)
619 + continue
620 + except portage.exception.InvalidDependString as e:
621 + show_invalid_depstring_notice(pkg,
622 + pkg.metadata["PROVIDE"], str(e))
623 + del e
624 + protected_set.add("=" + pkg.cpv)
625 + continue
626 +
627 + if resolver._frozen_config.excluded_pkgs:
628 + excluded_set = resolver._frozen_config.excluded_pkgs
629 + required_sets['__excluded__'] = InternalPackageSet()
630 +
631 + for pkg in vardb:
632 + if spinner:
633 + spinner.update()
634 +
635 + try:
636 + if excluded_set.findAtomForPackage(pkg):
637 + required_sets['__excluded__'].add("=" + pkg.cpv)
638 + except portage.exception.InvalidDependString as e:
639 + show_invalid_depstring_notice(pkg,
640 + pkg.metadata["PROVIDE"], str(e))
641 + del e
642 + required_sets['__excluded__'].add("=" + pkg.cpv)
643 +
644 + success = resolver._complete_graph(required_sets={myroot:required_sets})
645 + writemsg_level("\b\b... done!\n")
646 +
647 + resolver.display_problems()
648 +
649 + if not success:
650 + return True, [], False, 0, []
651 +
652 + def unresolved_deps():
653 +
654 + unresolvable = set()
655 + for dep in resolver._dynamic_config._initially_unsatisfied_deps:
656 + if isinstance(dep.parent, Package) and \
657 + (dep.priority > UnmergeDepPriority.SOFT):
658 + unresolvable.add((dep.atom, dep.parent.cpv))
659 +
660 + if not unresolvable:
661 + return None
662 +
663 + if unresolvable and not allow_missing_deps:
664 +
665 + prefix = bad(" * ")
666 + msg = []
667 + msg.append("Dependencies could not be completely resolved due to")
668 + msg.append("the following required packages not being installed:")
669 + msg.append("")
670 + for atom, parent in unresolvable:
671 + msg.append(" %s pulled in by:" % (atom,))
672 + msg.append(" %s" % (parent,))
673 + msg.append("")
674 + msg.extend(textwrap.wrap(
675 + "Have you forgotten to do a complete update prior " + \
676 + "to depclean? The most comprehensive command for this " + \
677 + "purpose is as follows:", 65
678 + ))
679 + msg.append("")
680 + msg.append(" " + \
681 + good("emerge --update --newuse --deep --with-bdeps=y @world"))
682 + msg.append("")
683 + msg.extend(textwrap.wrap(
684 + "Note that the --with-bdeps=y option is not required in " + \
685 + "many situations. Refer to the emerge manual page " + \
686 + "(run `man emerge`) for more information about " + \
687 + "--with-bdeps.", 65
688 + ))
689 + msg.append("")
690 + msg.extend(textwrap.wrap(
691 + "Also, note that it may be necessary to manually uninstall " + \
692 + "packages that no longer exist in the portage tree, since " + \
693 + "it may not be possible to satisfy their dependencies.", 65
694 + ))
695 + if action == "prune":
696 + msg.append("")
697 + msg.append("If you would like to ignore " + \
698 + "dependencies then use %s." % good("--nodeps"))
699 + writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
700 + level=logging.ERROR, noiselevel=-1)
701 + return unresolvable
702 + return None
703 +
704 + unresolvable = unresolved_deps()
705 + if not unresolvable is None:
706 + return False, [], False, 0, unresolvable
707 +
708 + graph = resolver._dynamic_config.digraph.copy()
709 + required_pkgs_total = 0
710 + for node in graph:
711 + if isinstance(node, Package):
712 + required_pkgs_total += 1
713 +
714 + def show_parents(child_node):
715 + parent_nodes = graph.parent_nodes(child_node)
716 + if not parent_nodes:
717 + # With --prune, the highest version can be pulled in without any
718 + # real parent since all installed packages are pulled in. In that
719 + # case there's nothing to show here.
720 + return
721 + parent_strs = []
722 + for node in parent_nodes:
723 + parent_strs.append(str(getattr(node, "cpv", node)))
724 + parent_strs.sort()
725 + msg = []
726 + msg.append(" %s pulled in by:\n" % (child_node.cpv,))
727 + for parent_str in parent_strs:
728 + msg.append(" %s\n" % (parent_str,))
729 + msg.append("\n")
730 + portage.writemsg_stdout("".join(msg), noiselevel=-1)
731 +
732 + def cmp_pkg_cpv(pkg1, pkg2):
733 + """Sort Package instances by cpv."""
734 + if pkg1.cpv > pkg2.cpv:
735 + return 1
736 + elif pkg1.cpv == pkg2.cpv:
737 + return 0
738 + else:
739 + return -1
740 +
741 + def create_cleanlist():
742 +
743 + # Never display the special internal protected_set.
744 + for node in graph:
745 + if isinstance(node, SetArg) and node.name == protected_set_name:
746 + graph.remove(node)
747 + break
748 +
749 + pkgs_to_remove = []
750 +
751 + if action == "depclean":
752 + if args_set:
753 +
754 + for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
755 + arg_atom = None
756 + try:
757 + arg_atom = args_set.findAtomForPackage(pkg)
758 + except portage.exception.InvalidDependString:
759 + # this error has already been displayed by now
760 + continue
761 +
762 + if arg_atom:
763 + if pkg not in graph:
764 + pkgs_to_remove.append(pkg)
765 + elif "--verbose" in myopts:
766 + show_parents(pkg)
767 +
768 + else:
769 + for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
770 + if pkg not in graph:
771 + pkgs_to_remove.append(pkg)
772 + elif "--verbose" in myopts:
773 + show_parents(pkg)
774 +
775 + elif action == "prune":
776 +
777 + for atom in args_set:
778 + for pkg in vardb.match_pkgs(atom):
779 + if pkg not in graph:
780 + pkgs_to_remove.append(pkg)
781 + elif "--verbose" in myopts:
782 + show_parents(pkg)
783 +
784 + return pkgs_to_remove
785 +
786 + cleanlist = create_cleanlist()
787 + clean_set = set(cleanlist)
788 +
789 + if cleanlist and \
790 + real_vardb._linkmap is not None and \
791 + myopts.get("--depclean-lib-check") != "n" and \
792 + "preserve-libs" not in settings.features:
793 +
794 + # Check if any of these packages are the sole providers of libraries
795 + # with consumers that have not been selected for removal. If so, these
796 + # packages and any dependencies need to be added to the graph.
797 + linkmap = real_vardb._linkmap
798 + consumer_cache = {}
799 + provider_cache = {}
800 + consumer_map = {}
801 +
802 + writemsg_level(">>> Checking for lib consumers...\n")
803 +
804 + for pkg in cleanlist:
805 + pkg_dblink = real_vardb._dblink(pkg.cpv)
806 + consumers = {}
807 +
808 + for lib in pkg_dblink.getcontents():
809 + lib = lib[len(myroot):]
810 + lib_key = linkmap._obj_key(lib)
811 + lib_consumers = consumer_cache.get(lib_key)
812 + if lib_consumers is None:
813 + try:
814 + lib_consumers = linkmap.findConsumers(lib_key)
815 + except KeyError:
816 + continue
817 + consumer_cache[lib_key] = lib_consumers
818 + if lib_consumers:
819 + consumers[lib_key] = lib_consumers
820 +
821 + if not consumers:
822 + continue
823 +
824 + for lib, lib_consumers in list(consumers.items()):
825 + for consumer_file in list(lib_consumers):
826 + if pkg_dblink.isowner(consumer_file):
827 + lib_consumers.remove(consumer_file)
828 + if not lib_consumers:
829 + del consumers[lib]
830 +
831 + if not consumers:
832 + continue
833 +
834 + for lib, lib_consumers in consumers.items():
835 +
836 + soname = linkmap.getSoname(lib)
837 +
838 + consumer_providers = []
839 + for lib_consumer in lib_consumers:
840 + providers = provider_cache.get(lib)
841 + if providers is None:
842 + providers = linkmap.findProviders(lib_consumer)
843 + provider_cache[lib_consumer] = providers
844 + if soname not in providers:
845 + # Why does this happen?
846 + continue
847 + consumer_providers.append(
848 + (lib_consumer, providers[soname]))
849 +
850 + consumers[lib] = consumer_providers
851 +
852 + consumer_map[pkg] = consumers
853 +
854 + if consumer_map:
855 +
856 + search_files = set()
857 + for consumers in consumer_map.values():
858 + for lib, consumer_providers in consumers.items():
859 + for lib_consumer, providers in consumer_providers:
860 + search_files.add(lib_consumer)
861 + search_files.update(providers)
862 +
863 + writemsg_level(">>> Assigning files to packages...\n")
864 + file_owners = real_vardb._owners.getFileOwnerMap(search_files)
865 +
866 + for pkg, consumers in list(consumer_map.items()):
867 + for lib, consumer_providers in list(consumers.items()):
868 + lib_consumers = set()
869 +
870 + for lib_consumer, providers in consumer_providers:
871 + owner_set = file_owners.get(lib_consumer)
872 + provider_dblinks = set()
873 + provider_pkgs = set()
874 +
875 + if len(providers) > 1:
876 + for provider in providers:
877 + provider_set = file_owners.get(provider)
878 + if provider_set is not None:
879 + provider_dblinks.update(provider_set)
880 +
881 + if len(provider_dblinks) > 1:
882 + for provider_dblink in provider_dblinks:
883 + provider_pkg = resolver._pkg(
884 + provider_dblink.mycpv, "installed",
885 + root_config, installed=True)
886 + if provider_pkg not in clean_set:
887 + provider_pkgs.add(provider_pkg)
888 +
889 + if provider_pkgs:
890 + continue
891 +
892 + if owner_set is not None:
893 + lib_consumers.update(owner_set)
894 +
895 + for consumer_dblink in list(lib_consumers):
896 + if resolver._pkg(consumer_dblink.mycpv, "installed",
897 + root_config, installed=True) in clean_set:
898 + lib_consumers.remove(consumer_dblink)
899 + continue
900 +
901 + if lib_consumers:
902 + consumers[lib] = lib_consumers
903 + else:
904 + del consumers[lib]
905 + if not consumers:
906 + del consumer_map[pkg]
907 +
908 + if consumer_map:
909 + # TODO: Implement a package set for rebuilding consumer packages.
910 +
911 + msg = "In order to avoid breakage of link level " + \
912 + "dependencies, one or more packages will not be removed. " + \
913 + "This can be solved by rebuilding " + \
914 + "the packages that pulled them in."
915 +
916 + prefix = bad(" * ")
917 + from textwrap import wrap
918 + writemsg_level("".join(prefix + "%s\n" % line for \
919 + line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
920 +
921 + msg = []
922 + for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
923 + consumers = consumer_map[pkg]
924 + consumer_libs = {}
925 + for lib, lib_consumers in consumers.items():
926 + for consumer in lib_consumers:
927 + consumer_libs.setdefault(
928 + consumer.mycpv, set()).add(linkmap.getSoname(lib))
929 + unique_consumers = set(chain(*consumers.values()))
930 + unique_consumers = sorted(consumer.mycpv \
931 + for consumer in unique_consumers)
932 + msg.append("")
933 + msg.append(" %s pulled in by:" % (pkg.cpv,))
934 + for consumer in unique_consumers:
935 + libs = consumer_libs[consumer]
936 + msg.append(" %s needs %s" % \
937 + (consumer, ', '.join(sorted(libs))))
938 + msg.append("")
939 + writemsg_level("".join(prefix + "%s\n" % line for line in msg),
940 + level=logging.WARNING, noiselevel=-1)
941 +
942 + # Add lib providers to the graph as children of lib consumers,
943 + # and also add any dependencies pulled in by the provider.
944 + writemsg_level(">>> Adding lib providers to graph...\n")
945 +
946 + for pkg, consumers in consumer_map.items():
947 + for consumer_dblink in set(chain(*consumers.values())):
948 + consumer_pkg = resolver._pkg(consumer_dblink.mycpv,
949 + "installed", root_config, installed=True)
950 + if not resolver._add_pkg(pkg,
951 + Dependency(parent=consumer_pkg,
952 + priority=UnmergeDepPriority(runtime=True),
953 + root=pkg.root)):
954 + resolver.display_problems()
955 + return True, [], False, 0, []
956 +
957 + writemsg_level("\nCalculating dependencies ")
958 + success = resolver._complete_graph(
959 + required_sets={myroot:required_sets})
960 + writemsg_level("\b\b... done!\n")
961 + resolver.display_problems()
962 + if not success:
963 + return True, [], False, 0, []
964 + unresolvable = unresolved_deps()
965 + if not unresolvable is None:
966 + return False, [], False, 0, unresolvable
967 +
968 + graph = resolver._dynamic_config.digraph.copy()
969 + required_pkgs_total = 0
970 + for node in graph:
971 + if isinstance(node, Package):
972 + required_pkgs_total += 1
973 + cleanlist = create_cleanlist()
974 + if not cleanlist:
975 + return 0, [], False, required_pkgs_total, unresolvable
976 + clean_set = set(cleanlist)
977 +
978 + if clean_set:
979 + writemsg_level(">>> Calculating removal order...\n")
980 + # Use a topological sort to create an unmerge order such that
981 + # each package is unmerged before its dependencies. This is
982 + # necessary to avoid breaking things that may need to run
983 + # during pkg_prerm or pkg_postrm phases.
984 +
985 + # Create a new graph to account for dependencies between the
986 + # packages being unmerged.
987 + graph = digraph()
988 + del cleanlist[:]
989 +
990 + dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
991 + runtime = UnmergeDepPriority(runtime=True)
992 + runtime_post = UnmergeDepPriority(runtime_post=True)
993 + buildtime = UnmergeDepPriority(buildtime=True)
994 + priority_map = {
995 + "RDEPEND": runtime,
996 + "PDEPEND": runtime_post,
997 + "DEPEND": buildtime,
998 + }
999 +
1000 + for node in clean_set:
1001 + graph.add(node, None)
1002 + mydeps = []
1003 + for dep_type in dep_keys:
1004 + depstr = node.metadata[dep_type]
1005 + if not depstr:
1006 + continue
1007 + priority = priority_map[dep_type]
1008 +
1009 + try:
1010 + atoms = resolver._select_atoms(myroot, depstr,
1011 + myuse=node.use.enabled, parent=node,
1012 + priority=priority)[node]
1013 + except portage.exception.InvalidDependString:
1014 + # Ignore invalid deps of packages that will
1015 + # be uninstalled anyway.
1016 + continue
1017 +
1018 + for atom in atoms:
1019 + if not isinstance(atom, portage.dep.Atom):
1020 + # Ignore invalid atoms returned from dep_check().
1021 + continue
1022 + if atom.blocker:
1023 + continue
1024 + matches = vardb.match_pkgs(atom)
1025 + if not matches:
1026 + continue
1027 + for child_node in matches:
1028 + if child_node in clean_set:
1029 + graph.add(child_node, node, priority=priority)
1030 +
1031 + ordered = True
1032 + if len(graph.order) == len(graph.root_nodes()):
1033 + # If there are no dependencies between packages
1034 + # let unmerge() group them by cat/pn.
1035 + ordered = False
1036 + cleanlist = [pkg.cpv for pkg in graph.order]
1037 + else:
1038 + # Order nodes from lowest to highest overall reference count for
1039 + # optimal root node selection (this can help minimize issues
1040 + # with unaccounted implicit dependencies).
1041 + node_refcounts = {}
1042 + for node in graph.order:
1043 + node_refcounts[node] = len(graph.parent_nodes(node))
1044 + def cmp_reference_count(node1, node2):
1045 + return node_refcounts[node1] - node_refcounts[node2]
1046 + graph.order.sort(key=cmp_sort_key(cmp_reference_count))
1047 +
1048 + ignore_priority_range = [None]
1049 + ignore_priority_range.extend(
1050 + range(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
1051 + while graph:
1052 + for ignore_priority in ignore_priority_range:
1053 + nodes = graph.root_nodes(ignore_priority=ignore_priority)
1054 + if nodes:
1055 + break
1056 + if not nodes:
1057 + raise AssertionError("no root nodes")
1058 + if ignore_priority is not None:
1059 + # Some deps have been dropped due to circular dependencies,
1060 + # so only pop one node in order to minimize the number that
1061 + # are dropped.
1062 + del nodes[1:]
1063 + for node in nodes:
1064 + graph.remove(node)
1065 + cleanlist.append(node.cpv)
1066 +
1067 + return True, cleanlist, ordered, required_pkgs_total, []
1068 + return True, [], False, required_pkgs_total, []
1069
1070 diff --git a/gobs/pym/flags.py b/gobs/pym/flags.py
1071 index ba9faf6..c2e3bcc 100644
1072 --- a/gobs/pym/flags.py
1073 +++ b/gobs/pym/flags.py
1074 @@ -1,3 +1,4 @@
1075 +from __future__ import print_function
1076 from _emerge.main import parse_opts
1077 from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
1078 from _emerge.create_depgraph_params import create_depgraph_params
1079 @@ -154,20 +155,21 @@ class gobs_use_flags(object):
1080 return iuse_flags, final_flags
1081
1082 def get_needed_dep_useflags(self, build_use_flags_list):
1083 + cpv = self._cpv
1084 tmpcmdline = []
1085 tmpcmdline.append("-p")
1086 tmpcmdline.append("--autounmask")
1087 tmpcmdline.append("=" + self._cpv)
1088 - print tmpcmdline
1089 + print(tmpcmdline)
1090 myaction, myopts, myfiles = parse_opts(tmpcmdline, silent=False)
1091 - print myaction, myopts, myfiles
1092 + print(myaction, myopts, myfiles)
1093 myparams = create_depgraph_params(myopts, myaction)
1094 - print myparams
1095 + print(myparams)
1096 settings, trees, mtimedb = load_emerge_config()
1097 try:
1098 success, mydepgraph, favorites = backtrack_depgraph(
1099 settings, trees, myopts, myparams, myaction, myfiles, spinner=None)
1100 - print success, mydepgraph, favorites
1101 + print(success, mydepgraph, favorites)
1102 except portage.exception.PackageSetNotFound as e:
1103 root_config = trees[settings["ROOT"]]["root_config"]
1104 display_missing_pkg_set(root_config, e.value)
1105 @@ -179,23 +181,22 @@ class gobs_use_flags(object):
1106 use_changes = {}
1107 for pkg, needed_use_config_changes in mydepgraph._dynamic_config._needed_use_config_changes.items():
1108 new_use, changes = needed_use_config_changes
1109 - use_changes[pkg.self._cpv] = changes
1110 - print use_changes
1111 + use_changes[pkg.cpv] = changes
1112 if use_changes is None:
1113 return None
1114 iteritems_packages = {}
1115 for k, v in use_changes.iteritems():
1116 k_package = portage.versions.cpv_getkey(k)
1117 iteritems_packages[ k_package ] = v
1118 - print iteritems_packages
1119 + print('iteritems_packages', iteritems_packages)
1120 return iteritems_packages
1121
1122 def comper_useflags(self, build_dict):
1123 iuse_flags, use_enable = self.get_flags()
1124 iuse = []
1125 - print "use_enable", use_enable
1126 + print("use_enable", use_enable)
1127 build_use_flags_dict = build_dict['build_useflags']
1128 - print "build_use_flags_dict", build_use_flags_dict
1129 + print("build_use_flags_dict", build_use_flags_dict)
1130 build_use_flags_list = []
1131 if use_enable == []:
1132 if build_use_flags_dict is None:
1133 @@ -209,10 +210,10 @@ class gobs_use_flags(object):
1134 use_flagsDict[x] = True
1135 for x in use_disable:
1136 use_flagsDict[x] = False
1137 - print "use_flagsDict", use_flagsDict
1138 + print("use_flagsDict", use_flagsDict)
1139 for k, v in use_flagsDict.iteritems():
1140 - print "tree use flags", k, v
1141 - print "db use flags", k, build_use_flags_dict[k]
1142 + print("tree use flags", k, v)
1143 + print("db use flags", k, build_use_flags_dict[k])
1144 if build_use_flags_dict[k] != v:
1145 if build_use_flags_dict[k] is True:
1146 build_use_flags_list.append(k)
1147 @@ -220,5 +221,5 @@ class gobs_use_flags(object):
1148 build_use_flags_list.append("-" + k)
1149 if build_use_flags_list == []:
1150 build_use_flags_list = None
1151 - print build_use_flags_list
1152 + print(build_use_flags_list)
1153 return build_use_flags_list
1154
1155 diff --git a/gobs/pym/old_cpv.py b/gobs/pym/old_cpv.py
1156 index 9dacd82..8a1a9c5 100644
1157 --- a/gobs/pym/old_cpv.py
1158 +++ b/gobs/pym/old_cpv.py
1159 @@ -1,3 +1,4 @@
1160 +from __future__ import print_function
1161 class gobs_old_cpv(object):
1162
1163 def __init__(self, CM, myportdb, mysettings):
1164 @@ -25,14 +26,14 @@ class gobs_old_cpv(object):
1165 # Set inactive on ebuilds in the db that are no longer in the tree
1166 if old_ebuild_list != []:
1167 for old_ebuild in old_ebuild_list:
1168 - print "O", categories + "/" + package + "-" + old_ebuild[0]
1169 + print("O", categories + "/" + package + "-" + old_ebuild[0])
1170 self.dbquerys.add_old_ebuild(conn,package_id, old_ebuild_list)
1171 # Check if we have inactive ebuilds older than 60 days
1172 ebuild_old_list_db = self.dbquerys.cp_list_old_db(conn,package_id)
1173 # Delete older ebuilds in the db
1174 if ebuild_old_list_db != []:
1175 for del_ebuild_old in ebuild_old_list_db:
1176 - print "D", categories + "/" + package + "-" + del_ebuild_old[1]
1177 + print("D", categories + "/" + package + "-" + del_ebuild_old[1])
1178 self.dbquerys.del_old_ebuild(conn,ebuild_old_list_db)
1179 self._CM.putConnection(conn)
1180
1181 @@ -52,14 +53,14 @@ class gobs_old_cpv(object):
1182 if mark_old_list != []:
1183 for x in mark_old_list:
1184 element = self.dbquerys.get_cp_from_package_id(conn,x)
1185 - print "O", element[0]
1186 + print("O", element[0])
1187 # Check if we have inactive categories/package entries older than 60 days
1188 del_package_id_old_list = self.dbquerys.cp_all_old_db(conn,old_package_id_list)
1189 # Delete older categories/package and ebuilds in the db
1190 if del_package_id_old_list != []:
1191 for i in del_package_id_old_list:
1192 element = self.dbquerys.get_cp_from_package_id(conn,i)
1193 - print "D", element
1194 + print("D", element)
1195 self.dbquerys.del_old_package(conn,del_package_id_old_list)
1196 self._CM.putConnection(conn)
1197
1198 @@ -80,5 +81,5 @@ class gobs_old_cpv(object):
1199 if categories_old_list != []:
1200 for real_old_categories in categories_old_list:
1201 self.dbquerys.del_old_categories(conn,real_old_categoriess)
1202 - print "D", real_old_categories
1203 + print("D", real_old_categories)
1204 self._CM.putConnection(conn)
1205 \ No newline at end of file
1206
1207 diff --git a/gobs/pym/package.py b/gobs/pym/package.py
1208 index 46e13cb..4f0864d 100644
1209 --- a/gobs/pym/package.py
1210 +++ b/gobs/pym/package.py
1211 @@ -1,3 +1,4 @@
1212 +from __future__ import print_function
1213 import portage
1214 from gobs.flags import gobs_use_flags
1215 from gobs.repoman_gobs import gobs_repoman
1216 @@ -157,37 +158,10 @@ class gobs_package(object):
1217 # Compare ebuild_version and add the ebuild_version to buildqueue
1218 if portage.vercmp(v['ebuild_version_tree'], latest_ebuild_version) == 0:
1219 self._dbquerys.add_new_package_buildqueue(conn,ebuild_id, config_id, use_flags_list, use_enable_list, message)
1220 - print "B", config_id, v['categories'] + "/" + v['package'] + "-" + latest_ebuild_version, "USE:", use_enable # B = Build config cpv use-flags
1221 + print("B", config_id, v['categories'] + "/" + v['package'] + "-" + latest_ebuild_version, "USE:", use_enable) # B = Build config cpv use-flags
1222 i = i +1
1223 self._CM.putConnection(conn)
1224
1225 - def add_new_ebuild_buildquery_db_looked(self, build_dict, config_profile):
1226 - conn=self._CM.getConnection()
1227 - myportdb = portage.portdbapi(mysettings=self._mysettings)
1228 - cpv = build_dict['cpv']
1229 - message = None
1230 - init_useflags = gobs_use_flags(self._mysettings, myportdb, cpv)
1231 - iuse_flags_list, final_use_list = init_useflags.get_flags_looked()
1232 - iuse = []
1233 - use_flags_list = []
1234 - use_enable_list = []
1235 - for iuse_line in iuse_flags_list:
1236 - iuse.append(init_useflags.reduce_flag(iuse_line))
1237 - iuse_flags_list2 = list(set(iuse))
1238 - use_enable = final_use_list
1239 - use_disable = list(set(iuse_flags_list2).difference(set(use_enable)))
1240 - use_flagsDict = {}
1241 - for x in use_enable:
1242 - use_flagsDict[x] = True
1243 - for x in use_disable:
1244 - use_flagsDict[x] = False
1245 - for u, s in use_flagsDict.iteritems():
1246 - use_flags_list.append(u)
1247 - use_enable_list.append(s)
1248 - ebuild_id = self._dbquerys.get_ebuild_id_db_checksum(conn, build_dict)
1249 - self._dbquerys.add_new_package_buildqueue(conn, ebuild_id, config_profile, use_flags_list, use_enable_list, message)
1250 - self._CM.putConnection(conn)
1251 -
1252 def get_package_metadataDict(self, pkgdir, package):
1253 # Make package_metadataDict
1254 attDict = {}
1255 @@ -206,7 +180,7 @@ class gobs_package(object):
1256 def add_new_package_db(self, categories, package):
1257 conn=self._CM.getConnection()
1258 # add new categories package ebuild to tables package and ebuilds
1259 - print "N", categories + "/" + package # N = New Package
1260 + print("N", categories + "/" + package) # N = New Package
1261 pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR + cp
1262 categories_dir = self._mysettings['PORTDIR'] + "/" + categories + "/"
1263 # Get the ebuild list for cp
1264 @@ -231,7 +205,7 @@ class gobs_package(object):
1265 manifest_error = init_manifest.digestcheck()
1266 if manifest_error is not None:
1267 qa_error.append(manifest_error)
1268 - print "QA:", categories + "/" + package, qa_error
1269 + print("QA:", categories + "/" + package, qa_error)
1270 self._dbquerys.add_qa_repoman(conn,ebuild_id_list, qa_error, packageDict, config_id)
1271 # Add the ebuild to the buildqueue table if needed
1272 self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
1273 @@ -256,7 +230,7 @@ class gobs_package(object):
1274 # if we have the same checksum return else update the package
1275 ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
1276 if manifest_checksum_tree != manifest_checksum_db:
1277 - print "U", categories + "/" + package # U = Update
1278 + print("U", categories + "/" + package) # U = Update
1279 # Get package_metadataDict and update the db with it
1280 package_metadataDict = self.get_package_metadataDict(pkgdir, package)
1281 self._dbquerys.update_new_package_metadata(conn,package_id, package_metadataDict)
1282 @@ -276,9 +250,9 @@ class gobs_package(object):
1283 # Get packageDict for ebuild
1284 packageDict[ebuild_line] = self.get_packageDict(pkgdir, ebuild_line, categories, package, config_id)
1285 if ebuild_version_manifest_checksum_db is None:
1286 - print "N", categories + "/" + package + "-" + ebuild_version_tree # N = New ebuild
1287 + print("N", categories + "/" + package + "-" + ebuild_version_tree) # N = New ebuild
1288 else:
1289 - print "U", categories + "/" + package + "-" + ebuild_version_tree # U = Updated ebuild
1290 + print("U", categories + "/" + package + "-" + ebuild_version_tree) # U = Updated ebuild
1291 # Fix so we can use add_new_package_sql(packageDict) to update the ebuilds
1292 old_ebuild_list.append(ebuild_version_tree)
1293 self._dbquerys.add_old_ebuild(conn,package_id, old_ebuild_list)
1294 @@ -297,7 +271,7 @@ class gobs_package(object):
1295 manifest_error = init_manifest.digestcheck()
1296 if manifest_error is not None:
1297 qa_error.append(manifest_error)
1298 - print "QA:", categories + "/" + package, qa_error
1299 + print("QA:", categories + "/" + package, qa_error)
1300 self._dbquerys.add_qa_repoman(conn,ebuild_id_list, qa_error, packageDict, config_id)
1301 # Add the ebuild to the buildqueue table if needed
1302 self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
1303 @@ -324,4 +298,4 @@ class gobs_package(object):
1304 self._dbquerys.add_old_ebuild(conn,package_id, old_ebuild_list)
1305 self._dbquerys.update_active_ebuild(conn,package_id, ebuild_version_tree)
1306 return_id = self._dbquerys.add_new_package_sql(conn,packageDict)
1307 - print 'return_id', return_id
1308 \ No newline at end of file
1309 + print('return_id', return_id)
1310 \ No newline at end of file
1311
1312 diff --git a/gobs/pym/text.py b/gobs/pym/text.py
1313 index 9f5bb4e..2f1f689 100644
1314 --- a/gobs/pym/text.py
1315 +++ b/gobs/pym/text.py
1316 @@ -1,3 +1,4 @@
1317 +from __future__ import print_function
1318 import sys
1319 import re
1320 import os
1321 @@ -7,10 +8,8 @@ def get_file_text(filename):
1322 # Return the filename contents
1323 try:
1324 textfile = open(filename)
1325 - except IOError, oe:
1326 - if oe.errno not in (errno.ENOENT, ):
1327 - raise
1328 - return "No file", filename
1329 + except:
1330 + return "No file", filename
1331 text = ""
1332 for line in textfile:
1333 text += unicode(line, 'utf-8')
1334 @@ -21,10 +20,8 @@ def get_ebuild_text(filename):
1335 """Return the ebuild contents"""
1336 try:
1337 ebuildfile = open(filename)
1338 - except IOError, oe:
1339 - if oe.errno not in (errno.ENOENT, ):
1340 - raise
1341 - return "No Ebuild file there"
1342 + except:
1343 + return "No Ebuild file there"
1344 text = ""
1345 dataLines = ebuildfile.readlines()
1346 for i in dataLines:
1347 @@ -40,12 +37,10 @@ def get_ebuild_text(filename):
1348
1349 def get_log_text_list(filename):
1350 """Return the log contents as a list"""
1351 - print "filename", filename
1352 + print("filename", filename)
1353 try:
1354 logfile = open(filename)
1355 - except IOError, oe:
1356 - if oe.errno not in (errno.ENOENT, ):
1357 - raise
1358 + except:
1359 return None
1360 text = []
1361 dataLines = logfile.readlines()