Gentoo Archives: gentoo-commits

From: Fabian Groffen <grobian@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/portage:prefix commit in: /
Date: Fri, 14 Jan 2022 10:32:30
Message-Id: 1642156321.9d0d47eed1ed7b5e2bba49b1d79ca3e9fc7fb7ec.grobian@gentoo
1 commit: 9d0d47eed1ed7b5e2bba49b1d79ca3e9fc7fb7ec
2 Author: Fabian Groffen <grobian <AT> gentoo <DOT> org>
3 AuthorDate: Fri Jan 14 10:32:01 2022 +0000
4 Commit: Fabian Groffen <grobian <AT> gentoo <DOT> org>
5 CommitDate: Fri Jan 14 10:32:01 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/portage.git/commit/?id=9d0d47ee
7
8 Merge remote-tracking branch 'origin/master' into prefix
9
10 Signed-off-by: Fabian Groffen <grobian <AT> gentoo.org>
11
12 .editorconfig | 2 +-
13 .github/workflows/black.yml | 10 +
14 .github/workflows/ci.yml | 4 +-
15 .gitignorerevs | 4 +
16 DEVELOPING | 19 +-
17 MANIFEST.in | 2 +
18 NEWS | 56 +
19 README => README.md | 59 +-
20 RELEASE-NOTES | 27 +
21 bin/check-implicit-pointer-usage.py | 52 +-
22 bin/chmod-lite.py | 25 +-
23 bin/chpathtool.py | 323 +-
24 bin/dispatch-conf | 13 +-
25 bin/dohtml.py | 424 +-
26 bin/doins.py | 1007 +-
27 bin/ebuild-ipc.py | 519 +-
28 bin/ebuild.sh | 83 +-
29 bin/estrip | 103 +-
30 bin/filter-bash-environment.py | 266 +-
31 bin/install-qa-check.d/10ignored-flags | 4 +-
32 bin/install.py | 379 +-
33 bin/isolated-functions.sh | 90 +-
34 bin/lock-helper.py | 34 +-
35 bin/misc-functions.sh | 64 +-
36 bin/phase-functions.sh | 6 +-
37 bin/phase-helpers.sh | 14 +-
38 bin/pid-ns-init | 248 +-
39 bin/portageq | 3 +-
40 bin/save-ebuild-env.sh | 18 +-
41 bin/socks5-server.py | 449 +-
42 bin/xattr-helper.py | 226 +-
43 bin/xpak-helper.py | 81 +-
44 cnf/make.conf.example.riscv.diff | 61 +
45 cnf/make.globals | 2 +-
46 doc/api/conf.py | 24 +-
47 lib/_emerge/AbstractDepPriority.py | 38 +-
48 lib/_emerge/AbstractEbuildProcess.py | 869 +-
49 lib/_emerge/AbstractPollTask.py | 205 +-
50 lib/_emerge/AsynchronousLock.py | 571 +-
51 lib/_emerge/AsynchronousTask.py | 391 +-
52 lib/_emerge/AtomArg.py | 11 +-
53 lib/_emerge/Binpkg.py | 1003 +-
54 lib/_emerge/BinpkgEnvExtractor.py | 123 +-
55 lib/_emerge/BinpkgExtractorAsync.py | 190 +-
56 lib/_emerge/BinpkgFetcher.py | 447 +-
57 lib/_emerge/BinpkgPrefetcher.py | 74 +-
58 lib/_emerge/BinpkgVerifier.py | 232 +-
59 lib/_emerge/Blocker.py | 15 +-
60 lib/_emerge/BlockerCache.py | 343 +-
61 lib/_emerge/BlockerDB.py | 226 +-
62 lib/_emerge/BlockerDepPriority.py | 14 +-
63 lib/_emerge/CompositeTask.py | 234 +-
64 lib/_emerge/DepPriority.py | 100 +-
65 lib/_emerge/DepPriorityNormalRange.py | 86 +-
66 lib/_emerge/DepPrioritySatisfiedRange.py | 183 +-
67 lib/_emerge/Dependency.py | 38 +-
68 lib/_emerge/DependencyArg.py | 48 +-
69 lib/_emerge/EbuildBinpkg.py | 94 +-
70 lib/_emerge/EbuildBuild.py | 1142 +-
71 lib/_emerge/EbuildBuildDir.py | 295 +-
72 lib/_emerge/EbuildExecuter.py | 156 +-
73 lib/_emerge/EbuildFetcher.py | 741 +-
74 lib/_emerge/EbuildFetchonly.py | 58 +-
75 lib/_emerge/EbuildIpcDaemon.py | 164 +-
76 lib/_emerge/EbuildMerge.py | 139 +-
77 lib/_emerge/EbuildMetadataPhase.py | 431 +-
78 lib/_emerge/EbuildPhase.py | 1043 +-
79 lib/_emerge/EbuildProcess.py | 31 +-
80 lib/_emerge/EbuildSpawnProcess.py | 23 +-
81 lib/_emerge/FakeVartree.py | 621 +-
82 lib/_emerge/FifoIpcDaemon.py | 105 +-
83 lib/_emerge/JobStatusDisplay.py | 556 +-
84 lib/_emerge/MergeListItem.py | 262 +-
85 lib/_emerge/MetadataRegen.py | 292 +-
86 lib/_emerge/MiscFunctionsProcess.py | 91 +-
87 lib/_emerge/Package.py | 1869 +-
88 lib/_emerge/PackageArg.py | 18 +-
89 lib/_emerge/PackageMerge.py | 90 +-
90 lib/_emerge/PackagePhase.py | 176 +-
91 lib/_emerge/PackageUninstall.py | 274 +-
92 lib/_emerge/PackageVirtualDbapi.py | 274 +-
93 lib/_emerge/PipeReader.py | 169 +-
94 lib/_emerge/PollScheduler.py | 346 +-
95 lib/_emerge/ProgressHandler.py | 30 +-
96 lib/_emerge/RootConfig.py | 64 +-
97 lib/_emerge/Scheduler.py | 4260 ++--
98 lib/_emerge/SequentialTaskQueue.py | 157 +-
99 lib/_emerge/SetArg.py | 12 +-
100 lib/_emerge/SpawnProcess.py | 545 +-
101 lib/_emerge/SubProcess.py | 157 +-
102 lib/_emerge/Task.py | 91 +-
103 lib/_emerge/TaskSequence.py | 95 +-
104 lib/_emerge/UninstallFailure.py | 21 +-
105 lib/_emerge/UnmergeDepPriority.py | 68 +-
106 lib/_emerge/UseFlagDisplay.py | 188 +-
107 lib/_emerge/UserQuery.py | 108 +-
108 lib/_emerge/_find_deep_system_runtime_deps.py | 54 +-
109 lib/_emerge/_flush_elog_mod_echo.py | 19 +-
110 lib/_emerge/actions.py | 7077 +++---
111 lib/_emerge/chk_updated_cfg_files.py | 74 +-
112 lib/_emerge/clear_caches.py | 21 +-
113 lib/_emerge/countdown.py | 24 +-
114 lib/_emerge/create_depgraph_params.py | 398 +-
115 lib/_emerge/create_world_atom.py | 213 +-
116 lib/_emerge/depgraph.py | 21924 ++++++++++---------
117 lib/_emerge/emergelog.py | 68 +-
118 lib/_emerge/getloadavg.py | 51 +-
119 lib/_emerge/help.py | 168 +-
120 lib/_emerge/is_valid_package_atom.py | 30 +-
121 lib/_emerge/main.py | 2568 +--
122 lib/_emerge/post_emerge.py | 292 +-
123 lib/_emerge/resolver/DbapiProvidesIndex.py | 191 +-
124 lib/_emerge/resolver/backtracking.py | 548 +-
125 lib/_emerge/resolver/circular_dependency.py | 576 +-
126 lib/_emerge/resolver/output.py | 1973 +-
127 lib/_emerge/resolver/output_helpers.py | 1110 +-
128 lib/_emerge/resolver/package_tracker.py | 740 +-
129 lib/_emerge/resolver/slot_collision.py | 2466 ++-
130 lib/_emerge/search.py | 1071 +-
131 lib/_emerge/show_invalid_depstring_notice.py | 49 +-
132 lib/_emerge/stdout_spinner.py | 148 +-
133 lib/_emerge/unmerge.py | 1268 +-
134 lib/portage/__init__.py | 1200 +-
135 lib/portage/_compat_upgrade/binpkg_compression.py | 67 +-
136 .../_compat_upgrade/binpkg_multi_instance.py | 44 +-
137 lib/portage/_compat_upgrade/default_locations.py | 174 +-
138 lib/portage/_emirrordist/Config.py | 274 +-
139 lib/portage/_emirrordist/ContentDB.py | 371 +-
140 lib/portage/_emirrordist/DeletionIterator.py | 204 +-
141 lib/portage/_emirrordist/DeletionTask.py | 284 +-
142 lib/portage/_emirrordist/FetchIterator.py | 539 +-
143 lib/portage/_emirrordist/FetchTask.py | 1354 +-
144 lib/portage/_emirrordist/MirrorDistTask.py | 458 +-
145 lib/portage/_emirrordist/main.py | 882 +-
146 lib/portage/_global_updates.py | 504 +-
147 lib/portage/_legacy_globals.py | 145 +-
148 lib/portage/_selinux.py | 228 +-
149 lib/portage/_sets/ProfilePackageSet.py | 65 +-
150 lib/portage/_sets/__init__.py | 619 +-
151 lib/portage/_sets/base.py | 462 +-
152 lib/portage/_sets/dbapi.py | 1073 +-
153 lib/portage/_sets/files.py | 757 +-
154 lib/portage/_sets/libs.py | 172 +-
155 lib/portage/_sets/profiles.py | 110 +-
156 lib/portage/_sets/security.py | 151 +-
157 lib/portage/_sets/shell.py | 65 +-
158 lib/portage/binrepo/config.py | 249 +-
159 lib/portage/cache/anydbm.py | 159 +-
160 lib/portage/cache/cache_errors.py | 103 +-
161 lib/portage/cache/ebuild_xattr.py | 298 +-
162 lib/portage/cache/flat_hash.py | 263 +-
163 lib/portage/cache/fs_template.py | 130 +-
164 lib/portage/cache/index/IndexStreamIterator.py | 32 +-
165 lib/portage/cache/index/pkg_desc_index.py | 67 +-
166 lib/portage/cache/mappings.py | 780 +-
167 lib/portage/cache/metadata.py | 292 +-
168 lib/portage/cache/sql_template.py | 618 +-
169 lib/portage/cache/sqlite.py | 633 +-
170 lib/portage/cache/template.py | 674 +-
171 lib/portage/cache/volatile.py | 35 +-
172 lib/portage/checksum.py | 948 +-
173 lib/portage/const.py | 389 +-
174 lib/portage/cvstree.py | 551 +-
175 lib/portage/data.py | 564 +-
176 lib/portage/dbapi/DummyTree.py | 24 +-
177 lib/portage/dbapi/IndexedPortdb.py | 313 +-
178 lib/portage/dbapi/IndexedVardb.py | 209 +-
179 .../dbapi/_ContentsCaseSensitivityManager.py | 179 +-
180 lib/portage/dbapi/_MergeProcess.py | 465 +-
181 lib/portage/dbapi/_SyncfsProcess.py | 91 +-
182 lib/portage/dbapi/_VdbMetadataDelta.py | 333 +-
183 lib/portage/dbapi/__init__.py | 878 +-
184 lib/portage/dbapi/_expand_new_virt.py | 129 +-
185 lib/portage/dbapi/_similar_name_search.py | 96 +-
186 lib/portage/dbapi/bintree.py | 3835 ++--
187 lib/portage/dbapi/cpv_expand.py | 183 +-
188 lib/portage/dbapi/dep_expand.py | 84 +-
189 lib/portage/dbapi/porttree.py | 3230 +--
190 lib/portage/dbapi/vartree.py | 12207 ++++++-----
191 lib/portage/dbapi/virtual.py | 442 +-
192 lib/portage/debug.py | 215 +-
193 lib/portage/dep/__init__.py | 5919 ++---
194 lib/portage/dep/_dnf.py | 159 +-
195 lib/portage/dep/_slot_operator.py | 206 +-
196 lib/portage/dep/dep_check.py | 2019 +-
197 lib/portage/dep/soname/SonameAtom.py | 96 +-
198 lib/portage/dep/soname/multilib_category.py | 268 +-
199 lib/portage/dep/soname/parse.py | 68 +-
200 lib/portage/dispatch_conf.py | 706 +-
201 lib/portage/eapi.py | 466 +-
202 lib/portage/eclass_cache.py | 322 +-
203 lib/portage/elog/__init__.py | 326 +-
204 lib/portage/elog/filtering.py | 21 +-
205 lib/portage/elog/messages.py | 311 +-
206 lib/portage/elog/mod_custom.py | 25 +-
207 lib/portage/elog/mod_echo.py | 103 +-
208 lib/portage/elog/mod_mail.py | 63 +-
209 lib/portage/elog/mod_mail_summary.py | 150 +-
210 lib/portage/elog/mod_save.py | 128 +-
211 lib/portage/elog/mod_save_summary.py | 134 +-
212 lib/portage/elog/mod_syslog.py | 37 +-
213 lib/portage/emaint/defaults.py | 39 +-
214 lib/portage/emaint/main.py | 431 +-
215 lib/portage/emaint/modules/binhost/__init__.py | 26 +-
216 lib/portage/emaint/modules/binhost/binhost.py | 351 +-
217 lib/portage/emaint/modules/config/__init__.py | 26 +-
218 lib/portage/emaint/modules/config/config.py | 129 +-
219 lib/portage/emaint/modules/logs/__init__.py | 79 +-
220 lib/portage/emaint/modules/logs/logs.py | 186 +-
221 lib/portage/emaint/modules/merges/__init__.py | 69 +-
222 lib/portage/emaint/modules/merges/merges.py | 543 +-
223 lib/portage/emaint/modules/move/__init__.py | 46 +-
224 lib/portage/emaint/modules/move/move.py | 361 +-
225 lib/portage/emaint/modules/resume/__init__.py | 26 +-
226 lib/portage/emaint/modules/resume/resume.py | 97 +-
227 lib/portage/emaint/modules/sync/__init__.py | 99 +-
228 lib/portage/emaint/modules/sync/sync.py | 936 +-
229 lib/portage/emaint/modules/world/__init__.py | 26 +-
230 lib/portage/emaint/modules/world/world.py | 158 +-
231 lib/portage/env/config.py | 162 +-
232 lib/portage/env/loaders.py | 588 +-
233 lib/portage/env/validators.py | 23 +-
234 lib/portage/exception.py | 248 +-
235 lib/portage/getbinpkg.py | 1737 +-
236 lib/portage/glsa.py | 1432 +-
237 lib/portage/localization.py | 62 +-
238 lib/portage/locks.py | 1419 +-
239 lib/portage/mail.py | 238 +-
240 lib/portage/manifest.py | 1446 +-
241 lib/portage/metadata.py | 417 +-
242 lib/portage/module.py | 449 +-
243 lib/portage/news.py | 862 +-
244 lib/portage/output.py | 1589 +-
245 .../package/ebuild/_config/KeywordsManager.py | 658 +-
246 .../package/ebuild/_config/LicenseManager.py | 450 +-
247 .../package/ebuild/_config/LocationsManager.py | 752 +-
248 lib/portage/package/ebuild/_config/MaskManager.py | 585 +-
249 lib/portage/package/ebuild/_config/UseManager.py | 1297 +-
250 .../package/ebuild/_config/VirtualsManager.py | 444 +-
251 .../package/ebuild/_config/env_var_validation.py | 31 +-
252 lib/portage/package/ebuild/_config/features_set.py | 237 +-
253 lib/portage/package/ebuild/_config/helper.py | 103 +-
254 .../package/ebuild/_config/special_env_vars.py | 452 +-
255 .../package/ebuild/_config/unpack_dependencies.py | 67 +-
256 lib/portage/package/ebuild/_ipc/ExitCommand.py | 36 +-
257 lib/portage/package/ebuild/_ipc/IpcCommand.py | 7 +-
258 lib/portage/package/ebuild/_ipc/QueryCommand.py | 264 +-
259 lib/portage/package/ebuild/_metadata_invalid.py | 68 +-
260 .../ebuild/_parallel_manifest/ManifestProcess.py | 74 +-
261 .../ebuild/_parallel_manifest/ManifestScheduler.py | 146 +-
262 .../ebuild/_parallel_manifest/ManifestTask.py | 403 +-
263 lib/portage/package/ebuild/_spawn_nofetch.py | 206 +-
264 lib/portage/package/ebuild/config.py | 6259 +++---
265 .../package/ebuild/deprecated_profile_check.py | 172 +-
266 lib/portage/package/ebuild/digestcheck.py | 296 +-
267 lib/portage/package/ebuild/digestgen.py | 401 +-
268 lib/portage/package/ebuild/doebuild.py | 5730 ++---
269 lib/portage/package/ebuild/fetch.py | 3521 +--
270 lib/portage/package/ebuild/getmaskingreason.py | 205 +-
271 lib/portage/package/ebuild/getmaskingstatus.py | 333 +-
272 lib/portage/package/ebuild/prepare_build_dirs.py | 914 +-
273 lib/portage/package/ebuild/profile_iuse.py | 49 +-
274 lib/portage/process.py | 1870 +-
275 lib/portage/progress.py | 87 +-
276 lib/portage/proxy/lazyimport.py | 385 +-
277 lib/portage/proxy/objectproxy.py | 120 +-
278 lib/portage/repository/config.py | 2743 +--
279 .../repository/storage/hardlink_quarantine.py | 181 +-
280 lib/portage/repository/storage/hardlink_rcu.py | 491 +-
281 lib/portage/repository/storage/inplace.py | 61 +-
282 lib/portage/repository/storage/interface.py | 127 +-
283 lib/portage/sync/__init__.py | 57 +-
284 lib/portage/sync/config_checks.py | 131 +-
285 lib/portage/sync/controller.py | 731 +-
286 lib/portage/sync/getaddrinfo_validate.py | 37 +-
287 lib/portage/sync/modules/cvs/__init__.py | 64 +-
288 lib/portage/sync/modules/cvs/cvs.py | 116 +-
289 lib/portage/sync/modules/git/__init__.py | 115 +-
290 lib/portage/sync/modules/git/git.py | 584 +-
291 lib/portage/sync/modules/mercurial/__init__.py | 52 +-
292 lib/portage/sync/modules/mercurial/mercurial.py | 320 +-
293 lib/portage/sync/modules/rsync/__init__.py | 52 +-
294 lib/portage/sync/modules/rsync/rsync.py | 1519 +-
295 lib/portage/sync/modules/svn/__init__.py | 38 +-
296 lib/portage/sync/modules/svn/svn.py | 152 +-
297 lib/portage/sync/modules/webrsync/__init__.py | 60 +-
298 lib/portage/sync/modules/webrsync/webrsync.py | 236 +-
299 lib/portage/sync/old_tree_timestamp.py | 167 +-
300 lib/portage/sync/syncbase.py | 657 +-
301 lib/portage/tests/__init__.py | 550 +-
302 lib/portage/tests/bin/setup_env.py | 131 +-
303 lib/portage/tests/bin/test_dobin.py | 19 +-
304 lib/portage/tests/bin/test_dodir.py | 23 +-
305 lib/portage/tests/bin/test_doins.py | 636 +-
306 lib/portage/tests/bin/test_eapi7_ver_funcs.py | 460 +-
307 lib/portage/tests/bin/test_filter_bash_env.py | 70 +-
308 lib/portage/tests/dbapi/test_auxdb.py | 197 +-
309 lib/portage/tests/dbapi/test_fakedbapi.py | 163 +-
310 lib/portage/tests/dbapi/test_portdb_cache.py | 349 +-
311 lib/portage/tests/dep/testAtom.py | 997 +-
312 lib/portage/tests/dep/testCheckRequiredUse.py | 414 +-
313 lib/portage/tests/dep/testExtendedAtomDict.py | 20 +-
314 lib/portage/tests/dep/testExtractAffectingUSE.py | 158 +-
315 lib/portage/tests/dep/testStandalone.py | 66 +-
316 lib/portage/tests/dep/test_best_match_to_list.py | 168 +-
317 lib/portage/tests/dep/test_dep_getcpv.py | 56 +-
318 lib/portage/tests/dep/test_dep_getrepo.py | 40 +-
319 lib/portage/tests/dep/test_dep_getslot.py | 35 +-
320 lib/portage/tests/dep/test_dep_getusedeps.py | 47 +-
321 lib/portage/tests/dep/test_dnf_convert.py | 94 +-
322 lib/portage/tests/dep/test_get_operator.py | 53 +-
323 .../tests/dep/test_get_required_use_flags.py | 76 +-
324 lib/portage/tests/dep/test_isjustname.py | 32 +-
325 lib/portage/tests/dep/test_isvalidatom.py | 407 +-
326 lib/portage/tests/dep/test_match_from_list.py | 410 +-
327 lib/portage/tests/dep/test_overlap_dnf.py | 57 +-
328 lib/portage/tests/dep/test_paren_reduce.py | 121 +-
329 lib/portage/tests/dep/test_soname_atom_pickle.py | 20 +-
330 lib/portage/tests/dep/test_use_reduce.py | 1391 +-
331 .../tests/ebuild/test_array_fromfile_eof.py | 73 +-
332 lib/portage/tests/ebuild/test_config.py | 724 +-
333 lib/portage/tests/ebuild/test_doebuild_fd_pipes.py | 273 +-
334 lib/portage/tests/ebuild/test_doebuild_spawn.py | 176 +-
335 lib/portage/tests/ebuild/test_fetch.py | 1518 +-
336 lib/portage/tests/ebuild/test_ipc_daemon.py | 289 +-
337 lib/portage/tests/ebuild/test_shell_quote.py | 218 +-
338 lib/portage/tests/ebuild/test_spawn.py | 79 +-
339 .../tests/ebuild/test_use_expand_incremental.py | 212 +-
340 lib/portage/tests/emerge/test_config_protect.py | 451 +-
341 .../emerge/test_emerge_blocker_file_collision.py | 304 +-
342 lib/portage/tests/emerge/test_emerge_slot_abi.py | 335 +-
343 lib/portage/tests/emerge/test_global_updates.py | 33 +-
344 lib/portage/tests/emerge/test_simple.py | 1054 +-
345 .../tests/env/config/test_PackageKeywordsFile.py | 59 +-
346 .../tests/env/config/test_PackageMaskFile.py | 34 +-
347 .../tests/env/config/test_PackageUseFile.py | 44 +-
348 .../tests/env/config/test_PortageModulesFile.py | 57 +-
349 lib/portage/tests/glsa/test_security_set.py | 174 +-
350 lib/portage/tests/lafilefixer/test_lafilefixer.py | 187 +-
351 .../test_lazy_import_portage_baseline.py | 135 +-
352 .../lazyimport/test_preload_portage_submodules.py | 18 +-
353 lib/portage/tests/lint/metadata.py | 9 +-
354 lib/portage/tests/lint/test_bash_syntax.py | 81 +-
355 lib/portage/tests/lint/test_compile_modules.py | 103 +-
356 lib/portage/tests/lint/test_import_modules.py | 59 +-
357 lib/portage/tests/locks/test_asynchronous_lock.py | 340 +-
358 lib/portage/tests/locks/test_lock_nonblock.py | 120 +-
359 lib/portage/tests/news/test_NewsItem.py | 121 +-
360 lib/portage/tests/process/test_AsyncFunction.py | 91 +-
361 lib/portage/tests/process/test_PipeLogger.py | 106 +-
362 lib/portage/tests/process/test_PopenProcess.py | 156 +-
363 .../tests/process/test_PopenProcessBlockingIO.py | 103 +-
364 lib/portage/tests/process/test_poll.py | 182 +-
365 lib/portage/tests/process/test_unshare_net.py | 38 +-
366 lib/portage/tests/resolver/ResolverPlayground.py | 1950 +-
367 .../test_build_id_profile_format.py | 271 +-
368 .../binpkg_multi_instance/test_rebuilt_binaries.py | 193 +-
369 .../tests/resolver/soname/test_autounmask.py | 176 +-
370 lib/portage/tests/resolver/soname/test_depclean.py | 106 +-
371 .../tests/resolver/soname/test_downgrade.py | 463 +-
372 .../tests/resolver/soname/test_or_choices.py | 166 +-
373 .../tests/resolver/soname/test_reinstall.py | 150 +-
374 .../tests/resolver/soname/test_skip_update.py | 148 +-
375 .../soname/test_slot_conflict_reinstall.py | 671 +-
376 .../resolver/soname/test_slot_conflict_update.py | 203 +-
377 .../tests/resolver/soname/test_soname_provided.py | 131 +-
378 .../tests/resolver/soname/test_unsatisfiable.py | 117 +-
379 .../tests/resolver/soname/test_unsatisfied.py | 148 +-
380 .../test_aggressive_backtrack_downgrade.py | 153 +-
381 lib/portage/tests/resolver/test_autounmask.py | 1346 +-
382 .../tests/resolver/test_autounmask_binpkg_use.py | 115 +-
383 .../resolver/test_autounmask_keep_keywords.py | 122 +-
384 .../tests/resolver/test_autounmask_multilib_use.py | 147 +-
385 .../tests/resolver/test_autounmask_parent.py | 65 +-
386 .../resolver/test_autounmask_use_backtrack.py | 146 +-
387 .../tests/resolver/test_autounmask_use_breakage.py | 174 +-
388 .../resolver/test_autounmask_use_slot_conflict.py | 76 +-
389 lib/portage/tests/resolver/test_backtracking.py | 360 +-
390 lib/portage/tests/resolver/test_bdeps.py | 399 +-
391 .../resolver/test_binary_pkg_ebuild_visibility.py | 262 +-
392 lib/portage/tests/resolver/test_blocker.py | 240 +-
393 lib/portage/tests/resolver/test_changed_deps.py | 213 +-
394 .../tests/resolver/test_circular_choices.py | 401 +-
395 .../tests/resolver/test_circular_choices_rust.py | 160 +-
396 .../tests/resolver/test_circular_dependencies.py | 197 +-
397 lib/portage/tests/resolver/test_complete_graph.py | 291 +-
398 ...test_complete_if_new_subslot_without_revbump.py | 120 +-
399 lib/portage/tests/resolver/test_depclean.py | 552 +-
400 lib/portage/tests/resolver/test_depclean_order.py | 105 +-
401 .../resolver/test_depclean_slot_unavailable.py | 127 +-
402 lib/portage/tests/resolver/test_depth.py | 586 +-
403 .../resolver/test_disjunctive_depend_order.py | 145 +-
404 lib/portage/tests/resolver/test_eapi.py | 298 +-
405 .../tests/resolver/test_features_test_use.py | 146 +-
406 .../resolver/test_imagemagick_graphicsmagick.py | 183 +-
407 lib/portage/tests/resolver/test_keywords.py | 664 +-
408 lib/portage/tests/resolver/test_merge_order.py | 1237 +-
409 .../test_missing_iuse_and_evaluated_atoms.py | 53 +-
410 lib/portage/tests/resolver/test_multirepo.py | 779 +-
411 lib/portage/tests/resolver/test_multislot.py | 99 +-
412 .../tests/resolver/test_old_dep_chain_display.py | 67 +-
413 lib/portage/tests/resolver/test_onlydeps.py | 57 +-
414 .../tests/resolver/test_onlydeps_circular.py | 87 +-
415 .../tests/resolver/test_onlydeps_minimal.py | 83 +-
416 lib/portage/tests/resolver/test_or_choices.py | 1474 +-
417 .../tests/resolver/test_or_downgrade_installed.py | 152 +-
418 .../tests/resolver/test_or_upgrade_installed.py | 418 +-
419 lib/portage/tests/resolver/test_output.py | 183 +-
420 lib/portage/tests/resolver/test_package_tracker.py | 509 +-
421 .../tests/resolver/test_profile_default_eapi.py | 214 +-
422 .../tests/resolver/test_profile_package_set.py | 217 +-
423 lib/portage/tests/resolver/test_rebuild.py | 324 +-
424 .../test_regular_slot_change_without_revbump.py | 104 +-
425 lib/portage/tests/resolver/test_required_use.py | 435 +-
426 .../resolver/test_runtime_cycle_merge_order.py | 127 +-
427 lib/portage/tests/resolver/test_simple.py | 144 +-
428 lib/portage/tests/resolver/test_slot_abi.py | 907 +-
429 .../tests/resolver/test_slot_abi_downgrade.py | 425 +-
430 .../resolver/test_slot_change_without_revbump.py | 150 +-
431 lib/portage/tests/resolver/test_slot_collisions.py | 606 +-
432 .../resolver/test_slot_conflict_force_rebuild.py | 125 +-
433 .../resolver/test_slot_conflict_mask_update.py | 63 +-
434 .../tests/resolver/test_slot_conflict_rebuild.py | 937 +-
435 .../test_slot_conflict_unsatisfied_deep_deps.py | 351 +-
436 .../tests/resolver/test_slot_conflict_update.py | 156 +-
437 .../resolver/test_slot_conflict_update_virt.py | 129 +-
438 .../resolver/test_slot_operator_autounmask.py | 232 +-
439 .../tests/resolver/test_slot_operator_bdeps.py | 395 +-
440 .../resolver/test_slot_operator_complete_graph.py | 250 +-
441 .../resolver/test_slot_operator_exclusive_slots.py | 266 +-
442 .../resolver/test_slot_operator_missed_update.py | 196 +-
443 .../tests/resolver/test_slot_operator_rebuild.py | 196 +-
444 .../resolver/test_slot_operator_required_use.py | 114 +-
445 .../resolver/test_slot_operator_reverse_deps.py | 540 +-
446 .../test_slot_operator_runtime_pkg_mask.py | 240 +-
447 .../resolver/test_slot_operator_unsatisfied.py | 121 +-
448 .../tests/resolver/test_slot_operator_unsolved.py | 147 +-
449 ..._slot_operator_update_probe_parent_downgrade.py | 112 +-
450 .../test_solve_non_slot_operator_slot_conflicts.py | 113 +-
451 lib/portage/tests/resolver/test_targetroot.py | 178 +-
452 .../tests/resolver/test_unecessary_slot_upgrade.py | 62 +
453 lib/portage/tests/resolver/test_unmerge_order.py | 394 +-
454 .../tests/resolver/test_use_dep_defaults.py | 80 +-
455 lib/portage/tests/resolver/test_useflags.py | 214 +-
456 .../resolver/test_virtual_minimize_children.py | 548 +-
457 lib/portage/tests/resolver/test_virtual_slot.py | 458 +-
458 lib/portage/tests/resolver/test_with_test_deps.py | 150 +-
459 lib/portage/tests/runTests.py | 36 +-
460 .../tests/sets/base/testInternalPackageSet.py | 67 +-
461 lib/portage/tests/sets/files/testConfigFileSet.py | 37 +-
462 lib/portage/tests/sets/files/testStaticFileSet.py | 27 +-
463 lib/portage/tests/sets/shell/testShell.py | 31 +-
464 lib/portage/tests/sync/test_sync_local.py | 836 +-
465 lib/portage/tests/unicode/test_string_format.py | 78 +-
466 lib/portage/tests/update/test_move_ent.py | 188 +-
467 lib/portage/tests/update/test_move_slot_ent.py | 259 +-
468 lib/portage/tests/update/test_update_dbentry.py | 560 +-
469 .../tests/util/dyn_libs/test_soname_deps.py | 37 +-
470 .../tests/util/eventloop/test_call_soon_fifo.py | 28 +-
471 lib/portage/tests/util/file_copy/test_copyfile.py | 98 +-
472 .../util/futures/asyncio/test_child_watcher.py | 78 +-
473 .../futures/asyncio/test_event_loop_in_fork.py | 68 +-
474 .../tests/util/futures/asyncio/test_pipe_closed.py | 251 +-
475 .../asyncio/test_policy_wrapper_recursion.py | 22 +-
476 .../futures/asyncio/test_run_until_complete.py | 44 +-
477 .../util/futures/asyncio/test_subprocess_exec.py | 356 +-
478 .../util/futures/asyncio/test_wakeup_fd_sigchld.py | 62 +-
479 .../tests/util/futures/test_compat_coroutine.py | 392 +-
480 .../tests/util/futures/test_done_callback.py | 41 +-
481 .../util/futures/test_done_callback_after_exit.py | 69 +-
482 .../tests/util/futures/test_iter_completed.py | 141 +-
483 lib/portage/tests/util/futures/test_retry.py | 462 +-
484 lib/portage/tests/util/test_checksum.py | 236 +-
485 lib/portage/tests/util/test_digraph.py | 495 +-
486 lib/portage/tests/util/test_file_copier.py | 63 +-
487 lib/portage/tests/util/test_getconfig.py | 111 +-
488 lib/portage/tests/util/test_grabdict.py | 9 +-
489 lib/portage/tests/util/test_install_mask.py | 315 +-
490 lib/portage/tests/util/test_normalizedPath.py | 11 +-
491 lib/portage/tests/util/test_shelve.py | 90 +-
492 lib/portage/tests/util/test_socks5.py | 314 +-
493 lib/portage/tests/util/test_stackDictList.py | 28 +-
494 lib/portage/tests/util/test_stackDicts.py | 41 +-
495 lib/portage/tests/util/test_stackLists.py | 22 +-
496 lib/portage/tests/util/test_uniqueArray.py | 33 +-
497 lib/portage/tests/util/test_varExpand.py | 182 +-
498 lib/portage/tests/util/test_whirlpool.py | 17 +-
499 lib/portage/tests/util/test_xattr.py | 278 +-
500 lib/portage/tests/versions/test_cpv_sort_key.py | 18 +-
501 lib/portage/tests/versions/test_vercmp.py | 151 +-
502 lib/portage/tests/xpak/test_decodeint.py | 12 +-
503 lib/portage/update.py | 805 +-
504 lib/portage/util/ExtractKernelVersion.py | 135 +-
505 lib/portage/util/SlotObject.py | 101 +-
506 lib/portage/util/__init__.py | 3605 +--
507 lib/portage/util/_async/AsyncFunction.py | 116 +-
508 lib/portage/util/_async/AsyncScheduler.py | 194 +-
509 lib/portage/util/_async/AsyncTaskFuture.py | 48 +-
510 lib/portage/util/_async/BuildLogger.py | 201 +-
511 lib/portage/util/_async/FileCopier.py | 32 +-
512 lib/portage/util/_async/FileDigester.py | 140 +-
513 lib/portage/util/_async/ForkProcess.py | 295 +-
514 lib/portage/util/_async/PipeLogger.py | 359 +-
515 lib/portage/util/_async/PipeReaderBlockingIO.py | 130 +-
516 lib/portage/util/_async/PopenProcess.py | 56 +-
517 lib/portage/util/_async/SchedulerInterface.py | 233 +-
518 lib/portage/util/_async/TaskScheduler.py | 23 +-
519 lib/portage/util/_async/run_main_scheduler.py | 66 +-
520 lib/portage/util/_compare_files.py | 179 +-
521 lib/portage/util/_ctypes.py | 65 +-
522 lib/portage/util/_desktop_entry.py | 117 +-
523 lib/portage/util/_dyn_libs/LinkageMapELF.py | 1855 +-
524 lib/portage/util/_dyn_libs/NeededEntry.py | 129 +-
525 .../util/_dyn_libs/PreservedLibsRegistry.py | 461 +-
526 .../util/_dyn_libs/display_preserved_libs.py | 166 +-
527 lib/portage/util/_dyn_libs/dyn_libs.py | 28 +
528 lib/portage/util/_dyn_libs/soname_deps.py | 310 +-
529 lib/portage/util/_dyn_libs/soname_deps_qa.py | 161 +-
530 lib/portage/util/_eventloop/asyncio_event_loop.py | 264 +-
531 lib/portage/util/_eventloop/global_event_loop.py | 2 +-
532 lib/portage/util/_get_vm_info.py | 140 +-
533 lib/portage/util/_info_files.py | 237 +-
534 lib/portage/util/_path.py | 34 +-
535 lib/portage/util/_pty.py | 98 +-
536 lib/portage/util/_urlopen.py | 157 +-
537 lib/portage/util/_xattr.py | 349 +-
538 lib/portage/util/backoff.py | 80 +-
539 lib/portage/util/bin_entry_point.py | 44 +-
540 lib/portage/util/changelog.py | 103 +-
541 lib/portage/util/compression_probe.py | 191 +-
542 lib/portage/util/configparser.py | 106 +-
543 lib/portage/util/cpuinfo.py | 43 +-
544 lib/portage/util/digraph.py | 747 +-
545 lib/portage/util/elf/constants.py | 91 +-
546 lib/portage/util/elf/header.py | 116 +-
547 lib/portage/util/endian/decode.py | 70 +-
548 lib/portage/util/env_update.py | 801 +-
549 lib/portage/util/file_copy/__init__.py | 37 +-
550 lib/portage/util/formatter.py | 95 +-
551 lib/portage/util/futures/__init__.py | 4 +-
552 lib/portage/util/futures/_asyncio/__init__.py | 486 +-
553 lib/portage/util/futures/_asyncio/streams.py | 139 +-
554 lib/portage/util/futures/_sync_decorator.py | 75 +-
555 lib/portage/util/futures/compat_coroutine.py | 220 +-
556 lib/portage/util/futures/executor/fork.py | 228 +-
557 lib/portage/util/futures/extendedfutures.py | 115 +-
558 lib/portage/util/futures/futures.py | 16 +-
559 lib/portage/util/futures/iter_completed.py | 342 +-
560 lib/portage/util/futures/retry.py | 377 +-
561 lib/portage/util/futures/unix_events.py | 95 +-
562 lib/portage/util/hooks.py | 52 +
563 lib/portage/util/install_mask.py | 340 +-
564 lib/portage/util/iterators/MultiIterGroupBy.py | 164 +-
565 lib/portage/util/lafilefixer.py | 331 +-
566 lib/portage/util/listdir.py | 251 +-
567 lib/portage/util/locale.py | 240 +-
568 lib/portage/util/movefile.py | 665 +-
569 lib/portage/util/mtimedb.py | 221 +-
570 lib/portage/util/netlink.py | 139 +-
571 lib/portage/util/path.py | 82 +-
572 lib/portage/util/shelve.py | 86 +-
573 lib/portage/util/socks5.py | 199 +-
574 lib/portage/util/whirlpool.py | 2736 ++-
575 lib/portage/util/writeable_check.py | 199 +-
576 lib/portage/versions.py | 1100 +-
577 lib/portage/xml/metadata.py | 842 +-
578 lib/portage/xpak.py | 943 +-
579 man/color.map.5 | 6 +-
580 man/dispatch-conf.1 | 5 +
581 man/emerge.1 | 9 +-
582 man/make.conf.5 | 15 +-
583 repoman/bin/repoman | 46 +-
584 repoman/lib/repoman/__init__.py | 153 +-
585 repoman/lib/repoman/_portage.py | 7 +-
586 repoman/lib/repoman/_subprocess.py | 89 +-
587 repoman/lib/repoman/actions.py | 1467 +-
588 repoman/lib/repoman/argparser.py | 621 +-
589 repoman/lib/repoman/check_missingslot.py | 43 +-
590 repoman/lib/repoman/config.py | 284 +-
591 repoman/lib/repoman/copyrights.py | 221 +-
592 repoman/lib/repoman/errors.py | 17 +-
593 repoman/lib/repoman/gpg.py | 106 +-
594 repoman/lib/repoman/main.py | 369 +-
595 repoman/lib/repoman/metadata.py | 130 +-
596 repoman/lib/repoman/modules/commit/manifest.py | 189 +-
597 repoman/lib/repoman/modules/commit/repochecks.py | 54 +-
598 .../modules/linechecks/assignment/__init__.py | 34 +-
599 .../modules/linechecks/assignment/assignment.py | 40 +-
600 repoman/lib/repoman/modules/linechecks/base.py | 177 +-
601 repoman/lib/repoman/modules/linechecks/config.py | 195 +-
602 .../lib/repoman/modules/linechecks/controller.py | 277 +-
603 .../repoman/modules/linechecks/depend/__init__.py | 22 +-
604 .../repoman/modules/linechecks/depend/implicit.py | 63 +-
605 .../modules/linechecks/deprecated/__init__.py | 70 +-
606 .../modules/linechecks/deprecated/deprecated.py | 39 +-
607 .../modules/linechecks/deprecated/inherit.py | 113 +-
608 .../lib/repoman/modules/linechecks/do/__init__.py | 22 +-
609 repoman/lib/repoman/modules/linechecks/do/dosym.py | 24 +-
610 .../repoman/modules/linechecks/eapi/__init__.py | 82 +-
611 .../lib/repoman/modules/linechecks/eapi/checks.py | 100 +-
612 .../repoman/modules/linechecks/eapi/definition.py | 55 +-
613 .../repoman/modules/linechecks/emake/__init__.py | 34 +-
614 .../lib/repoman/modules/linechecks/emake/emake.py | 30 +-
615 .../modules/linechecks/gentoo_header/__init__.py | 22 +-
616 .../modules/linechecks/gentoo_header/header.py | 95 +-
617 .../repoman/modules/linechecks/helpers/__init__.py | 22 +-
618 .../repoman/modules/linechecks/helpers/offset.py | 31 +-
619 .../repoman/modules/linechecks/nested/__init__.py | 22 +-
620 .../repoman/modules/linechecks/nested/nested.py | 13 +-
621 .../repoman/modules/linechecks/nested/nesteddie.py | 14 +-
622 .../repoman/modules/linechecks/patches/__init__.py | 22 +-
623 .../repoman/modules/linechecks/patches/patches.py | 29 +-
624 .../repoman/modules/linechecks/phases/__init__.py | 58 +-
625 .../lib/repoman/modules/linechecks/phases/phase.py | 305 +-
626 .../repoman/modules/linechecks/portage/__init__.py | 34 +-
627 .../repoman/modules/linechecks/portage/internal.py | 44 +-
628 .../repoman/modules/linechecks/quotes/__init__.py | 34 +-
629 .../repoman/modules/linechecks/quotes/quoteda.py | 15 +-
630 .../repoman/modules/linechecks/quotes/quotes.py | 147 +-
631 .../lib/repoman/modules/linechecks/uri/__init__.py | 22 +-
632 repoman/lib/repoman/modules/linechecks/uri/uri.py | 50 +-
633 .../lib/repoman/modules/linechecks/use/__init__.py | 22 +-
634 .../repoman/modules/linechecks/use/builtwith.py | 9 +-
635 .../repoman/modules/linechecks/useless/__init__.py | 34 +-
636 .../lib/repoman/modules/linechecks/useless/cd.py | 32 +-
637 .../repoman/modules/linechecks/useless/dodoc.py | 19 +-
638 .../modules/linechecks/whitespace/__init__.py | 34 +-
639 .../repoman/modules/linechecks/whitespace/blank.py | 31 +-
640 .../modules/linechecks/whitespace/whitespace.py | 23 +-
641 .../modules/linechecks/workaround/__init__.py | 22 +-
642 .../modules/linechecks/workaround/workarounds.py | 12 +-
643 .../lib/repoman/modules/scan/depend/__init__.py | 57 +-
644 .../repoman/modules/scan/depend/_depend_checks.py | 448 +-
645 .../lib/repoman/modules/scan/depend/_gen_arches.py | 112 +-
646 repoman/lib/repoman/modules/scan/depend/profile.py | 748 +-
647 .../repoman/modules/scan/directories/__init__.py | 83 +-
648 .../lib/repoman/modules/scan/directories/files.py | 155 +-
649 .../lib/repoman/modules/scan/directories/mtime.py | 46 +-
650 repoman/lib/repoman/modules/scan/eapi/__init__.py | 38 +-
651 repoman/lib/repoman/modules/scan/eapi/eapi.py | 87 +-
652 .../lib/repoman/modules/scan/ebuild/__init__.py | 106 +-
653 repoman/lib/repoman/modules/scan/ebuild/ebuild.py | 464 +-
654 .../lib/repoman/modules/scan/ebuild/multicheck.py | 100 +-
655 .../lib/repoman/modules/scan/eclasses/__init__.py | 78 +-
656 repoman/lib/repoman/modules/scan/eclasses/live.py | 119 +-
657 repoman/lib/repoman/modules/scan/eclasses/ruby.py | 86 +-
658 repoman/lib/repoman/modules/scan/fetch/__init__.py | 51 +-
659 repoman/lib/repoman/modules/scan/fetch/fetches.py | 369 +-
660 .../lib/repoman/modules/scan/keywords/__init__.py | 51 +-
661 .../lib/repoman/modules/scan/keywords/keywords.py | 331 +-
662 .../lib/repoman/modules/scan/manifest/__init__.py | 45 +-
663 .../lib/repoman/modules/scan/manifest/manifests.py | 92 +-
664 .../lib/repoman/modules/scan/metadata/__init__.py | 158 +-
665 .../repoman/modules/scan/metadata/description.py | 73 +-
666 .../modules/scan/metadata/ebuild_metadata.py | 128 +-
667 .../repoman/modules/scan/metadata/pkgmetadata.py | 374 +-
668 .../lib/repoman/modules/scan/metadata/restrict.py | 96 +-
669 .../lib/repoman/modules/scan/metadata/use_flags.py | 155 +-
670 repoman/lib/repoman/modules/scan/module.py | 173 +-
671 .../lib/repoman/modules/scan/options/__init__.py | 37 +-
672 .../lib/repoman/modules/scan/options/options.py | 48 +-
673 repoman/lib/repoman/modules/scan/scan.py | 99 +-
674 repoman/lib/repoman/modules/scan/scanbase.py | 106 +-
675 repoman/lib/repoman/modules/vcs/None/__init__.py | 46 +-
676 repoman/lib/repoman/modules/vcs/None/changes.py | 88 +-
677 repoman/lib/repoman/modules/vcs/None/status.py | 96 +-
678 repoman/lib/repoman/modules/vcs/__init__.py | 5 +-
679 repoman/lib/repoman/modules/vcs/bzr/__init__.py | 46 +-
680 repoman/lib/repoman/modules/vcs/bzr/changes.py | 115 +-
681 repoman/lib/repoman/modules/vcs/bzr/status.py | 104 +-
682 repoman/lib/repoman/modules/vcs/changes.py | 311 +-
683 repoman/lib/repoman/modules/vcs/cvs/__init__.py | 46 +-
684 repoman/lib/repoman/modules/vcs/cvs/changes.py | 218 +-
685 repoman/lib/repoman/modules/vcs/cvs/status.py | 239 +-
686 repoman/lib/repoman/modules/vcs/git/__init__.py | 46 +-
687 repoman/lib/repoman/modules/vcs/git/changes.py | 252 +-
688 repoman/lib/repoman/modules/vcs/git/status.py | 115 +-
689 repoman/lib/repoman/modules/vcs/hg/__init__.py | 46 +-
690 repoman/lib/repoman/modules/vcs/hg/changes.py | 193 +-
691 repoman/lib/repoman/modules/vcs/hg/status.py | 95 +-
692 repoman/lib/repoman/modules/vcs/settings.py | 176 +-
693 repoman/lib/repoman/modules/vcs/svn/__init__.py | 46 +-
694 repoman/lib/repoman/modules/vcs/svn/changes.py | 266 +-
695 repoman/lib/repoman/modules/vcs/svn/status.py | 268 +-
696 repoman/lib/repoman/modules/vcs/vcs.py | 259 +-
697 repoman/lib/repoman/profile.py | 133 +-
698 repoman/lib/repoman/qa_data.py | 374 +-
699 repoman/lib/repoman/qa_tracker.py | 81 +-
700 repoman/lib/repoman/repos.py | 624 +-
701 repoman/lib/repoman/scanner.py | 852 +-
702 repoman/lib/repoman/tests/__init__.py | 539 +-
703 .../lib/repoman/tests/changelog/test_echangelog.py | 251 +-
704 repoman/lib/repoman/tests/commit/test_commitmsg.py | 156 +-
705 repoman/lib/repoman/tests/runTests.py | 36 +-
706 repoman/lib/repoman/tests/simple/test_simple.py | 903 +-
707 repoman/lib/repoman/utilities.py | 1019 +-
708 repoman/runtests | 258 +-
709 repoman/setup.py | 768 +-
710 runtests | 252 +-
711 setup.py | 1427 +-
712 src/portage_util_file_copy_reflink_linux.c | 7 +-
713 tabcheck.py | 3 +-
714 tox.ini | 18 +-
715 703 files changed, 142061 insertions(+), 124529 deletions(-)
716
717 diff --cc README.md
718 index d9e004269,e75b430c6..2c17d09b2
719 --- a/README.md
720 +++ b/README.md
721 @@@ -1,20 -1,41 +1,46 @@@
722 -[![CI](https://github.com/gentoo/portage/actions/workflows/ci.yml/badge.svg)](https://github.com/gentoo/portage/actions/workflows/ci.yml)
723 -
724 + About Portage
725 + =============
726 +
727 + Portage is a package management system based on ports collections. The
728 + Package Manager Specification Project (PMS) standardises and documents
729 + the behaviour of Portage so that ebuild repositories can be used by
730 + other package managers.
731 +
732 +This is the prefix branch of Portage: the branch that sets Portage up
733 +as the package manager for a given offset into the filesystem, running
734 +with user privileges.
735 +
736 +If you are not looking for something Gentoo Prefix-like, then this
737 +is not the right place.
738 +
739 + Contributing
740 + ============
741
742 - =======
743 - About Portage
744 - =============
745 + Contributions are always welcome! We've started using
746 + [black](https://pypi.org/project/black/) to format the code base. Please make
747 + sure you run it against any PRs prior to submitting (otherwise we'll probably
748 + reject it).
749
750 - Portage is a package management system based on ports collections. The
751 - Package Manager Specification Project (PMS) standardises and documents
752 - the behaviour of Portage so that ebuild repositories can be used by
753 - other package managers.
754 + There are [ways to
755 + integrate](https://black.readthedocs.io/en/stable/integrations/editors.html)
756 + black into your text editor and/or IDE.
757
758 + You can also set up a git hook to check your commits, in case you don't want
759 + editor integration. Something like this:
760 +
761 + ```sh
762 + #!/bin/bash
763 + # .git/hooks/pre-commit (don't forget to chmod +x)
764 +
765 + black --check --diff .
766 + ```
767 +
768 + To ignore commit 1bb64ff452 (a massive commit that simply reformatted
769 + the code base using black) in git blame output, you can do the following:
770 +
771 + ```sh
772 + git config blame.ignoreRevsFile .gitignorerevs
773 + ```
774
775 Dependencies
776 ============
777 diff --cc bin/save-ebuild-env.sh
778 index 7d7235f23,98808814b..b3d4c7363
779 mode 100755,100644..100755
780 --- a/bin/save-ebuild-env.sh
781 +++ b/bin/save-ebuild-env.sh
782 @@@ -1,5 -1,5 +1,5 @@@
783 -#!/bin/bash
784 +#!@PORTAGE_BASH@
785 - # Copyright 1999-2018 Gentoo Foundation
786 + # Copyright 1999-2021 Gentoo Authors
787 # Distributed under the terms of the GNU General Public License v2
788
789 # @FUNCTION: __save_ebuild_env
790 @@@ -89,16 -89,12 +89,16 @@@ __save_ebuild_env()
791 ___eapi_has_package_manager_build_user && unset -f package_manager_build_user
792 ___eapi_has_package_manager_build_group && unset -f package_manager_build_group
793
794 - # Clear out the triple underscore namespace as it is reserved by the PM.
795 - unset -f $(compgen -A function ___)
796 - unset ${!___*}
797 + # BEGIN PREFIX LOCAL: compgen is not compiled in during bootstrap
798 + if type compgen >& /dev/null ; then
799 + # Clear out the triple underscore namespace as it is reserved by the PM.
800 + unset -f $(compgen -A function ___)
801 + unset ${!___*}
802 + fi
803 + # END PREFIX LOCAL
804
805 # portage config variables and variables set directly by portage
806 - unset ACCEPT_LICENSE BAD BRACKET BUILD_PREFIX COLS \
807 + unset ACCEPT_LICENSE BUILD_PREFIX COLS \
808 DISTDIR DOC_SYMLINKS_DIR \
809 EBUILD_FORCE_TEST EBUILD_MASTER_PID \
810 ECLASS_DEPTH ENDCOL FAKEROOTKEY \
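
The PREFIX LOCAL guard above exists because a bootstrap-stage bash on Prefix
systems may be compiled without the compgen builtin, so ebuild.sh probes for it
before clearing the triple-underscore namespace. A minimal Python sketch of the
same availability probe; the helper name and default path are assumptions for
illustration, not Portage API:

```python
import subprocess

def bash_has_compgen(bash_path="/bin/bash"):
    """Return True if the given bash provides the compgen builtin.

    Mirrors the `type compgen >& /dev/null` guard in the hunk above;
    the function name and default path are assumptions for this sketch.
    """
    proc = subprocess.run(
        [bash_path, "-c", "type compgen"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    return proc.returncode == 0
```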
811 diff --cc lib/_emerge/EbuildPhase.py
812 index 8eaa57497,12326fffd..d07cff7bd
813 --- a/lib/_emerge/EbuildPhase.py
814 +++ b/lib/_emerge/EbuildPhase.py
815 @@@ -26,29 -29,29 +29,31 @@@ from portage.util._async.AsyncTaskFutur
816 from portage.util._async.BuildLogger import BuildLogger
817 from portage.util.futures import asyncio
818 from portage.util.futures.executor.fork import ForkExecutor
819 +# PREFIX LOCAL
820 +from portage.const import EPREFIX
821
822 try:
823 - from portage.xml.metadata import MetaDataXML
824 + from portage.xml.metadata import MetaDataXML
825 except (SystemExit, KeyboardInterrupt):
826 - raise
827 + raise
828 except (ImportError, SystemError, RuntimeError, Exception):
829 - # broken or missing xml support
830 - # https://bugs.python.org/issue14988
831 - MetaDataXML = None
832 + # broken or missing xml support
833 + # https://bugs.python.org/issue14988
834 + MetaDataXML = None
835
836 import portage
837 - portage.proxy.lazyimport.lazyimport(globals(),
838 - 'portage.elog:messages@elog_messages',
839 - 'portage.package.ebuild.doebuild:_check_build_log,' + \
840 - '_post_phase_cmds,_post_phase_userpriv_perms,' + \
841 - '_post_phase_emptydir_cleanup,' +
842 - '_post_src_install_soname_symlinks,' + \
843 - '_post_src_install_uid_fix,_postinst_bsdflags,' + \
844 - '_post_src_install_write_metadata,' + \
845 - '_preinst_bsdflags',
846 - 'portage.util.futures.unix_events:_set_nonblocking',
847 +
848 + portage.proxy.lazyimport.lazyimport(
849 + globals(),
850 + "portage.elog:messages@elog_messages",
851 + "portage.package.ebuild.doebuild:_check_build_log,"
852 + + "_post_phase_cmds,_post_phase_userpriv_perms,"
853 + + "_post_phase_emptydir_cleanup,"
854 + + "_post_src_install_soname_symlinks,"
855 + + "_post_src_install_uid_fix,_postinst_bsdflags,"
856 + + "_post_src_install_write_metadata,"
857 + + "_preinst_bsdflags",
858 + "portage.util.futures.unix_events:_set_nonblocking",
859 )
860 from portage import os
861 from portage import _encodings
862 @@@ -450,84 -521,93 +523,110 @@@ class EbuildPhase(CompositeTask)
863
864 class _PostPhaseCommands(CompositeTask):
865
866 - __slots__ = ("commands", "elog", "fd_pipes", "logfile", "phase", "settings")
867 -
868 - def _start(self):
869 - if isinstance(self.commands, list):
870 - cmds = [({}, self.commands)]
871 - else:
872 - cmds = list(self.commands)
873 -
874 - if 'selinux' not in self.settings.features:
875 - cmds = [(kwargs, commands) for kwargs, commands in
876 - cmds if not kwargs.get('selinux_only')]
877 -
878 - tasks = TaskSequence()
879 - for kwargs, commands in cmds:
880 - # Select args intended for MiscFunctionsProcess.
881 - kwargs = dict((k, v) for k, v in kwargs.items()
882 - if k in ('ld_preload_sandbox',))
883 - tasks.add(MiscFunctionsProcess(background=self.background,
884 - commands=commands, fd_pipes=self.fd_pipes,
885 - logfile=self.logfile, phase=self.phase,
886 - scheduler=self.scheduler, settings=self.settings, **kwargs))
887 -
888 - self._start_task(tasks, self._commands_exit)
889 -
890 - def _commands_exit(self, task):
891 -
892 - if self._default_exit(task) != os.EX_OK:
893 - self._async_wait()
894 - return
895 -
896 - if self.phase == 'install':
897 - out = io.StringIO()
898 - _post_src_install_soname_symlinks(self.settings, out)
899 - msg = out.getvalue()
900 - if msg:
901 - self.scheduler.output(msg, log_path=self.settings.get("PORTAGE_LOG_FILE"))
902 -
903 - if 'qa-unresolved-soname-deps' in self.settings.features:
904 - # This operates on REQUIRES metadata generated by the above function call.
905 - future = asyncio.ensure_future(self._soname_deps_qa(), loop=self.scheduler)
906 - # If an unexpected exception occurs, then this will raise it.
907 - future.add_done_callback(lambda future: future.cancelled() or future.result())
908 - self._start_task(AsyncTaskFuture(future=future), self._default_final_exit)
909 - else:
910 - self._default_final_exit(task)
911 - else:
912 - self._default_final_exit(task)
913 -
914 - async def _soname_deps_qa(self):
915 -
916 - vardb = QueryCommand.get_db()[self.settings['EROOT']]['vartree'].dbapi
917 -
918 - all_provides = (await self.scheduler.run_in_executor(ForkExecutor(loop=self.scheduler), _get_all_provides, vardb))
919 -
920 - unresolved = _get_unresolved_soname_deps(os.path.join(self.settings['PORTAGE_BUILDDIR'], 'build-info'), all_provides)
921 + __slots__ = ("commands", "elog", "fd_pipes", "logfile", "phase", "settings")
922 +
923 + def _start(self):
924 + if isinstance(self.commands, list):
925 + cmds = [({}, self.commands)]
926 + else:
927 + cmds = list(self.commands)
928 +
929 + if "selinux" not in self.settings.features:
930 + cmds = [
931 + (kwargs, commands)
932 + for kwargs, commands in cmds
933 + if not kwargs.get("selinux_only")
934 + ]
935 +
936 + tasks = TaskSequence()
937 + for kwargs, commands in cmds:
938 + # Select args intended for MiscFunctionsProcess.
939 + kwargs = dict(
940 + (k, v) for k, v in kwargs.items() if k in ("ld_preload_sandbox",)
941 + )
942 + tasks.add(
943 + MiscFunctionsProcess(
944 + background=self.background,
945 + commands=commands,
946 + fd_pipes=self.fd_pipes,
947 + logfile=self.logfile,
948 + phase=self.phase,
949 + scheduler=self.scheduler,
950 + settings=self.settings,
951 + **kwargs
952 + )
953 + )
954 +
955 + self._start_task(tasks, self._commands_exit)
956 +
957 + def _commands_exit(self, task):
958 +
959 + if self._default_exit(task) != os.EX_OK:
960 + self._async_wait()
961 + return
962 +
963 + if self.phase == "install":
964 + out = io.StringIO()
965 + _post_src_install_soname_symlinks(self.settings, out)
966 + msg = out.getvalue()
967 + if msg:
968 + self.scheduler.output(
969 + msg, log_path=self.settings.get("PORTAGE_LOG_FILE")
970 + )
971 +
972 + if "qa-unresolved-soname-deps" in self.settings.features:
973 + # This operates on REQUIRES metadata generated by the above function call.
974 + future = asyncio.ensure_future(
975 + self._soname_deps_qa(), loop=self.scheduler
976 + )
977 + # If an unexpected exception occurs, then this will raise it.
978 + future.add_done_callback(
979 + lambda future: future.cancelled() or future.result()
980 + )
981 + self._start_task(
982 + AsyncTaskFuture(future=future), self._default_final_exit
983 + )
984 + else:
985 + self._default_final_exit(task)
986 + else:
987 + self._default_final_exit(task)
988 +
989 + async def _soname_deps_qa(self):
990 +
991 + vardb = QueryCommand.get_db()[self.settings["EROOT"]]["vartree"].dbapi
992 +
993 + all_provides = await self.scheduler.run_in_executor(
994 + ForkExecutor(loop=self.scheduler), _get_all_provides, vardb
995 + )
996 +
997 + unresolved = _get_unresolved_soname_deps(
998 + os.path.join(self.settings["PORTAGE_BUILDDIR"], "build-info"), all_provides
999 + )
1000
1001 + # BEGIN PREFIX LOCAL
1002 + if EPREFIX != "" and unresolved:
1003 + # in prefix, consider the host libs for any unresolved libs,
1004 + # so we kill warnings about missing libc.so.1, etc.
1005 + for obj, libs in list(unresolved):
1006 + unresolved.remove((obj, libs))
1007 + libs = list(libs)
1008 + for lib in list(libs):
1009 + for path in ['/lib64', '/lib/64', '/lib',
1010 + '/usr/lib64', '/usr/lib/64', '/usr/lib']:
1011 + if os.path.exists(os.path.join(path, lib)):
1012 + libs.remove(lib)
1013 + break
1014 + if len(libs) > 0:
1015 + unresolved.append((obj, tuple(libs)))
1016 + # END PREFIX LOCAL
1017 +
1018 - if unresolved:
1019 - unresolved.sort()
1020 - qa_msg = ["QA Notice: Unresolved soname dependencies:"]
1021 - qa_msg.append("")
1022 - qa_msg.extend("\t%s: %s" % (filename, " ".join(sorted(soname_deps)))
1023 - for filename, soname_deps in unresolved)
1024 - qa_msg.append("")
1025 - await self.elog("eqawarn", qa_msg)
1026 + if unresolved:
1027 + unresolved.sort()
1028 + qa_msg = ["QA Notice: Unresolved soname dependencies:"]
1029 + qa_msg.append("")
1030 + qa_msg.extend(
1031 + "\t%s: %s" % (filename, " ".join(sorted(soname_deps)))
1032 + for filename, soname_deps in unresolved
1033 + )
1034 + qa_msg.append("")
1035 + await self.elog("eqawarn", qa_msg)
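
The PREFIX LOCAL block above drops QA warnings for sonames the host OS already
provides, since host libc and friends resolve outside EPREFIX. A standalone
sketch of that filtering, assuming `unresolved` is a list of `(obj, libs)`
tuples as produced by `_get_unresolved_soname_deps`; the function name here is
hypothetical:

```python
import os

# Host library directories probed by the prefix-local filter above.
HOST_LIB_DIRS = (
    "/lib64", "/lib/64", "/lib",
    "/usr/lib64", "/usr/lib/64", "/usr/lib",
)

def drop_host_provided(unresolved):
    """Filter (obj, libs) pairs, keeping only libs the host lacks."""
    kept = []
    for obj, libs in unresolved:
        missing = tuple(
            lib
            for lib in libs
            if not any(
                os.path.exists(os.path.join(d, lib)) for d in HOST_LIB_DIRS
            )
        )
        if missing:
            kept.append((obj, missing))
    return kept
```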
1036 diff --cc lib/_emerge/Package.py
1037 index 40e595f36,90dfccdef..79f5459b3
1038 --- a/lib/_emerge/Package.py
1039 +++ b/lib/_emerge/Package.py
1040 @@@ -16,783 -22,884 +22,886 @@@ from portage.exception import InvalidDa
1041 from portage.localization import _
1042 from _emerge.Task import Task
1043
1044 +
1045 class Package(Task):
1046
1047 - __hash__ = Task.__hash__
1048 - __slots__ = ("built", "cpv", "depth",
1049 - "installed", "onlydeps", "operation",
1050 - "root_config", "type_name",
1051 - "category", "counter", "cp", "cpv_split",
1052 - "inherited", "iuse", "mtime",
1053 - "pf", "root", "slot", "sub_slot", "slot_atom", "version") + \
1054 - ("_invalid", "_masks", "_metadata", "_provided_cps",
1055 - "_raw_metadata", "_provides", "_requires", "_use",
1056 - "_validated_atoms", "_visible")
1057 -
1058 - metadata_keys = [
1059 - "BDEPEND",
1060 - "BUILD_ID", "BUILD_TIME", "CHOST", "COUNTER", "DEFINED_PHASES",
1061 - "DEPEND", "EAPI", "IDEPEND", "INHERITED", "IUSE", "KEYWORDS",
1062 - "LICENSE", "MD5", "PDEPEND", "PROVIDES",
1063 - "RDEPEND", "repository", "REQUIRED_USE",
1064 - "PROPERTIES", "REQUIRES", "RESTRICT", "SIZE",
1065 - "SLOT", "USE", "_mtime_", "EPREFIX"]
1066 -
1067 - _dep_keys = ('BDEPEND', 'DEPEND', 'IDEPEND', 'PDEPEND', 'RDEPEND')
1068 - _buildtime_keys = ('BDEPEND', 'DEPEND')
1069 - _runtime_keys = ('IDEPEND', 'PDEPEND', 'RDEPEND')
1070 - _use_conditional_misc_keys = ('LICENSE', 'PROPERTIES', 'RESTRICT')
1071 - UNKNOWN_REPO = _unknown_repo
1072 -
1073 - def __init__(self, **kwargs):
1074 - metadata = _PackageMetadataWrapperBase(kwargs.pop('metadata'))
1075 - Task.__init__(self, **kwargs)
1076 - # the SlotObject constructor assigns self.root_config from keyword args
1077 - # and is an instance of a '_emerge.RootConfig.RootConfig class
1078 - self.root = self.root_config.root
1079 - self._raw_metadata = metadata
1080 - self._metadata = _PackageMetadataWrapper(self, metadata)
1081 - if not self.built:
1082 - self._metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
1083 - eapi_attrs = _get_eapi_attrs(self.eapi)
1084 -
1085 - try:
1086 - db = self.cpv._db
1087 - except AttributeError:
1088 - if self.built:
1089 - # For independence from the source ebuild repository and
1090 - # profile implicit IUSE state, require the _db attribute
1091 - # for built packages.
1092 - raise
1093 - db = self.root_config.trees['porttree'].dbapi
1094 -
1095 - self.cpv = _pkg_str(self.cpv, metadata=self._metadata,
1096 - settings=self.root_config.settings, db=db)
1097 - if hasattr(self.cpv, 'slot_invalid'):
1098 - self._invalid_metadata('SLOT.invalid',
1099 - "SLOT: invalid value: '%s'" % self._metadata["SLOT"])
1100 - self.cpv_split = self.cpv.cpv_split
1101 - self.category, self.pf = portage.catsplit(self.cpv)
1102 - self.cp = self.cpv.cp
1103 - self.version = self.cpv.version
1104 - self.slot = self.cpv.slot
1105 - self.sub_slot = self.cpv.sub_slot
1106 - self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
1107 - # sync metadata with validated repo (may be UNKNOWN_REPO)
1108 - self._metadata['repository'] = self.cpv.repo
1109 -
1110 - if self.root_config.settings.local_config:
1111 - implicit_match = db._iuse_implicit_cnstr(self.cpv, self._metadata)
1112 - else:
1113 - implicit_match = db._repoman_iuse_implicit_cnstr(self.cpv, self._metadata)
1114 - usealiases = self.root_config.settings._use_manager.getUseAliases(self)
1115 - self.iuse = self._iuse(self, self._metadata["IUSE"].split(),
1116 - implicit_match, usealiases, self.eapi)
1117 -
1118 - if (self.iuse.enabled or self.iuse.disabled) and \
1119 - not eapi_attrs.iuse_defaults:
1120 - if not self.installed:
1121 - self._invalid_metadata('EAPI.incompatible',
1122 - "IUSE contains defaults, but EAPI doesn't allow them")
1123 - if self.inherited is None:
1124 - self.inherited = frozenset()
1125 -
1126 - if self.operation is None:
1127 - if self.onlydeps or self.installed:
1128 - self.operation = "nomerge"
1129 - else:
1130 - self.operation = "merge"
1131 -
1132 - self._hash_key = Package._gen_hash_key(cpv=self.cpv,
1133 - installed=self.installed, onlydeps=self.onlydeps,
1134 - operation=self.operation, repo_name=self.cpv.repo,
1135 - root_config=self.root_config,
1136 - type_name=self.type_name)
1137 - self._hash_value = hash(self._hash_key)
1138 -
1139 - @property
1140 - def eapi(self):
1141 - return self._metadata["EAPI"]
1142 -
1143 - @property
1144 - def build_id(self):
1145 - return self.cpv.build_id
1146 -
1147 - @property
1148 - def build_time(self):
1149 - if not self.built:
1150 - raise AttributeError('build_time')
1151 - return self.cpv.build_time
1152 -
1153 - @property
1154 - def defined_phases(self):
1155 - return self._metadata.defined_phases
1156 -
1157 - @property
1158 - def properties(self):
1159 - return self._metadata.properties
1160 -
1161 - @property
1162 - def provided_cps(self):
1163 - return (self.cp,)
1164 -
1165 - @property
1166 - def restrict(self):
1167 - return self._metadata.restrict
1168 -
1169 - @property
1170 - def metadata(self):
1171 - warnings.warn("_emerge.Package.Package.metadata is deprecated",
1172 - DeprecationWarning, stacklevel=3)
1173 - return self._metadata
1174 -
1175 - # These are calculated on-demand, so that they are calculated
1176 - # after FakeVartree applies its metadata tweaks.
1177 - @property
1178 - def invalid(self):
1179 - if self._invalid is None:
1180 - self._validate_deps()
1181 - if self._invalid is None:
1182 - self._invalid = False
1183 - return self._invalid
1184 -
1185 - @property
1186 - def masks(self):
1187 - if self._masks is None:
1188 - self._masks = self._eval_masks()
1189 - return self._masks
1190 -
1191 - @property
1192 - def visible(self):
1193 - if self._visible is None:
1194 - self._visible = self._eval_visiblity(self.masks)
1195 - return self._visible
1196 -
1197 - @property
1198 - def validated_atoms(self):
1199 - """
1200 - Returns *all* validated atoms from the deps, regardless
1201 - of USE conditionals, with USE conditionals inside
1202 - atoms left unevaluated.
1203 - """
1204 - if self._validated_atoms is None:
1205 - self._validate_deps()
1206 - return self._validated_atoms
1207 -
1208 - @property
1209 - def stable(self):
1210 - return self.cpv.stable
1211 -
1212 - @property
1213 - def provides(self):
1214 - self.invalid
1215 - return self._provides
1216 -
1217 - @property
1218 - def requires(self):
1219 - self.invalid
1220 - return self._requires
1221 -
1222 - @classmethod
1223 - def _gen_hash_key(cls, cpv=None, installed=None, onlydeps=None,
1224 - operation=None, repo_name=None, root_config=None,
1225 - type_name=None, **kwargs):
1226 -
1227 - if operation is None:
1228 - if installed or onlydeps:
1229 - operation = "nomerge"
1230 - else:
1231 - operation = "merge"
1232 -
1233 - root = None
1234 - if root_config is not None:
1235 - root = root_config.root
1236 - else:
1237 - raise TypeError("root_config argument is required")
1238 -
1239 - elements = [type_name, root, str(cpv), operation]
1240 -
1241 - # For installed (and binary) packages we don't care for the repo
1242 - # when it comes to hashing, because there can only be one cpv.
1243 - # So overwrite the repo_key with type_name.
1244 - if type_name is None:
1245 - raise TypeError("type_name argument is required")
1246 - elif type_name == "ebuild":
1247 - if repo_name is None:
1248 - raise AssertionError(
1249 - "Package._gen_hash_key() " + \
1250 - "called without 'repo_name' argument")
1251 - elements.append(repo_name)
1252 - elif type_name == "binary":
1253 - # Including a variety of fingerprints in the hash makes
1254 - # it possible to simultaneously consider multiple similar
1255 - # packages. Note that digests are not included here, since
1256 - # they are relatively expensive to compute, and they may
1257 - # not necessarily be available.
1258 - elements.extend([cpv.build_id, cpv.file_size,
1259 - cpv.build_time, cpv.mtime])
1260 - else:
1261 - # For installed (and binary) packages we don't care for the repo
1262 - # when it comes to hashing, because there can only be one cpv.
1263 - # So overwrite the repo_key with type_name.
1264 - elements.append(type_name)
1265 -
1266 - return tuple(elements)
1267 -
1268 - def _validate_deps(self):
1269 - """
1270 - Validate deps. This does not trigger USE calculation since that
1271 - is expensive for ebuilds and therefore we want to avoid doing
1272 - it unnecessarily (like for masked packages).
1273 - """
1274 - eapi = self.eapi
1275 - dep_eapi = eapi
1276 - dep_valid_flag = self.iuse.is_valid_flag
1277 - if self.installed:
1278 - # Ignore EAPI.incompatible and conditionals missing
1279 - # from IUSE for installed packages since these issues
1280 - # aren't relevant now (re-evaluate when new EAPIs are
1281 - # deployed).
1282 - dep_eapi = None
1283 - dep_valid_flag = None
1284 -
1285 - validated_atoms = []
1286 - for k in self._dep_keys:
1287 - v = self._metadata.get(k)
1288 - if not v:
1289 - continue
1290 - try:
1291 - atoms = use_reduce(v, eapi=dep_eapi,
1292 - matchall=True, is_valid_flag=dep_valid_flag,
1293 - token_class=Atom, flat=True)
1294 - except InvalidDependString as e:
1295 - self._metadata_exception(k, e)
1296 - else:
1297 - validated_atoms.extend(atoms)
1298 - if not self.built:
1299 - for atom in atoms:
1300 - if not isinstance(atom, Atom):
1301 - continue
1302 - if atom.slot_operator_built:
1303 - e = InvalidDependString(
1304 - _("Improper context for slot-operator "
1305 - "\"built\" atom syntax: %s") %
1306 - (atom.unevaluated_atom,))
1307 - self._metadata_exception(k, e)
1308 -
1309 - self._validated_atoms = tuple(set(atom for atom in
1310 - validated_atoms if isinstance(atom, Atom)))
1311 -
1312 - for k in self._use_conditional_misc_keys:
1313 - v = self._metadata.get(k)
1314 - if not v:
1315 - continue
1316 - try:
1317 - use_reduce(v, eapi=dep_eapi, matchall=True,
1318 - is_valid_flag=dep_valid_flag)
1319 - except InvalidDependString as e:
1320 - self._metadata_exception(k, e)
1321 -
1322 - k = 'REQUIRED_USE'
1323 - v = self._metadata.get(k)
1324 - if v and not self.built:
1325 - if not _get_eapi_attrs(eapi).required_use:
1326 - self._invalid_metadata('EAPI.incompatible',
1327 - "REQUIRED_USE set, but EAPI='%s' doesn't allow it" % eapi)
1328 - else:
1329 - try:
1330 - check_required_use(v, (),
1331 - self.iuse.is_valid_flag, eapi=eapi)
1332 - except InvalidDependString as e:
1333 - self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
1334 -
1335 - k = 'SRC_URI'
1336 - v = self._metadata.get(k)
1337 - if v:
1338 - try:
1339 - use_reduce(v, is_src_uri=True, eapi=eapi, matchall=True,
1340 - is_valid_flag=self.iuse.is_valid_flag)
1341 - except InvalidDependString as e:
1342 - if not self.installed:
1343 - self._metadata_exception(k, e)
1344 -
1345 - if self.built:
1346 - k = 'PROVIDES'
1347 - try:
1348 - self._provides = frozenset(
1349 - parse_soname_deps(self._metadata[k]))
1350 - except InvalidData as e:
1351 - self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
1352 -
1353 - k = 'REQUIRES'
1354 - try:
1355 - self._requires = frozenset(
1356 - parse_soname_deps(self._metadata[k]))
1357 - except InvalidData as e:
1358 - self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
1359 -
1360 - def copy(self):
1361 - return Package(built=self.built, cpv=self.cpv, depth=self.depth,
1362 - installed=self.installed, metadata=self._raw_metadata,
1363 - onlydeps=self.onlydeps, operation=self.operation,
1364 - root_config=self.root_config, type_name=self.type_name)
1365 -
1366 - def _eval_masks(self):
1367 - masks = {}
1368 - settings = self.root_config.settings
1369 -
1370 - if self.invalid is not False:
1371 - masks['invalid'] = self.invalid
1372 -
1373 - if not settings._accept_chost(self.cpv, self._metadata):
1374 - masks['CHOST'] = self._metadata['CHOST']
1375 -
1376 - eapi = self.eapi
1377 - if not portage.eapi_is_supported(eapi):
1378 - masks['EAPI.unsupported'] = eapi
1379 - if portage._eapi_is_deprecated(eapi):
1380 - masks['EAPI.deprecated'] = eapi
1381 -
1382 - missing_keywords = settings._getMissingKeywords(
1383 - self.cpv, self._metadata)
1384 - if missing_keywords:
1385 - masks['KEYWORDS'] = missing_keywords
1386 -
1387 - try:
1388 - missing_properties = settings._getMissingProperties(
1389 - self.cpv, self._metadata)
1390 - if missing_properties:
1391 - masks['PROPERTIES'] = missing_properties
1392 - except InvalidDependString:
1393 - # already recorded as 'invalid'
1394 - pass
1395 -
1396 - try:
1397 - missing_restricts = settings._getMissingRestrict(
1398 - self.cpv, self._metadata)
1399 - if missing_restricts:
1400 - masks['RESTRICT'] = missing_restricts
1401 - except InvalidDependString:
1402 - # already recorded as 'invalid'
1403 - pass
1404 -
1405 - mask_atom = settings._getMaskAtom(self.cpv, self._metadata)
1406 - if mask_atom is not None:
1407 - masks['package.mask'] = mask_atom
1408 -
1409 - try:
1410 - missing_licenses = settings._getMissingLicenses(
1411 - self.cpv, self._metadata)
1412 - if missing_licenses:
1413 - masks['LICENSE'] = missing_licenses
1414 - except InvalidDependString:
1415 - # already recorded as 'invalid'
1416 - pass
1417 -
1418 - if not masks:
1419 - masks = False
1420 -
1421 - return masks
1422 -
1423 - def _eval_visiblity(self, masks):
1424 -
1425 - if masks is not False:
1426 -
1427 - if 'EAPI.unsupported' in masks:
1428 - return False
1429 -
1430 - if 'invalid' in masks:
1431 - return False
1432 -
1433 - if not self.installed and ( \
1434 - 'CHOST' in masks or \
1435 - 'EAPI.deprecated' in masks or \
1436 - 'KEYWORDS' in masks or \
1437 - 'PROPERTIES' in masks or \
1438 - 'RESTRICT' in masks):
1439 - return False
1440 -
1441 - if 'package.mask' in masks or \
1442 - 'LICENSE' in masks:
1443 - return False
1444 -
1445 - return True
1446 -
1447 - def get_keyword_mask(self):
1448 - """returns None, 'missing', or 'unstable'."""
1449 -
1450 - missing = self.root_config.settings._getRawMissingKeywords(
1451 - self.cpv, self._metadata)
1452 -
1453 - if not missing:
1454 - return None
1455 -
1456 - if '**' in missing:
1457 - return 'missing'
1458 -
1459 - global_accept_keywords = frozenset(
1460 - self.root_config.settings.get("ACCEPT_KEYWORDS", "").split())
1461 -
1462 - for keyword in missing:
1463 - if keyword.lstrip("~") in global_accept_keywords:
1464 - return 'unstable'
1465 -
1466 - return 'missing'
1467 -
1468 - def isHardMasked(self):
1469 - """returns a bool if the cpv is in the list of
1470 - expanded pmaskdict[cp] available ebuilds"""
1471 - pmask = self.root_config.settings._getRawMaskAtom(
1472 - self.cpv, self._metadata)
1473 - return pmask is not None
1474 -
1475 - def _metadata_exception(self, k, e):
1476 -
1477 - if k.endswith('DEPEND'):
1478 - qacat = 'dependency.syntax'
1479 - else:
1480 - qacat = k + ".syntax"
1481 -
1482 - if not self.installed:
1483 - categorized_error = False
1484 - if e.errors:
1485 - for error in e.errors:
1486 - if getattr(error, 'category', None) is None:
1487 - continue
1488 - categorized_error = True
1489 - self._invalid_metadata(error.category,
1490 - "%s: %s" % (k, error))
1491 -
1492 - if not categorized_error:
1493 - self._invalid_metadata(qacat,"%s: %s" % (k, e))
1494 - else:
1495 - # For installed packages, show the path of the file
1496 - # containing the invalid metadata, since the user may
1497 - # want to fix the deps by hand.
1498 - vardb = self.root_config.trees['vartree'].dbapi
1499 - path = vardb.getpath(self.cpv, filename=k)
1500 - self._invalid_metadata(qacat, "%s: %s in '%s'" % (k, e, path))
1501 -
1502 - def _invalid_metadata(self, msg_type, msg):
1503 - if self._invalid is None:
1504 - self._invalid = {}
1505 - msgs = self._invalid.get(msg_type)
1506 - if msgs is None:
1507 - msgs = []
1508 - self._invalid[msg_type] = msgs
1509 - msgs.append(msg)
1510 -
1511 - def __str__(self):
1512 - if self.operation == "merge":
1513 - if self.type_name == "binary":
1514 - cpv_color = "PKG_BINARY_MERGE"
1515 - else:
1516 - cpv_color = "PKG_MERGE"
1517 - elif self.operation == "uninstall":
1518 - cpv_color = "PKG_UNINSTALL"
1519 - else:
1520 - cpv_color = "PKG_NOMERGE"
1521 -
1522 - build_id_str = ""
1523 - if isinstance(self.cpv.build_id, int) and self.cpv.build_id > 0:
1524 - build_id_str = "-%s" % self.cpv.build_id
1525 -
1526 - s = "(%s, %s" \
1527 - % (portage.output.colorize(cpv_color, self.cpv +
1528 - build_id_str + _slot_separator + self.slot + "/" +
1529 - self.sub_slot + _repo_separator + self.repo),
1530 - self.type_name)
1531 -
1532 - if self.type_name == "installed":
1533 - if self.root_config.settings['ROOT'] != "/":
1534 - s += " in '%s'" % self.root_config.settings['ROOT']
1535 - if self.operation == "uninstall":
1536 - s += " scheduled for uninstall"
1537 - else:
1538 - if self.operation == "merge":
1539 - s += " scheduled for merge"
1540 - if self.root_config.settings['ROOT'] != "/":
1541 - s += " to '%s'" % self.root_config.settings['ROOT']
1542 - s += ")"
1543 - return s
1544 -
1545 - class _use_class:
1546 -
1547 - __slots__ = ("enabled", "_expand", "_expand_hidden",
1548 - "_force", "_pkg", "_mask")
1549 -
1550 - # Share identical frozenset instances when available.
1551 - _frozensets = {}
1552 -
1553 - def __init__(self, pkg, enabled_flags):
1554 - self._pkg = pkg
1555 - self._expand = None
1556 - self._expand_hidden = None
1557 - self._force = None
1558 - self._mask = None
1559 - if eapi_has_use_aliases(pkg.eapi):
1560 - for enabled_flag in enabled_flags:
1561 - enabled_flags.extend(pkg.iuse.alias_mapping.get(enabled_flag, []))
1562 - self.enabled = frozenset(enabled_flags)
1563 - if pkg.built:
1564 - # Use IUSE to validate USE settings for built packages,
1565 - # in case the package manager that built this package
1566 - # failed to do that for some reason (or in case of
1567 - # data corruption).
1568 - missing_iuse = pkg.iuse.get_missing_iuse(self.enabled)
1569 - if missing_iuse:
1570 - self.enabled = self.enabled.difference(missing_iuse)
1571 -
1572 - def _init_force_mask(self):
1573 - pkgsettings = self._pkg._get_pkgsettings()
1574 - frozensets = self._frozensets
1575 - s = frozenset(
1576 - pkgsettings.get("USE_EXPAND", "").lower().split())
1577 - self._expand = frozensets.setdefault(s, s)
1578 - s = frozenset(
1579 - pkgsettings.get("USE_EXPAND_HIDDEN", "").lower().split())
1580 - self._expand_hidden = frozensets.setdefault(s, s)
1581 - s = pkgsettings.useforce
1582 - self._force = frozensets.setdefault(s, s)
1583 - s = pkgsettings.usemask
1584 - self._mask = frozensets.setdefault(s, s)
1585 -
1586 - @property
1587 - def expand(self):
1588 - if self._expand is None:
1589 - self._init_force_mask()
1590 - return self._expand
1591 -
1592 - @property
1593 - def expand_hidden(self):
1594 - if self._expand_hidden is None:
1595 - self._init_force_mask()
1596 - return self._expand_hidden
1597 -
1598 - @property
1599 - def force(self):
1600 - if self._force is None:
1601 - self._init_force_mask()
1602 - return self._force
1603 -
1604 - @property
1605 - def mask(self):
1606 - if self._mask is None:
1607 - self._init_force_mask()
1608 - return self._mask
1609 -
1610 - @property
1611 - def repo(self):
1612 - return self._metadata['repository']
1613 -
1614 - @property
1615 - def repo_priority(self):
1616 - repo_info = self.root_config.settings.repositories.prepos.get(self.repo)
1617 - if repo_info is None:
1618 - return None
1619 - return repo_info.priority
1620 -
1621 - @property
1622 - def use(self):
1623 - if self._use is None:
1624 - self._init_use()
1625 - return self._use
1626 -
1627 - def _get_pkgsettings(self):
1628 - pkgsettings = self.root_config.trees[
1629 - 'porttree'].dbapi.doebuild_settings
1630 - pkgsettings.setcpv(self)
1631 - return pkgsettings
1632 -
1633 - def _init_use(self):
1634 - if self.built:
1635 - # Use IUSE to validate USE settings for built packages,
1636 - # in case the package manager that built this package
1637 - # failed to do that for some reason (or in case of
1638 - # data corruption). The enabled flags must be consistent
1639 - # with implicit IUSE, in order to avoid potential
1640 - # inconsistencies in USE dep matching (see bug #453400).
1641 - use_str = self._metadata['USE']
1642 - is_valid_flag = self.iuse.is_valid_flag
1643 - enabled_flags = [x for x in use_str.split() if is_valid_flag(x)]
1644 - use_str = " ".join(enabled_flags)
1645 - self._use = self._use_class(
1646 - self, enabled_flags)
1647 - else:
1648 - try:
1649 - use_str = _PackageMetadataWrapperBase.__getitem__(
1650 - self._metadata, 'USE')
1651 - except KeyError:
1652 - use_str = None
1653 - calculated_use = False
1654 - if not use_str:
1655 - use_str = self._get_pkgsettings()["PORTAGE_USE"]
1656 - calculated_use = True
1657 - self._use = self._use_class(
1658 - self, use_str.split())
1659 - # Initialize these now, since USE access has just triggered
1660 - # setcpv, and we want to cache the result of the force/mask
1661 - # calculations that were done.
1662 - if calculated_use:
1663 - self._use._init_force_mask()
1664 -
1665 - _PackageMetadataWrapperBase.__setitem__(
1666 - self._metadata, 'USE', use_str)
1667 -
1668 - return use_str
1669 -
1670 - class _iuse:
1671 -
1672 - __slots__ = ("__weakref__", "_iuse_implicit_match", "_pkg", "alias_mapping",
1673 - "all", "all_aliases", "enabled", "disabled", "tokens")
1674 -
1675 - def __init__(self, pkg, tokens, iuse_implicit_match, aliases, eapi):
1676 - self._pkg = pkg
1677 - self.tokens = tuple(tokens)
1678 - self._iuse_implicit_match = iuse_implicit_match
1679 - enabled = []
1680 - disabled = []
1681 - other = []
1682 - enabled_aliases = []
1683 - disabled_aliases = []
1684 - other_aliases = []
1685 - aliases_supported = eapi_has_use_aliases(eapi)
1686 - self.alias_mapping = {}
1687 - for x in tokens:
1688 - prefix = x[:1]
1689 - if prefix == "+":
1690 - enabled.append(x[1:])
1691 - if aliases_supported:
1692 - self.alias_mapping[x[1:]] = aliases.get(x[1:], [])
1693 - enabled_aliases.extend(self.alias_mapping[x[1:]])
1694 - elif prefix == "-":
1695 - disabled.append(x[1:])
1696 - if aliases_supported:
1697 - self.alias_mapping[x[1:]] = aliases.get(x[1:], [])
1698 - disabled_aliases.extend(self.alias_mapping[x[1:]])
1699 - else:
1700 - other.append(x)
1701 - if aliases_supported:
1702 - self.alias_mapping[x] = aliases.get(x, [])
1703 - other_aliases.extend(self.alias_mapping[x])
1704 - self.enabled = frozenset(chain(enabled, enabled_aliases))
1705 - self.disabled = frozenset(chain(disabled, disabled_aliases))
1706 - self.all = frozenset(chain(enabled, disabled, other))
1707 - self.all_aliases = frozenset(chain(enabled_aliases, disabled_aliases, other_aliases))
1708 -
1709 - def is_valid_flag(self, flags):
1710 - """
1711 - @return: True if all flags are valid USE values which may
1712 - be specified in USE dependencies, False otherwise.
1713 - """
1714 - if isinstance(flags, str):
1715 - flags = [flags]
1716 -
1717 - for flag in flags:
1718 - if not flag in self.all and not flag in self.all_aliases and \
1719 - not self._iuse_implicit_match(flag):
1720 - return False
1721 - return True
1722 -
1723 - def get_missing_iuse(self, flags):
1724 - """
1725 - @return: A list of flags missing from IUSE.
1726 - """
1727 - if isinstance(flags, str):
1728 - flags = [flags]
1729 - missing_iuse = []
1730 - for flag in flags:
1731 - if not flag in self.all and not flag in self.all_aliases and \
1732 - not self._iuse_implicit_match(flag):
1733 - missing_iuse.append(flag)
1734 - return missing_iuse
1735 -
1736 - def get_real_flag(self, flag):
1737 - """
1738 - Returns the flag's name within the scope of this package
1739 - (accounting for aliases), or None if the flag is unknown.
1740 - """
1741 - if flag in self.all:
1742 - return flag
1743 -
1744 - if flag in self.all_aliases:
1745 - for k, v in self.alias_mapping.items():
1746 - if flag in v:
1747 - return k
1748 -
1749 - if self._iuse_implicit_match(flag):
1750 - return flag
1751 -
1752 - return None
1753 -
1754 - def __len__(self):
1755 - return 4
1756 -
1757 - def __iter__(self):
1758 - """
1759 - This is used to generate mtimedb resume mergelist entries, so we
1760 - limit it to 4 items for backward compatibility.
1761 - """
1762 - return iter(self._hash_key[:4])
1763 -
1764 - def __lt__(self, other):
1765 - if other.cp != self.cp:
1766 - return self.cp < other.cp
1767 - result = portage.vercmp(self.version, other.version)
1768 - if result < 0:
1769 - return True
1770 - if result == 0 and self.built and other.built:
1771 - return self.build_time < other.build_time
1772 - return False
1773 -
1774 - def __le__(self, other):
1775 - if other.cp != self.cp:
1776 - return self.cp <= other.cp
1777 - result = portage.vercmp(self.version, other.version)
1778 - if result <= 0:
1779 - return True
1780 - if result == 0 and self.built and other.built:
1781 - return self.build_time <= other.build_time
1782 - return False
1783 -
1784 - def __gt__(self, other):
1785 - if other.cp != self.cp:
1786 - return self.cp > other.cp
1787 - result = portage.vercmp(self.version, other.version)
1788 - if result > 0:
1789 - return True
1790 - if result == 0 and self.built and other.built:
1791 - return self.build_time > other.build_time
1792 - return False
1793 -
1794 - def __ge__(self, other):
1795 - if other.cp != self.cp:
1796 - return self.cp >= other.cp
1797 - result = portage.vercmp(self.version, other.version)
1798 - if result >= 0:
1799 - return True
1800 - if result == 0 and self.built and other.built:
1801 - return self.build_time >= other.build_time
1802 - return False
1803 -
1804 - def with_use(self, use):
1805 - """
1806 - Return a Package instance with the specified USE flags. The
1807 - current instance may be returned if it has identical USE flags.
1808 - @param use: a set of USE flags
1809 - @type use: frozenset
1810 - @return: A package with the specified USE flags
1811 - @rtype: Package
1812 - """
1813 - if use is not self.use.enabled:
1814 - pkg = self.copy()
1815 - pkg._metadata["USE"] = " ".join(use)
1816 - else:
1817 - pkg = self
1818 - return pkg
1819 -
1820 - _all_metadata_keys = set(x for x in portage.auxdbkeys \
1821 - if not x.startswith("UNUSED_"))
1822 + __hash__ = Task.__hash__
1823 + __slots__ = (
1824 + "built",
1825 + "cpv",
1826 + "depth",
1827 + "installed",
1828 + "onlydeps",
1829 + "operation",
1830 + "root_config",
1831 + "type_name",
1832 + "category",
1833 + "counter",
1834 + "cp",
1835 + "cpv_split",
1836 + "inherited",
1837 + "iuse",
1838 + "mtime",
1839 + "pf",
1840 + "root",
1841 + "slot",
1842 + "sub_slot",
1843 + "slot_atom",
1844 + "version",
1845 + ) + (
1846 + "_invalid",
1847 + "_masks",
1848 + "_metadata",
1849 + "_provided_cps",
1850 + "_raw_metadata",
1851 + "_provides",
1852 + "_requires",
1853 + "_use",
1854 + "_validated_atoms",
1855 + "_visible",
1856 + )
1857 +
1858 + metadata_keys = [
1859 + "BDEPEND",
1860 + "BUILD_ID",
1861 + "BUILD_TIME",
1862 + "CHOST",
1863 + "COUNTER",
1864 + "DEFINED_PHASES",
1865 + "DEPEND",
1866 + "EAPI",
1867 + "IDEPEND",
1868 + "INHERITED",
1869 + "IUSE",
1870 + "KEYWORDS",
1871 + "LICENSE",
1872 + "MD5",
1873 + "PDEPEND",
1874 + "PROVIDES",
1875 + "RDEPEND",
1876 + "repository",
1877 + "REQUIRED_USE",
1878 + "PROPERTIES",
1879 + "REQUIRES",
1880 + "RESTRICT",
1881 + "SIZE",
1882 + "SLOT",
1883 + "USE",
1884 + "_mtime_",
1885 ++ # PREFIX LOCAL
1886 ++ "EPREFIX",
1887 + ]
1888 +
1889 + _dep_keys = ("BDEPEND", "DEPEND", "IDEPEND", "PDEPEND", "RDEPEND")
1890 + _buildtime_keys = ("BDEPEND", "DEPEND")
1891 + _runtime_keys = ("IDEPEND", "PDEPEND", "RDEPEND")
1892 + _use_conditional_misc_keys = ("LICENSE", "PROPERTIES", "RESTRICT")
1893 + UNKNOWN_REPO = _unknown_repo
1894 +
1895 + def __init__(self, **kwargs):
1896 + metadata = _PackageMetadataWrapperBase(kwargs.pop("metadata"))
1897 + Task.__init__(self, **kwargs)
1898 + # the SlotObject constructor assigns self.root_config from keyword args;
1899 + # it is an instance of the _emerge.RootConfig.RootConfig class
1900 + self.root = self.root_config.root
1901 + self._raw_metadata = metadata
1902 + self._metadata = _PackageMetadataWrapper(self, metadata)
1903 + if not self.built:
1904 + self._metadata["CHOST"] = self.root_config.settings.get("CHOST", "")
1905 + eapi_attrs = _get_eapi_attrs(self.eapi)
1906 +
1907 + try:
1908 + db = self.cpv._db
1909 + except AttributeError:
1910 + if self.built:
1911 + # For independence from the source ebuild repository and
1912 + # profile implicit IUSE state, require the _db attribute
1913 + # for built packages.
1914 + raise
1915 + db = self.root_config.trees["porttree"].dbapi
1916 +
1917 + self.cpv = _pkg_str(
1918 + self.cpv, metadata=self._metadata, settings=self.root_config.settings, db=db
1919 + )
1920 + if hasattr(self.cpv, "slot_invalid"):
1921 + self._invalid_metadata(
1922 + "SLOT.invalid", "SLOT: invalid value: '%s'" % self._metadata["SLOT"]
1923 + )
1924 + self.cpv_split = self.cpv.cpv_split
1925 + self.category, self.pf = portage.catsplit(self.cpv)
1926 + self.cp = self.cpv.cp
1927 + self.version = self.cpv.version
1928 + self.slot = self.cpv.slot
1929 + self.sub_slot = self.cpv.sub_slot
1930 + self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
1931 + # sync metadata with validated repo (may be UNKNOWN_REPO)
1932 + self._metadata["repository"] = self.cpv.repo
1933 +
1934 + if self.root_config.settings.local_config:
1935 + implicit_match = db._iuse_implicit_cnstr(self.cpv, self._metadata)
1936 + else:
1937 + implicit_match = db._repoman_iuse_implicit_cnstr(self.cpv, self._metadata)
1938 + usealiases = self.root_config.settings._use_manager.getUseAliases(self)
1939 + self.iuse = self._iuse(
1940 + self, self._metadata["IUSE"].split(), implicit_match, usealiases, self.eapi
1941 + )
1942 +
1943 + if (self.iuse.enabled or self.iuse.disabled) and not eapi_attrs.iuse_defaults:
1944 + if not self.installed:
1945 + self._invalid_metadata(
1946 + "EAPI.incompatible",
1947 + "IUSE contains defaults, but EAPI doesn't allow them",
1948 + )
1949 + if self.inherited is None:
1950 + self.inherited = frozenset()
1951 +
1952 + if self.operation is None:
1953 + if self.onlydeps or self.installed:
1954 + self.operation = "nomerge"
1955 + else:
1956 + self.operation = "merge"
1957 +
1958 + self._hash_key = Package._gen_hash_key(
1959 + cpv=self.cpv,
1960 + installed=self.installed,
1961 + onlydeps=self.onlydeps,
1962 + operation=self.operation,
1963 + repo_name=self.cpv.repo,
1964 + root_config=self.root_config,
1965 + type_name=self.type_name,
1966 + )
1967 + self._hash_value = hash(self._hash_key)
1968 +
1969 + @property
1970 + def eapi(self):
1971 + return self._metadata["EAPI"]
1972 +
1973 + @property
1974 + def build_id(self):
1975 + return self.cpv.build_id
1976 +
1977 + @property
1978 + def build_time(self):
1979 + if not self.built:
1980 + raise AttributeError("build_time")
1981 + return self.cpv.build_time
1982 +
1983 + @property
1984 + def defined_phases(self):
1985 + return self._metadata.defined_phases
1986 +
1987 + @property
1988 + def properties(self):
1989 + return self._metadata.properties
1990 +
1991 + @property
1992 + def provided_cps(self):
1993 + return (self.cp,)
1994 +
1995 + @property
1996 + def restrict(self):
1997 + return self._metadata.restrict
1998 +
1999 + @property
2000 + def metadata(self):
2001 + warnings.warn(
2002 + "_emerge.Package.Package.metadata is deprecated",
2003 + DeprecationWarning,
2004 + stacklevel=3,
2005 + )
2006 + return self._metadata
2007 +
2008 + # These are calculated on demand, so that the values reflect
2009 + # any metadata tweaks FakeVartree has applied first.
2010 + @property
2011 + def invalid(self):
2012 + if self._invalid is None:
2013 + self._validate_deps()
2014 + if self._invalid is None:
2015 + self._invalid = False
2016 + return self._invalid
2017 +
2018 + @property
2019 + def masks(self):
2020 + if self._masks is None:
2021 + self._masks = self._eval_masks()
2022 + return self._masks
2023 +
2024 + @property
2025 + def visible(self):
2026 + if self._visible is None:
2027 + self._visible = self._eval_visiblity(self.masks)
2028 + return self._visible
2029 +
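
A standalone sketch of the compute-once idiom shared by the invalid/masks/visible
properties above (hypothetical class, not part of portage):

    class LazyExample:
        """Slot-backed properties evaluated on first access, as in Package.masks."""

        __slots__ = ("_masks",)

        def __init__(self):
            self._masks = None  # sentinel: not evaluated yet

        def _eval_masks(self):
            # Stand-in for the expensive evaluation done by Package._eval_masks().
            return {"KEYWORDS": ["~amd64"]}

        @property
        def masks(self):
            if self._masks is None:
                self._masks = self._eval_masks()
            return self._masks

    pkg = LazyExample()
    assert pkg.masks is pkg.masks  # evaluated once, then served from the slot
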
2030 + @property
2031 + def validated_atoms(self):
2032 + """
2033 + Returns *all* validated atoms from the deps, regardless
2034 + of USE conditionals, with USE conditionals inside
2035 + atoms left unevaluated.
2036 + """
2037 + if self._validated_atoms is None:
2038 + self._validate_deps()
2039 + return self._validated_atoms
2040 +
2041 + @property
2042 + def stable(self):
2043 + return self.cpv.stable
2044 +
2045 + @property
2046 + def provides(self):
2047 + self.invalid
2048 + return self._provides
2049 +
2050 + @property
2051 + def requires(self):
2052 + self.invalid
2053 + return self._requires
2054 +
2055 + @classmethod
2056 + def _gen_hash_key(
2057 + cls,
2058 + cpv=None,
2059 + installed=None,
2060 + onlydeps=None,
2061 + operation=None,
2062 + repo_name=None,
2063 + root_config=None,
2064 + type_name=None,
2065 + **kwargs
2066 + ):
2067 +
2068 + if operation is None:
2069 + if installed or onlydeps:
2070 + operation = "nomerge"
2071 + else:
2072 + operation = "merge"
2073 +
2074 + root = None
2075 + if root_config is not None:
2076 + root = root_config.root
2077 + else:
2078 + raise TypeError("root_config argument is required")
2079 +
2080 + elements = [type_name, root, str(cpv), operation]
2081 +
2082 + # For installed (and binary) packages we don't care about the repo
2083 + # when it comes to hashing, because there can only be one cpv.
2084 + # So overwrite the repo_key with type_name.
2085 + if type_name is None:
2086 + raise TypeError("type_name argument is required")
2087 + elif type_name == "ebuild":
2088 + if repo_name is None:
2089 + raise AssertionError(
2090 + "Package._gen_hash_key() " + "called without 'repo_name' argument"
2091 + )
2092 + elements.append(repo_name)
2093 + elif type_name == "binary":
2094 + # Including a variety of fingerprints in the hash makes
2095 + # it possible to simultaneously consider multiple similar
2096 + # packages. Note that digests are not included here, since
2097 + # they are relatively expensive to compute, and they may
2098 + # not necessarily be available.
2099 + elements.extend([cpv.build_id, cpv.file_size, cpv.build_time, cpv.mtime])
2100 + else:
2101 + # For installed (and binary) packages we don't care about the repo
2102 + # when it comes to hashing, because there can only be one cpv.
2103 + # So overwrite the repo_key with type_name.
2104 + elements.append(type_name)
2105 +
2106 + return tuple(elements)
2107 +
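
For reference, the keys built above are plain tuples; a hedged illustration with
made-up values (real entries come from _pkg_str instances, and the binary
fingerprints from the package index):

    # Hypothetical hash keys as _gen_hash_key() would lay them out:
    ebuild_key = ("ebuild", "/", "sys-apps/portage-3.0.30", "merge", "gentoo")
    binary_key = ("binary", "/", "sys-apps/portage-3.0.30", "merge",
                  1, 1048576, 1642156321, 1642156400)  # build_id, size, times
    installed_key = ("installed", "/", "sys-apps/portage-3.0.30", "nomerge",
                     "installed")  # repo element overwritten with type_name

    # The first four elements are what Package.__iter__() exposes for
    # mtimedb resume mergelist entries:
    assert list(ebuild_key[:4]) == ["ebuild", "/",
                                    "sys-apps/portage-3.0.30", "merge"]
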
2108 + def _validate_deps(self):
2109 + """
2110 + Validate deps. This does not trigger USE calculation since that
2111 + is expensive for ebuilds and therefore we want to avoid doing
2112 + it unnecessarily (like for masked packages).
2113 + """
2114 + eapi = self.eapi
2115 + dep_eapi = eapi
2116 + dep_valid_flag = self.iuse.is_valid_flag
2117 + if self.installed:
2118 + # Ignore EAPI.incompatible and conditionals missing
2119 + # from IUSE for installed packages since these issues
2120 + # aren't relevant now (re-evaluate when new EAPIs are
2121 + # deployed).
2122 + dep_eapi = None
2123 + dep_valid_flag = None
2124 +
2125 + validated_atoms = []
2126 + for k in self._dep_keys:
2127 + v = self._metadata.get(k)
2128 + if not v:
2129 + continue
2130 + try:
2131 + atoms = use_reduce(
2132 + v,
2133 + eapi=dep_eapi,
2134 + matchall=True,
2135 + is_valid_flag=dep_valid_flag,
2136 + token_class=Atom,
2137 + flat=True,
2138 + )
2139 + except InvalidDependString as e:
2140 + self._metadata_exception(k, e)
2141 + else:
2142 + validated_atoms.extend(atoms)
2143 + if not self.built:
2144 + for atom in atoms:
2145 + if not isinstance(atom, Atom):
2146 + continue
2147 + if atom.slot_operator_built:
2148 + e = InvalidDependString(
2149 + _(
2150 + "Improper context for slot-operator "
2151 + '"built" atom syntax: %s'
2152 + )
2153 + % (atom.unevaluated_atom,)
2154 + )
2155 + self._metadata_exception(k, e)
2156 +
2157 + self._validated_atoms = tuple(
2158 + set(atom for atom in validated_atoms if isinstance(atom, Atom))
2159 + )
2160 +
2161 + for k in self._use_conditional_misc_keys:
2162 + v = self._metadata.get(k)
2163 + if not v:
2164 + continue
2165 + try:
2166 + use_reduce(
2167 + v, eapi=dep_eapi, matchall=True, is_valid_flag=dep_valid_flag
2168 + )
2169 + except InvalidDependString as e:
2170 + self._metadata_exception(k, e)
2171 +
2172 + k = "REQUIRED_USE"
2173 + v = self._metadata.get(k)
2174 + if v and not self.built:
2175 + if not _get_eapi_attrs(eapi).required_use:
2176 + self._invalid_metadata(
2177 + "EAPI.incompatible",
2178 + "REQUIRED_USE set, but EAPI='%s' doesn't allow it" % eapi,
2179 + )
2180 + else:
2181 + try:
2182 + check_required_use(v, (), self.iuse.is_valid_flag, eapi=eapi)
2183 + except InvalidDependString as e:
2184 + self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
2185 +
2186 + k = "SRC_URI"
2187 + v = self._metadata.get(k)
2188 + if v:
2189 + try:
2190 + use_reduce(
2191 + v,
2192 + is_src_uri=True,
2193 + eapi=eapi,
2194 + matchall=True,
2195 + is_valid_flag=self.iuse.is_valid_flag,
2196 + )
2197 + except InvalidDependString as e:
2198 + if not self.installed:
2199 + self._metadata_exception(k, e)
2200 +
2201 + if self.built:
2202 + k = "PROVIDES"
2203 + try:
2204 + self._provides = frozenset(parse_soname_deps(self._metadata[k]))
2205 + except InvalidData as e:
2206 + self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
2207 +
2208 + k = "REQUIRES"
2209 + try:
2210 + self._requires = frozenset(parse_soname_deps(self._metadata[k]))
2211 + except InvalidData as e:
2212 + self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
2213 +
2214 + def copy(self):
2215 + return Package(
2216 + built=self.built,
2217 + cpv=self.cpv,
2218 + depth=self.depth,
2219 + installed=self.installed,
2220 + metadata=self._raw_metadata,
2221 + onlydeps=self.onlydeps,
2222 + operation=self.operation,
2223 + root_config=self.root_config,
2224 + type_name=self.type_name,
2225 + )
2226 +
2227 + def _eval_masks(self):
2228 + masks = {}
2229 + settings = self.root_config.settings
2230 +
2231 + if self.invalid is not False:
2232 + masks["invalid"] = self.invalid
2233 +
2234 + if not settings._accept_chost(self.cpv, self._metadata):
2235 + masks["CHOST"] = self._metadata["CHOST"]
2236 +
2237 + eapi = self.eapi
2238 + if not portage.eapi_is_supported(eapi):
2239 + masks["EAPI.unsupported"] = eapi
2240 + if portage._eapi_is_deprecated(eapi):
2241 + masks["EAPI.deprecated"] = eapi
2242 +
2243 + missing_keywords = settings._getMissingKeywords(self.cpv, self._metadata)
2244 + if missing_keywords:
2245 + masks["KEYWORDS"] = missing_keywords
2246 +
2247 + try:
2248 + missing_properties = settings._getMissingProperties(
2249 + self.cpv, self._metadata
2250 + )
2251 + if missing_properties:
2252 + masks["PROPERTIES"] = missing_properties
2253 + except InvalidDependString:
2254 + # already recorded as 'invalid'
2255 + pass
2256 +
2257 + try:
2258 + missing_restricts = settings._getMissingRestrict(self.cpv, self._metadata)
2259 + if missing_restricts:
2260 + masks["RESTRICT"] = missing_restricts
2261 + except InvalidDependString:
2262 + # already recorded as 'invalid'
2263 + pass
2264 +
2265 + mask_atom = settings._getMaskAtom(self.cpv, self._metadata)
2266 + if mask_atom is not None:
2267 + masks["package.mask"] = mask_atom
2268 +
2269 + try:
2270 + missing_licenses = settings._getMissingLicenses(self.cpv, self._metadata)
2271 + if missing_licenses:
2272 + masks["LICENSE"] = missing_licenses
2273 + except InvalidDependString:
2274 + # already recorded as 'invalid'
2275 + pass
2276 +
2277 + if not masks:
2278 + masks = False
2279 +
2280 + return masks
2281 +
2282 + def _eval_visiblity(self, masks):
2283 +
2284 + if masks is not False:
2285 +
2286 + if "EAPI.unsupported" in masks:
2287 + return False
2288 +
2289 + if "invalid" in masks:
2290 + return False
2291 +
2292 + if not self.installed and (
2293 + "CHOST" in masks
2294 + or "EAPI.deprecated" in masks
2295 + or "KEYWORDS" in masks
2296 + or "PROPERTIES" in masks
2297 + or "RESTRICT" in masks
2298 + ):
2299 + return False
2300 +
2301 + if "package.mask" in masks or "LICENSE" in masks:
2302 + return False
2303 +
2304 + return True
2305 +
2306 + def get_keyword_mask(self):
2307 + """returns None, 'missing', or 'unstable'."""
2308 +
2309 + missing = self.root_config.settings._getRawMissingKeywords(
2310 + self.cpv, self._metadata
2311 + )
2312 +
2313 + if not missing:
2314 + return None
2315 +
2316 + if "**" in missing:
2317 + return "missing"
2318 +
2319 + global_accept_keywords = frozenset(
2320 + self.root_config.settings.get("ACCEPT_KEYWORDS", "").split()
2321 + )
2322 +
2323 + for keyword in missing:
2324 + if keyword.lstrip("~") in global_accept_keywords:
2325 + return "unstable"
2326 +
2327 + return "missing"
2328 +
2329 + def isHardMasked(self):
2330 + """returns a bool if the cpv is in the list of
2331 + expanded pmaskdict[cp] available ebuilds"""
2332 + pmask = self.root_config.settings._getRawMaskAtom(self.cpv, self._metadata)
2333 + return pmask is not None
2334 +
2335 + def _metadata_exception(self, k, e):
2336 +
2337 + if k.endswith("DEPEND"):
2338 + qacat = "dependency.syntax"
2339 + else:
2340 + qacat = k + ".syntax"
2341 +
2342 + if not self.installed:
2343 + categorized_error = False
2344 + if e.errors:
2345 + for error in e.errors:
2346 + if getattr(error, "category", None) is None:
2347 + continue
2348 + categorized_error = True
2349 + self._invalid_metadata(error.category, "%s: %s" % (k, error))
2350 +
2351 + if not categorized_error:
2352 + self._invalid_metadata(qacat, "%s: %s" % (k, e))
2353 + else:
2354 + # For installed packages, show the path of the file
2355 + # containing the invalid metadata, since the user may
2356 + # want to fix the deps by hand.
2357 + vardb = self.root_config.trees["vartree"].dbapi
2358 + path = vardb.getpath(self.cpv, filename=k)
2359 + self._invalid_metadata(qacat, "%s: %s in '%s'" % (k, e, path))
2360 +
2361 + def _invalid_metadata(self, msg_type, msg):
2362 + if self._invalid is None:
2363 + self._invalid = {}
2364 + msgs = self._invalid.get(msg_type)
2365 + if msgs is None:
2366 + msgs = []
2367 + self._invalid[msg_type] = msgs
2368 + msgs.append(msg)
2369 +
2370 + def __str__(self):
2371 + if self.operation == "merge":
2372 + if self.type_name == "binary":
2373 + cpv_color = "PKG_BINARY_MERGE"
2374 + else:
2375 + cpv_color = "PKG_MERGE"
2376 + elif self.operation == "uninstall":
2377 + cpv_color = "PKG_UNINSTALL"
2378 + else:
2379 + cpv_color = "PKG_NOMERGE"
2380 +
2381 + build_id_str = ""
2382 + if isinstance(self.cpv.build_id, int) and self.cpv.build_id > 0:
2383 + build_id_str = "-%s" % self.cpv.build_id
2384 +
2385 + s = "(%s, %s" % (
2386 + portage.output.colorize(
2387 + cpv_color,
2388 + self.cpv
2389 + + build_id_str
2390 + + _slot_separator
2391 + + self.slot
2392 + + "/"
2393 + + self.sub_slot
2394 + + _repo_separator
2395 + + self.repo,
2396 + ),
2397 + self.type_name,
2398 + )
2399 +
2400 + if self.type_name == "installed":
2401 + if self.root_config.settings["ROOT"] != "/":
2402 + s += " in '%s'" % self.root_config.settings["ROOT"]
2403 + if self.operation == "uninstall":
2404 + s += " scheduled for uninstall"
2405 + else:
2406 + if self.operation == "merge":
2407 + s += " scheduled for merge"
2408 + if self.root_config.settings["ROOT"] != "/":
2409 + s += " to '%s'" % self.root_config.settings["ROOT"]
2410 + s += ")"
2411 + return s
2412 +
2413 + class _use_class:
2414 +
2415 + __slots__ = ("enabled", "_expand", "_expand_hidden", "_force", "_pkg", "_mask")
2416 +
2417 + # Share identical frozenset instances when available.
2418 + _frozensets = {}
2419 +
2420 + def __init__(self, pkg, enabled_flags):
2421 + self._pkg = pkg
2422 + self._expand = None
2423 + self._expand_hidden = None
2424 + self._force = None
2425 + self._mask = None
2426 + if eapi_has_use_aliases(pkg.eapi):
2427 + for enabled_flag in enabled_flags:
2428 + enabled_flags.extend(pkg.iuse.alias_mapping.get(enabled_flag, []))
2429 + self.enabled = frozenset(enabled_flags)
2430 + if pkg.built:
2431 + # Use IUSE to validate USE settings for built packages,
2432 + # in case the package manager that built this package
2433 + # failed to do that for some reason (or in case of
2434 + # data corruption).
2435 + missing_iuse = pkg.iuse.get_missing_iuse(self.enabled)
2436 + if missing_iuse:
2437 + self.enabled = self.enabled.difference(missing_iuse)
2438 +
2439 + def _init_force_mask(self):
2440 + pkgsettings = self._pkg._get_pkgsettings()
2441 + frozensets = self._frozensets
2442 + s = frozenset(pkgsettings.get("USE_EXPAND", "").lower().split())
2443 + self._expand = frozensets.setdefault(s, s)
2444 + s = frozenset(pkgsettings.get("USE_EXPAND_HIDDEN", "").lower().split())
2445 + self._expand_hidden = frozensets.setdefault(s, s)
2446 + s = pkgsettings.useforce
2447 + self._force = frozensets.setdefault(s, s)
2448 + s = pkgsettings.usemask
2449 + self._mask = frozensets.setdefault(s, s)
2450 +
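
The _frozensets dict above is a small interning cache: equal frozensets hash
equally, so setdefault(s, s) returns the first instance ever stored, and every
package with the same USE_EXPAND or usemask contents shares one object. The
idiom in isolation:

    _frozensets = {}

    def shared_frozenset(iterable):
        # setdefault(s, s) returns the previously stored equal instance,
        # if any, so equal sets collapse to a single shared object.
        s = frozenset(iterable)
        return _frozensets.setdefault(s, s)

    a = shared_frozenset("python abi_x86_64".split())
    b = shared_frozenset(["abi_x86_64", "python"])
    assert a == b and a is b  # one instance shared across many packages
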
2451 + @property
2452 + def expand(self):
2453 + if self._expand is None:
2454 + self._init_force_mask()
2455 + return self._expand
2456 +
2457 + @property
2458 + def expand_hidden(self):
2459 + if self._expand_hidden is None:
2460 + self._init_force_mask()
2461 + return self._expand_hidden
2462 +
2463 + @property
2464 + def force(self):
2465 + if self._force is None:
2466 + self._init_force_mask()
2467 + return self._force
2468 +
2469 + @property
2470 + def mask(self):
2471 + if self._mask is None:
2472 + self._init_force_mask()
2473 + return self._mask
2474 +
2475 + @property
2476 + def repo(self):
2477 + return self._metadata["repository"]
2478 +
2479 + @property
2480 + def repo_priority(self):
2481 + repo_info = self.root_config.settings.repositories.prepos.get(self.repo)
2482 + if repo_info is None:
2483 + return None
2484 + return repo_info.priority
2485 +
2486 + @property
2487 + def use(self):
2488 + if self._use is None:
2489 + self._init_use()
2490 + return self._use
2491 +
2492 + def _get_pkgsettings(self):
2493 + pkgsettings = self.root_config.trees["porttree"].dbapi.doebuild_settings
2494 + pkgsettings.setcpv(self)
2495 + return pkgsettings
2496 +
2497 + def _init_use(self):
2498 + if self.built:
2499 + # Use IUSE to validate USE settings for built packages,
2500 + # in case the package manager that built this package
2501 + # failed to do that for some reason (or in case of
2502 + # data corruption). The enabled flags must be consistent
2503 + # with implicit IUSE, in order to avoid potential
2504 + # inconsistencies in USE dep matching (see bug #453400).
2505 + use_str = self._metadata["USE"]
2506 + is_valid_flag = self.iuse.is_valid_flag
2507 + enabled_flags = [x for x in use_str.split() if is_valid_flag(x)]
2508 + use_str = " ".join(enabled_flags)
2509 + self._use = self._use_class(self, enabled_flags)
2510 + else:
2511 + try:
2512 + use_str = _PackageMetadataWrapperBase.__getitem__(self._metadata, "USE")
2513 + except KeyError:
2514 + use_str = None
2515 + calculated_use = False
2516 + if not use_str:
2517 + use_str = self._get_pkgsettings()["PORTAGE_USE"]
2518 + calculated_use = True
2519 + self._use = self._use_class(self, use_str.split())
2520 + # Initialize these now, since USE access has just triggered
2521 + # setcpv, and we want to cache the result of the force/mask
2522 + # calculations that were done.
2523 + if calculated_use:
2524 + self._use._init_force_mask()
2525 +
2526 + _PackageMetadataWrapperBase.__setitem__(self._metadata, "USE", use_str)
2527 +
2528 + return use_str
2529 +
2530 + class _iuse:
2531 +
2532 + __slots__ = (
2533 + "__weakref__",
2534 + "_iuse_implicit_match",
2535 + "_pkg",
2536 + "alias_mapping",
2537 + "all",
2538 + "all_aliases",
2539 + "enabled",
2540 + "disabled",
2541 + "tokens",
2542 + )
2543 +
2544 + def __init__(self, pkg, tokens, iuse_implicit_match, aliases, eapi):
2545 + self._pkg = pkg
2546 + self.tokens = tuple(tokens)
2547 + self._iuse_implicit_match = iuse_implicit_match
2548 + enabled = []
2549 + disabled = []
2550 + other = []
2551 + enabled_aliases = []
2552 + disabled_aliases = []
2553 + other_aliases = []
2554 + aliases_supported = eapi_has_use_aliases(eapi)
2555 + self.alias_mapping = {}
2556 + for x in tokens:
2557 + prefix = x[:1]
2558 + if prefix == "+":
2559 + enabled.append(x[1:])
2560 + if aliases_supported:
2561 + self.alias_mapping[x[1:]] = aliases.get(x[1:], [])
2562 + enabled_aliases.extend(self.alias_mapping[x[1:]])
2563 + elif prefix == "-":
2564 + disabled.append(x[1:])
2565 + if aliases_supported:
2566 + self.alias_mapping[x[1:]] = aliases.get(x[1:], [])
2567 + disabled_aliases.extend(self.alias_mapping[x[1:]])
2568 + else:
2569 + other.append(x)
2570 + if aliases_supported:
2571 + self.alias_mapping[x] = aliases.get(x, [])
2572 + other_aliases.extend(self.alias_mapping[x])
2573 + self.enabled = frozenset(chain(enabled, enabled_aliases))
2574 + self.disabled = frozenset(chain(disabled, disabled_aliases))
2575 + self.all = frozenset(chain(enabled, disabled, other))
2576 + self.all_aliases = frozenset(
2577 + chain(enabled_aliases, disabled_aliases, other_aliases)
2578 + )
2579 +
2580 + def is_valid_flag(self, flags):
2581 + """
2582 + @return: True if all flags are valid USE values which may
2583 + be specified in USE dependencies, False otherwise.
2584 + """
2585 + if isinstance(flags, str):
2586 + flags = [flags]
2587 +
2588 + for flag in flags:
2589 + if (
2590 + not flag in self.all
2591 + and not flag in self.all_aliases
2592 + and not self._iuse_implicit_match(flag)
2593 + ):
2594 + return False
2595 + return True
2596 +
2597 + def get_missing_iuse(self, flags):
2598 + """
2599 + @return: A list of flags missing from IUSE.
2600 + """
2601 + if isinstance(flags, str):
2602 + flags = [flags]
2603 + missing_iuse = []
2604 + for flag in flags:
2605 + if (
2606 + not flag in self.all
2607 + and not flag in self.all_aliases
2608 + and not self._iuse_implicit_match(flag)
2609 + ):
2610 + missing_iuse.append(flag)
2611 + return missing_iuse
2612 +
2613 + def get_real_flag(self, flag):
2614 + """
2615 + Returns the flag's name within the scope of this package
2616 + (accounting for aliases), or None if the flag is unknown.
2617 + """
2618 + if flag in self.all:
2619 + return flag
2620 +
2621 + if flag in self.all_aliases:
2622 + for k, v in self.alias_mapping.items():
2623 + if flag in v:
2624 + return k
2625 +
2626 + if self._iuse_implicit_match(flag):
2627 + return flag
2628 +
2629 + return None
2630 +
2631 + def __len__(self):
2632 + return 4
2633 +
2634 + def __iter__(self):
2635 + """
2636 + This is used to generate mtimedb resume mergelist entries, so we
2637 + limit it to 4 items for backward compatibility.
2638 + """
2639 + return iter(self._hash_key[:4])
2640 +
2641 + def __lt__(self, other):
2642 + if other.cp != self.cp:
2643 + return self.cp < other.cp
2644 + result = portage.vercmp(self.version, other.version)
2645 + if result < 0:
2646 + return True
2647 + if result == 0 and self.built and other.built:
2648 + return self.build_time < other.build_time
2649 + return False
2650 +
2651 + def __le__(self, other):
2652 + if other.cp != self.cp:
2653 + return self.cp <= other.cp
2654 + result = portage.vercmp(self.version, other.version)
2655 + if result <= 0:
2656 + return True
2657 + if result == 0 and self.built and other.built:
2658 + return self.build_time <= other.build_time
2659 + return False
2660 +
2661 + def __gt__(self, other):
2662 + if other.cp != self.cp:
2663 + return self.cp > other.cp
2664 + result = portage.vercmp(self.version, other.version)
2665 + if result > 0:
2666 + return True
2667 + if result == 0 and self.built and other.built:
2668 + return self.build_time > other.build_time
2669 + return False
2670 +
2671 + def __ge__(self, other):
2672 + if other.cp != self.cp:
2673 + return self.cp >= other.cp
2674 + result = portage.vercmp(self.version, other.version)
2675 + if result >= 0:
2676 + return True
2677 + if result == 0 and self.built and other.built:
2678 + return self.build_time >= other.build_time
2679 + return False
2680 +
2681 + def with_use(self, use):
2682 + """
2683 + Return a Package instance with the specified USE flags. The
2684 + current instance may be returned if it has identical USE flags.
2685 + @param use: a set of USE flags
2686 + @type use: frozenset
2687 + @return: A package with the specified USE flags
2688 + @rtype: Package
2689 + """
2690 + if use is not self.use.enabled:
2691 + pkg = self.copy()
2692 + pkg._metadata["USE"] = " ".join(use)
2693 + else:
2694 + pkg = self
2695 + return pkg
2696 +
2697 +
2698 + _all_metadata_keys = set(x for x in portage.auxdbkeys)
2699 _all_metadata_keys.update(Package.metadata_keys)
2700 _all_metadata_keys = frozenset(_all_metadata_keys)
2701
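
The rich-comparison methods near the end of the class order packages by cp
first, then by portage.vercmp() on the version, and break exact-version ties
between built packages with build_time. A simplified standalone sketch; the toy
vercmp below handles only dotted numeric versions, while the real function also
parses letters, suffixes and -r revisions:

    def simple_vercmp(v1, v2):
        # Toy stand-in for portage.vercmp(): negative, zero or positive,
        # comparing dotted numeric components only.
        t1 = [int(x) for x in v1.split(".")]
        t2 = [int(x) for x in v2.split(".")]
        return (t1 > t2) - (t1 < t2)

    def pkg_lt(a, b):
        # a and b are (cp, version, built, build_time) tuples.
        if a[0] != b[0]:
            return a[0] < b[0]
        result = simple_vercmp(a[1], b[1])
        if result < 0:
            return True
        if result == 0 and a[2] and b[2]:
            return a[3] < b[3]  # equal versions: older build sorts first
        return False

    older = ("sys-apps/portage", "3.0.29", True, 1640000000)
    newer = ("sys-apps/portage", "3.0.30", True, 1642156321)
    assert pkg_lt(older, newer) and not pkg_lt(newer, older)
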
2702 diff --cc lib/_emerge/actions.py
2703 index 7eea1c93a,515b22b66..0ed90cd71
2704 --- a/lib/_emerge/actions.py
2705 +++ b/lib/_emerge/actions.py
2706 @@@ -2479,151 -2842,124 +2843,153 @@@ def getportageversion(portdir, _unused
2707
2708 class _emerge_config(SlotObject):
2709
2710 - __slots__ = ('action', 'args', 'opts',
2711 - 'running_config', 'target_config', 'trees')
2712 + __slots__ = ("action", "args", "opts", "running_config", "target_config", "trees")
2713
2714 - # Support unpack as tuple, for load_emerge_config backward compatibility.
2715 - def __iter__(self):
2716 - yield self.target_config.settings
2717 - yield self.trees
2718 - yield self.target_config.mtimedb
2719 + # Support unpack as tuple, for load_emerge_config backward compatibility.
2720 + def __iter__(self):
2721 + yield self.target_config.settings
2722 + yield self.trees
2723 + yield self.target_config.mtimedb
2724
2725 - def __getitem__(self, index):
2726 - return list(self)[index]
2727 + def __getitem__(self, index):
2728 + return list(self)[index]
2729
2730 - def __len__(self):
2731 - return 3
2732 + def __len__(self):
2733 + return 3
2734
2735 - def load_emerge_config(emerge_config=None, env=None, **kargs):
2736
2737 - if emerge_config is None:
2738 - emerge_config = _emerge_config(**kargs)
2739 -
2740 - env = os.environ if env is None else env
2741 - kwargs = {'env': env}
2742 - for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT"),
2743 - ("sysroot", "SYSROOT"), ("eprefix", "EPREFIX")):
2744 - v = env.get(envvar)
2745 - if v is not None:
2746 - kwargs[k] = v
2747 - emerge_config.trees = portage.create_trees(trees=emerge_config.trees,
2748 - **kwargs)
2749 -
2750 - for root_trees in emerge_config.trees.values():
2751 - settings = root_trees["vartree"].settings
2752 - settings._init_dirs()
2753 - setconfig = load_default_config(settings, root_trees)
2754 - root_config = RootConfig(settings, root_trees, setconfig)
2755 - if "root_config" in root_trees:
2756 - # Propagate changes to the existing instance,
2757 - # which may be referenced by a depgraph.
2758 - root_trees["root_config"].update(root_config)
2759 - else:
2760 - root_trees["root_config"] = root_config
2761 + def load_emerge_config(emerge_config=None, env=None, **kargs):
2762
2763 - target_eroot = emerge_config.trees._target_eroot
2764 - emerge_config.target_config = \
2765 - emerge_config.trees[target_eroot]['root_config']
2766 - emerge_config.target_config.mtimedb = portage.MtimeDB(
2767 - os.path.join(target_eroot, portage.CACHE_PATH, "mtimedb"))
2768 - emerge_config.running_config = emerge_config.trees[
2769 - emerge_config.trees._running_eroot]['root_config']
2770 - QueryCommand._db = emerge_config.trees
2771 + if emerge_config is None:
2772 + emerge_config = _emerge_config(**kargs)
2773 +
2774 + env = os.environ if env is None else env
2775 + kwargs = {"env": env}
2776 + for k, envvar in (
2777 + ("config_root", "PORTAGE_CONFIGROOT"),
2778 + ("target_root", "ROOT"),
2779 + ("sysroot", "SYSROOT"),
2780 + ("eprefix", "EPREFIX"),
2781 + ):
2782 + v = env.get(envvar)
2783 + if v is not None:
2784 + kwargs[k] = v
2785 + emerge_config.trees = portage.create_trees(trees=emerge_config.trees, **kwargs)
2786 +
2787 + for root_trees in emerge_config.trees.values():
2788 + settings = root_trees["vartree"].settings
2789 + settings._init_dirs()
2790 + setconfig = load_default_config(settings, root_trees)
2791 + root_config = RootConfig(settings, root_trees, setconfig)
2792 + if "root_config" in root_trees:
2793 + # Propagate changes to the existing instance,
2794 + # which may be referenced by a depgraph.
2795 + root_trees["root_config"].update(root_config)
2796 + else:
2797 + root_trees["root_config"] = root_config
2798 +
2799 + target_eroot = emerge_config.trees._target_eroot
2800 + emerge_config.target_config = emerge_config.trees[target_eroot]["root_config"]
2801 + emerge_config.target_config.mtimedb = portage.MtimeDB(
2802 + os.path.join(target_eroot, portage.CACHE_PATH, "mtimedb")
2803 + )
2804 + emerge_config.running_config = emerge_config.trees[
2805 + emerge_config.trees._running_eroot
2806 + ]["root_config"]
2807 + QueryCommand._db = emerge_config.trees
2808 +
2809 + return emerge_config
2810
2811 - return emerge_config
2812
2813 def getgccversion(chost=None):
2814 - """
2815 - @rtype: C{str}
2816 - @return: the current in-use gcc version
2817 - """
2818 + """
2819 + @rtype: C{str}
2820 + @return: the current in-use gcc version
2821 + """
2822
2823 - gcc_ver_command = ['gcc', '-dumpversion']
2824 - gcc_ver_prefix = 'gcc-'
2825 + gcc_ver_command = ["gcc", "-dumpversion"]
2826 + gcc_ver_prefix = "gcc-"
2827
2828 + clang_ver_command = ['clang', '--version']
2829 + clang_ver_prefix = 'clang-'
2830 +
2831 + ubinpath = os.path.join('/', portage.const.EPREFIX, 'usr', 'bin')
2832 +
2833 - gcc_not_found_error = red(
2834 - "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
2835 - "!!! to update the environment of this terminal and possibly\n" +
2836 - "!!! other terminals also.\n"
2837 - )
2838 + gcc_not_found_error = red(
2839 + "!!! No gcc found. You probably need to 'source /etc/profile'\n"
2840 + + "!!! to update the environment of this terminal and possibly\n"
2841 + + "!!! other terminals also.\n"
2842 + )
2843
2844 + def getclangversion(output):
2845 + version = re.search('clang version ([0-9.]+) ', output)
2846 + if version:
2847 + return version.group(1)
2848 + return "unknown"
2849 +
2850 - if chost:
2851 - try:
2852 - proc = subprocess.Popen(["gcc-config", "-c"],
2853 - stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
2854 - except OSError:
2855 - myoutput = None
2856 - mystatus = 1
2857 - else:
2858 - myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
2859 - mystatus = proc.wait()
2860 - if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
2861 - return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
2862 + if chost:
2863 + try:
2864 + proc = subprocess.Popen(
2865 - ["gcc-config", "-c"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
2866 ++ [ubinpath + "/" + "gcc-config", "-c"],
2867 ++ stdout=subprocess.PIPE, stderr=subprocess.STDOUT
2868 + )
2869 + except OSError:
2870 + myoutput = None
2871 + mystatus = 1
2872 + else:
2873 + myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
2874 + mystatus = proc.wait()
2875 + if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
2876 + return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
2877 +
2878 + try:
2879 + proc = subprocess.Popen(
2880 - [chost + "-" + gcc_ver_command[0]] + gcc_ver_command[1:],
2881 ++ [ubinpath + "/" + chost + "-" + gcc_ver_command[0]]
2882 ++ + gcc_ver_command[1:],
2883 + stdout=subprocess.PIPE,
2884 + stderr=subprocess.STDOUT,
2885 + )
2886 + except OSError:
2887 + myoutput = None
2888 + mystatus = 1
2889 + else:
2890 + myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
2891 + mystatus = proc.wait()
2892 + if mystatus == os.EX_OK:
2893 + return gcc_ver_prefix + myoutput
2894
2895 + try:
2896 + proc = subprocess.Popen(
2897 - [chost + "-" + gcc_ver_command[0]] + gcc_ver_command[1:],
2898 - stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
2899 - except OSError:
2900 - myoutput = None
2901 - mystatus = 1
2902 - else:
2903 - myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
2904 - mystatus = proc.wait()
2905 - if mystatus == os.EX_OK:
2906 - return gcc_ver_prefix + myoutput
2907 -
2908 - try:
2909 - proc = subprocess.Popen([ubinpath + "/" + gcc_ver_command[0]] + gcc_ver_command[1:],
2910 - stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
2911 - except OSError:
2912 - myoutput = None
2913 - mystatus = 1
2914 - else:
2915 - myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
2916 - mystatus = proc.wait()
2917 - if mystatus == os.EX_OK:
2918 - return gcc_ver_prefix + myoutput
2919 -
2920 - if chost:
2921 - try:
2922 - proc = subprocess.Popen(
2923 - [ubinpath + "/" + chost + "-" + clang_ver_command[0]] + clang_ver_command[1:],
2924 - stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
2925 ++ [ubinpath + "/" + chost + "-" + clang_ver_command[0]]
2926 ++ + clang_ver_command[1:],
2927 ++ stdout=subprocess.PIPE,
2928 ++ stderr=subprocess.STDOUT,
2929 ++ )
2930 + except OSError:
2931 + myoutput = None
2932 + mystatus = 1
2933 + else:
2934 + myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
2935 + mystatus = proc.wait()
2936 + if mystatus == os.EX_OK:
2937 + return clang_ver_prefix + getclangversion(myoutput)
2938 +
2939 - try:
2940 - proc = subprocess.Popen([ubinpath + "/" + clang_ver_command[0]] + clang_ver_command[1:],
2941 - stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
2942 - except OSError:
2943 - myoutput = None
2944 - mystatus = 1
2945 - else:
2946 - myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
2947 - mystatus = proc.wait()
2948 - if mystatus == os.EX_OK:
2949 - return clang_ver_prefix + getclangversion(myoutput)
2950 -
2951 - portage.writemsg(gcc_not_found_error, noiselevel=-1)
2952 - return "[unavailable]"
2953 + try:
2954 + proc = subprocess.Popen(
2955 + gcc_ver_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
2956 + )
2957 + except OSError:
2958 + myoutput = None
2959 + mystatus = 1
2960 + else:
2961 + myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
2962 + mystatus = proc.wait()
2963 + if mystatus == os.EX_OK:
2964 + return gcc_ver_prefix + myoutput
2965 +
2966 + portage.writemsg(gcc_not_found_error, noiselevel=-1)
2967 + return "[unavailable]"
2968 +
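
getgccversion() is a chain of probes: gcc-config, then a CHOST-prefixed gcc
(EPREFIX-qualified on the prefix branch), then clang, then bare gcc, with
"[unavailable]" as the last resort. A condensed sketch of the
probe-and-fall-through pattern, with the candidate list abbreviated:

    import subprocess

    def probe(argv):
        """Run one candidate command; return its output, or None on failure."""
        try:
            proc = subprocess.Popen(argv, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
        except OSError:
            return None  # binary not found: fall through to the next candidate
        output = proc.communicate()[0].decode(errors="replace").rstrip("\n")
        return output if proc.wait() == 0 else None

    for argv in (["gcc-config", "-c"],
                 ["gcc", "-dumpversion"],
                 ["clang", "--version"]):
        version = probe(argv)
        if version is not None:
            print("%s -> %s" % (argv[0], version))
            break
    else:
        print("[unavailable]")
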
2969
2970 # Warn about features that may confuse users and
2971 # lead them to report invalid bugs.
2972 diff --cc lib/_emerge/depgraph.py
2973 index 4d578d557,f6549eba6..61be9d02b
2974 --- a/lib/_emerge/depgraph.py
2975 +++ b/lib/_emerge/depgraph.py
2976 @@@ -10083,234 -11596,273 +11596,283 @@@ def _backtrack_depgraph(settings, trees
2977
2978
2979 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
2980 - """
2981 - Raises PackageSetNotFound if myfiles contains a missing package set.
2982 - """
2983 - _spinner_start(spinner, myopts)
2984 - try:
2985 - return _resume_depgraph(settings, trees, mtimedb, myopts,
2986 - myparams, spinner)
2987 - finally:
2988 - _spinner_stop(spinner)
2989 + """
2990 + Raises PackageSetNotFound if myfiles contains a missing package set.
2991 + """
2992 + _spinner_start(spinner, myopts)
2993 + try:
2994 + return _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner)
2995 + finally:
2996 + _spinner_stop(spinner)
2997 +
2998
2999 def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
3000 - """
3001 - Construct a depgraph for the given resume list. This will raise
3002 - PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
3003 - TODO: Return reasons for dropped_tasks, for display/logging.
3004 - @rtype: tuple
3005 - @return: (success, depgraph, dropped_tasks)
3006 - """
3007 - skip_masked = True
3008 - skip_unsatisfied = True
3009 - mergelist = mtimedb["resume"]["mergelist"]
3010 - dropped_tasks = {}
3011 - frozen_config = _frozen_depgraph_config(settings, trees,
3012 - myopts, myparams, spinner)
3013 - while True:
3014 - mydepgraph = depgraph(settings, trees,
3015 - myopts, myparams, spinner, frozen_config=frozen_config)
3016 - try:
3017 - success = mydepgraph._loadResumeCommand(mtimedb["resume"],
3018 - skip_masked=skip_masked)
3019 - except depgraph.UnsatisfiedResumeDep as e:
3020 - if not skip_unsatisfied:
3021 - raise
3022 -
3023 - graph = mydepgraph._dynamic_config.digraph
3024 - unsatisfied_parents = {}
3025 - traversed_nodes = set()
3026 - unsatisfied_stack = [(dep.parent, dep.atom) for dep in e.value]
3027 - while unsatisfied_stack:
3028 - pkg, atom = unsatisfied_stack.pop()
3029 - if atom is not None and \
3030 - mydepgraph._select_pkg_from_installed(
3031 - pkg.root, atom)[0] is not None:
3032 - continue
3033 - atoms = unsatisfied_parents.get(pkg)
3034 - if atoms is None:
3035 - atoms = []
3036 - unsatisfied_parents[pkg] = atoms
3037 - if atom is not None:
3038 - atoms.append(atom)
3039 - if pkg in traversed_nodes:
3040 - continue
3041 - traversed_nodes.add(pkg)
3042 -
3043 - # If this package was pulled in by a parent
3044 - # package scheduled for merge, removing this
3045 - # package may cause the parent package's
3046 - # dependency to become unsatisfied.
3047 - for parent_node, atom in \
3048 - mydepgraph._dynamic_config._parent_atoms.get(pkg, []):
3049 - if not isinstance(parent_node, Package) \
3050 - or parent_node.operation not in ("merge", "nomerge"):
3051 - continue
3052 - # We need to traverse all priorities here, in order to
3053 - # ensure that a package with an unsatisfied depenedency
3054 - # won't get pulled in, even indirectly via a soft
3055 - # dependency.
3056 - unsatisfied_stack.append((parent_node, atom))
3057 -
3058 - unsatisfied_tuples = frozenset(tuple(parent_node)
3059 - for parent_node in unsatisfied_parents
3060 - if isinstance(parent_node, Package))
3061 - pruned_mergelist = []
3062 - for x in mergelist:
3063 - if isinstance(x, list) and \
3064 - tuple(x) not in unsatisfied_tuples:
3065 - pruned_mergelist.append(x)
3066 -
3067 - # If the mergelist doesn't shrink then this loop is infinite.
3068 - if len(pruned_mergelist) == len(mergelist):
3069 - # This happens if a package can't be dropped because
3070 - # it's already installed, but it has unsatisfied PDEPEND.
3071 - raise
3072 - mergelist[:] = pruned_mergelist
3073 -
3074 - # Exclude installed packages that have been removed from the graph due
3075 - # to failure to build/install runtime dependencies after the dependent
3076 - # package has already been installed.
3077 - dropped_tasks.update((pkg, atoms) for pkg, atoms in \
3078 - unsatisfied_parents.items() if pkg.operation != "nomerge")
3079 -
3080 - del e, graph, traversed_nodes, \
3081 - unsatisfied_parents, unsatisfied_stack
3082 - continue
3083 - else:
3084 - break
3085 - return (success, mydepgraph, dropped_tasks)
3086 -
3087 - def get_mask_info(root_config, cpv, pkgsettings,
3088 - db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
3089 - try:
3090 - metadata = dict(zip(db_keys,
3091 - db.aux_get(cpv, db_keys, myrepo=myrepo)))
3092 - except KeyError:
3093 - metadata = None
3094 -
3095 - if metadata is None:
3096 - mreasons = ["corruption"]
3097 - else:
3098 - eapi = metadata['EAPI']
3099 - if not portage.eapi_is_supported(eapi):
3100 - mreasons = ['EAPI %s' % eapi]
3101 - else:
3102 - pkg = Package(type_name=pkg_type, root_config=root_config,
3103 - cpv=cpv, built=built, installed=installed, metadata=metadata)
3104 -
3105 - modified_use = None
3106 - if _pkg_use_enabled is not None:
3107 - modified_use = _pkg_use_enabled(pkg)
3108 -
3109 - mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)
3110 -
3111 - return metadata, mreasons
3112 + """
3113 + Construct a depgraph for the given resume list. This will raise
3114 + PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
3115 + TODO: Return reasons for dropped_tasks, for display/logging.
3116 + @rtype: tuple
3117 + @return: (success, depgraph, dropped_tasks)
3118 + """
3119 + skip_masked = True
3120 + skip_unsatisfied = True
3121 + mergelist = mtimedb["resume"]["mergelist"]
3122 + dropped_tasks = {}
3123 + frozen_config = _frozen_depgraph_config(settings, trees, myopts, myparams, spinner)
3124 + while True:
3125 + mydepgraph = depgraph(
3126 + settings, trees, myopts, myparams, spinner, frozen_config=frozen_config
3127 + )
3128 + try:
3129 + success = mydepgraph._loadResumeCommand(
3130 + mtimedb["resume"], skip_masked=skip_masked
3131 + )
3132 + except depgraph.UnsatisfiedResumeDep as e:
3133 + if not skip_unsatisfied:
3134 + raise
3135 +
3136 + graph = mydepgraph._dynamic_config.digraph
3137 + unsatisfied_parents = {}
3138 + traversed_nodes = set()
3139 + unsatisfied_stack = [(dep.parent, dep.atom) for dep in e.value]
3140 + while unsatisfied_stack:
3141 + pkg, atom = unsatisfied_stack.pop()
3142 + if (
3143 + atom is not None
3144 + and mydepgraph._select_pkg_from_installed(pkg.root, atom)[0]
3145 + is not None
3146 + ):
3147 + continue
3148 + atoms = unsatisfied_parents.get(pkg)
3149 + if atoms is None:
3150 + atoms = []
3151 + unsatisfied_parents[pkg] = atoms
3152 + if atom is not None:
3153 + atoms.append(atom)
3154 + if pkg in traversed_nodes:
3155 + continue
3156 + traversed_nodes.add(pkg)
3157 +
3158 + # If this package was pulled in by a parent
3159 + # package scheduled for merge, removing this
3160 + # package may cause the parent package's
3161 + # dependency to become unsatisfied.
3162 + for parent_node, atom in mydepgraph._dynamic_config._parent_atoms.get(
3163 + pkg, []
3164 + ):
3165 + if not isinstance(
3166 + parent_node, Package
3167 + ) or parent_node.operation not in ("merge", "nomerge"):
3168 + continue
3169 + # We need to traverse all priorities here, in order to
3170 	+                # ensure that a package with an unsatisfied dependency
3171 + # won't get pulled in, even indirectly via a soft
3172 + # dependency.
3173 + unsatisfied_stack.append((parent_node, atom))
3174 +
3175 + unsatisfied_tuples = frozenset(
3176 + tuple(parent_node)
3177 + for parent_node in unsatisfied_parents
3178 + if isinstance(parent_node, Package)
3179 + )
3180 + pruned_mergelist = []
3181 + for x in mergelist:
3182 + if isinstance(x, list) and tuple(x) not in unsatisfied_tuples:
3183 + pruned_mergelist.append(x)
3184 +
3185 + # If the mergelist doesn't shrink then this loop is infinite.
3186 + if len(pruned_mergelist) == len(mergelist):
3187 + # This happens if a package can't be dropped because
3188 + # it's already installed, but it has unsatisfied PDEPEND.
3189 + raise
3190 + mergelist[:] = pruned_mergelist
3191 +
3192 + # Exclude installed packages that have been removed from the graph due
3193 + # to failure to build/install runtime dependencies after the dependent
3194 + # package has already been installed.
3195 + dropped_tasks.update(
3196 + (pkg, atoms)
3197 + for pkg, atoms in unsatisfied_parents.items()
3198 + if pkg.operation != "nomerge"
3199 + )
3200 +
3201 + del e, graph, traversed_nodes, unsatisfied_parents, unsatisfied_stack
3202 + continue
3203 + else:
3204 + break
3205 + return (success, mydepgraph, dropped_tasks)
3206 +
3207 +
3208 + def get_mask_info(
3209 + root_config,
3210 + cpv,
3211 + pkgsettings,
3212 + db,
3213 + pkg_type,
3214 + built,
3215 + installed,
3216 + db_keys,
3217 + myrepo=None,
3218 + _pkg_use_enabled=None,
3219 + ):
3220 + try:
3221 + metadata = dict(zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo)))
3222 + except KeyError:
3223 + metadata = None
3224 +
3225 + if metadata is None:
3226 + mreasons = ["corruption"]
3227 + else:
3228 + eapi = metadata["EAPI"]
3229 + if not portage.eapi_is_supported(eapi):
3230 + mreasons = ["EAPI %s" % eapi]
3231 + else:
3232 + pkg = Package(
3233 + type_name=pkg_type,
3234 + root_config=root_config,
3235 + cpv=cpv,
3236 + built=built,
3237 + installed=installed,
3238 + metadata=metadata,
3239 + )
3240 +
3241 + modified_use = None
3242 + if _pkg_use_enabled is not None:
3243 + modified_use = _pkg_use_enabled(pkg)
3244 +
3245 + mreasons = get_masking_status(
3246 + pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use
3247 + )
3248 +
3249 + return metadata, mreasons
3250 +
3251
3252 def show_masked_packages(masked_packages):
3253 - shown_licenses = set()
3254 - shown_comments = set()
3255 - # Maybe there is both an ebuild and a binary. Only
3256 - # show one of them to avoid redundant appearance.
3257 - shown_cpvs = set()
3258 - have_eapi_mask = False
3259 - for (root_config, pkgsettings, cpv, repo,
3260 - metadata, mreasons) in masked_packages:
3261 - output_cpv = cpv
3262 - if repo:
3263 - output_cpv += _repo_separator + repo
3264 - if output_cpv in shown_cpvs:
3265 - continue
3266 - shown_cpvs.add(output_cpv)
3267 - eapi_masked = metadata is not None and \
3268 - not portage.eapi_is_supported(metadata["EAPI"])
3269 - if eapi_masked:
3270 - have_eapi_mask = True
3271 - # When masked by EAPI, metadata is mostly useless since
3272 - # it doesn't contain essential things like SLOT.
3273 - metadata = None
3274 - comment, filename = None, None
3275 - if not eapi_masked and \
3276 - "package.mask" in mreasons:
3277 - comment, filename = \
3278 - portage.getmaskingreason(
3279 - cpv, metadata=metadata,
3280 - settings=pkgsettings,
3281 - portdb=root_config.trees["porttree"].dbapi,
3282 - return_location=True)
3283 - missing_licenses = []
3284 - if not eapi_masked and metadata is not None:
3285 - try:
3286 - missing_licenses = \
3287 - pkgsettings._getMissingLicenses(
3288 - cpv, metadata)
3289 - except portage.exception.InvalidDependString:
3290 - # This will have already been reported
3291 - # above via mreasons.
3292 - pass
3293 -
3294 - writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",
3295 - noiselevel=-1)
3296 -
3297 - if comment and comment not in shown_comments:
3298 - writemsg(filename + ":\n" + comment + "\n",
3299 - noiselevel=-1)
3300 - shown_comments.add(comment)
3301 - portdb = root_config.trees["porttree"].dbapi
3302 - for l in missing_licenses:
3303 - if l in shown_licenses:
3304 - continue
3305 - l_path = portdb.findLicensePath(l)
3306 - if l_path is None:
3307 - continue
3308 - msg = ("A copy of the '%s' license" + \
3309 - " is located at '%s'.\n\n") % (l, l_path)
3310 - writemsg(msg, noiselevel=-1)
3311 - shown_licenses.add(l)
3312 - return have_eapi_mask
3313 + shown_licenses = set()
3314 + shown_comments = set()
3315 + # Maybe there is both an ebuild and a binary. Only
3316 + # show one of them to avoid redundant appearance.
3317 + shown_cpvs = set()
3318 + have_eapi_mask = False
3319 + for (root_config, pkgsettings, cpv, repo, metadata, mreasons) in masked_packages:
3320 + output_cpv = cpv
3321 + if repo:
3322 + output_cpv += _repo_separator + repo
3323 + if output_cpv in shown_cpvs:
3324 + continue
3325 + shown_cpvs.add(output_cpv)
3326 + eapi_masked = metadata is not None and not portage.eapi_is_supported(
3327 + metadata["EAPI"]
3328 + )
3329 + if eapi_masked:
3330 + have_eapi_mask = True
3331 + # When masked by EAPI, metadata is mostly useless since
3332 + # it doesn't contain essential things like SLOT.
3333 + metadata = None
3334 + comment, filename = None, None
3335 + if not eapi_masked and "package.mask" in mreasons:
3336 + comment, filename = portage.getmaskingreason(
3337 + cpv,
3338 + metadata=metadata,
3339 + settings=pkgsettings,
3340 + portdb=root_config.trees["porttree"].dbapi,
3341 + return_location=True,
3342 + )
3343 + missing_licenses = []
3344 + if not eapi_masked and metadata is not None:
3345 + try:
3346 + missing_licenses = pkgsettings._getMissingLicenses(cpv, metadata)
3347 + except portage.exception.InvalidDependString:
3348 + # This will have already been reported
3349 + # above via mreasons.
3350 + pass
3351 +
3352 + writemsg(
3353 + "- " + output_cpv + " (masked by: " + ", ".join(mreasons) + ")\n",
3354 + noiselevel=-1,
3355 + )
3356 +
3357 + if comment and comment not in shown_comments:
3358 + writemsg(filename + ":\n" + comment + "\n", noiselevel=-1)
3359 + shown_comments.add(comment)
3360 + portdb = root_config.trees["porttree"].dbapi
3361 + for l in missing_licenses:
3362 + if l in shown_licenses:
3363 + continue
3364 + l_path = portdb.findLicensePath(l)
3365 + if l_path is None:
3366 + continue
3367 	+            msg = "A copy of the '%s' license is located at '%s'.\n\n" % (
3368 	+                l,
3369 	+                l_path,
3370 	+            )
3371 + writemsg(msg, noiselevel=-1)
3372 + shown_licenses.add(l)
3373 + return have_eapi_mask
3374 +
3375
3376 def show_mask_docs():
3377 - writemsg("For more information, see the MASKED PACKAGES "
3378 - "section in the emerge\n", noiselevel=-1)
3379 - writemsg("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
3380 + writemsg(
3381 	+        "For more information, see the MASKED PACKAGES section in the emerge\n",
3382 + noiselevel=-1,
3383 + )
3384 + writemsg("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
3385 +
3386
3387 def show_blocker_docs_link():
3388 - writemsg("\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n", noiselevel=-1)
3389 - writemsg("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n", noiselevel=-1)
3390 - writemsg("https://wiki.gentoo.org/wiki/Handbook:X86/Working/Portage#Blocked_packages\n\n", noiselevel=-1)
3391 + writemsg(
3392 + "\nFor more information about "
3393 + + bad("Blocked Packages")
3394 + + ", please refer to the following\n",
3395 + noiselevel=-1,
3396 + )
3397 + writemsg(
3398 + "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n",
3399 + noiselevel=-1,
3400 + )
3401 + writemsg(
3402 + "https://wiki.gentoo.org/wiki/Handbook:X86/Working/Portage#Blocked_packages\n\n",
3403 + noiselevel=-1,
3404 + )
3405 +
3406
3407 def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
3408 - return [mreason.message for \
3409 - mreason in _get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=use)]
3410 + return [
3411 + mreason.message
3412 + for mreason in _get_masking_status(
3413 + pkg, pkgsettings, root_config, myrepo=myrepo, use=use
3414 + )
3415 + ]
3416 +
3417
3418 def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
3419 - mreasons = _getmaskingstatus(
3420 - pkg, settings=pkgsettings,
3421 - portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
3422 + mreasons = _getmaskingstatus(
3423 + pkg,
3424 + settings=pkgsettings,
3425 + portdb=root_config.trees["porttree"].dbapi,
3426 + myrepo=myrepo,
3427 + )
3428
3429 - if not pkg.installed:
3430 - if not pkgsettings._accept_chost(pkg.cpv, pkg._metadata):
3431 - mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
3432 - pkg._metadata["CHOST"]))
3433 + if not pkg.installed:
3434 + if not pkgsettings._accept_chost(pkg.cpv, pkg._metadata):
3435 + mreasons.append(_MaskReason("CHOST", "CHOST: %s" % pkg._metadata["CHOST"]))
3436
3437 	+    eprefix = pkgsettings["EPREFIX"]
3438 	+    if len(eprefix.rstrip("/")) > 0 and pkg.built and not pkg.installed:
3439 	+        if "EPREFIX" not in pkg._metadata:
3440 	+            mreasons.append(_MaskReason("EPREFIX", "missing EPREFIX"))
3441 	+        elif len(pkg._metadata["EPREFIX"].strip()) < len(eprefix):
3442 	+            mreasons.append(
3443 	+                _MaskReason("EPREFIX",
3444 	+                    "EPREFIX: '%s' too small" % pkg._metadata["EPREFIX"])
3445 	+            )
3446 +
3447 - if pkg.invalid:
3448 - for msgs in pkg.invalid.values():
3449 - for msg in msgs:
3450 - mreasons.append(
3451 - _MaskReason("invalid", "invalid: %s" % (msg,)))
3452 + if pkg.invalid:
3453 + for msgs in pkg.invalid.values():
3454 + for msg in msgs:
3455 + mreasons.append(_MaskReason("invalid", "invalid: %s" % (msg,)))
3456
3457 - if not pkg._metadata["SLOT"]:
3458 - mreasons.append(
3459 - _MaskReason("invalid", "SLOT: undefined"))
3460 + if not pkg._metadata["SLOT"]:
3461 + mreasons.append(_MaskReason("invalid", "SLOT: undefined"))
3462
3463 - return mreasons
3464 + return mreasons
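
The resume handling above is a prune-to-fixed-point loop: each
UnsatisfiedResumeDep walks the parent atoms of every unsatisfied node,
drops those parents from the resume mergelist, and retries, re-raising
as soon as the mergelist stops shrinking (the infinite-loop guard in
the comments). A minimal sketch of that pattern, with invented names
rather than portage's actual API:

    # Hypothetical reduction of _resume_depgraph's retry loop: prune
    # entries whose dependencies cannot be satisfied, then try again;
    # give up if pruning makes no progress.
    def prune_until_consistent(mergelist, find_unsatisfied):
        while True:
            unsatisfied = find_unsatisfied(mergelist)  # entries to drop
            if not unsatisfied:
                return mergelist
            pruned = [x for x in mergelist if x not in unsatisfied]
            if len(pruned) == len(mergelist):
                # nothing could be dropped, e.g. an unsatisfied PDEPEND
                # of a package that is already installed
                raise RuntimeError("cannot prune resume list")
            mergelist = pruned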
3465 diff --cc lib/_emerge/emergelog.py
3466 index 3562f8eb3,14439da6e..a891f5b54
3467 --- a/lib/_emerge/emergelog.py
3468 +++ b/lib/_emerge/emergelog.py
3469 @@@ -16,7 -15,8 +16,9 @@@ from portage.const import EPREFI
3470 # dblink.merge() and we don't want that to trigger log writes
3471 # unless it's really called via emerge.
3472 _disable = True
3473 	+_emerge_log_dir = EPREFIX + '/var/log'
3475 +
3476
3477 def emergelog(xterm_titles, mystr, short_msg=None):
3478
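
The _disable gate in this module exists because portage's API can be
imported by other tools that call dblink.merge() directly; logging
stays off unless the emerge frontend flips it on. A minimal sketch of
the gate pattern, with invented names:

    # Illustrative only: module-level logging gate, off by default for
    # library consumers and enabled by the command-line frontend.
    _disable = True

    def gated_log(message, sink=print):
        if _disable:
            return  # imported as a library: stay silent
        sink(message)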
3479 diff --cc lib/portage/__init__.py
3480 index de2dbfc05,13af8da09..b6b00a8c2
3481 --- a/lib/portage/__init__.py
3482 +++ b/lib/portage/__init__.py
3483 @@@ -9,146 -9,182 +9,198 @@@ VERSION = "HEAD
3484 # ===========================================================================
3485
3486 try:
3487 - import asyncio
3488 - import sys
3489 - import errno
3490 - if not hasattr(errno, 'ESTALE'):
3491 - # ESTALE may not be defined on some systems, such as interix.
3492 - errno.ESTALE = -1
3493 - import multiprocessing.util
3494 - import re
3495 - import types
3496 - import platform
3497 + import asyncio
3498 + import sys
3499 + import errno
3500 + # PREFIX LOCAL
3501 + import multiprocessing
3502
3503 - # Temporarily delete these imports, to ensure that only the
3504 - # wrapped versions are imported by portage internals.
3505 - import os
3506 - del os
3507 - import shutil
3508 - del shutil
3509 + if not hasattr(errno, "ESTALE"):
3510 + # ESTALE may not be defined on some systems, such as interix.
3511 + errno.ESTALE = -1
3512 + import multiprocessing.util
3513 + import re
3514 + import types
3515 + import platform
3516
3517 - except ImportError as e:
3518 - sys.stderr.write("\n\n")
3519 - sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
3520 - sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
3521 - sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
3522 + # Temporarily delete these imports, to ensure that only the
3523 + # wrapped versions are imported by portage internals.
3524 + import os
3525 +
3526 + del os
3527 + import shutil
3528 +
3529 + del shutil
3530
3531 - sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
3532 - sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
3533 - sys.stderr.write(" "+str(e)+"\n\n")
3534 - raise
3535 + except ImportError as e:
3536 + sys.stderr.write("\n\n")
3537 + sys.stderr.write(
3538 + "!!! Failed to complete python imports. These are internal modules for\n"
3539 + )
3540 + sys.stderr.write(
3541 + "!!! python and failure here indicates that you have a problem with python\n"
3542 + )
3543 + sys.stderr.write(
3544 + "!!! itself and thus portage is not able to continue processing.\n\n"
3545 + )
3546 +
3547 + sys.stderr.write(
3548 + "!!! You might consider starting python with verbose flags to see what has\n"
3549 + )
3550 + sys.stderr.write(
3551 + "!!! gone wrong. Here is the information we got for this exception:\n"
3552 + )
3553 + sys.stderr.write(" " + str(e) + "\n\n")
3554 + raise
3555
3556 +# BEGIN PREFIX LOCAL
3557 +# for bug #758230, on macOS the default was switched from fork to spawn,
3558 +# the latter causing issues because all kinds of things can't be
3559 +# pickled, so force fork mode for now
3560 +try:
3561 + multiprocessing.set_start_method('fork')
3562 +except RuntimeError:
3563 + pass
3564 +# END PREFIX LOCAL
3565 +
3566 try:
3567
3568 - import portage.proxy.lazyimport
3569 - import portage.proxy as proxy
3570 - proxy.lazyimport.lazyimport(globals(),
3571 - 'portage.cache.cache_errors:CacheError',
3572 - 'portage.checksum',
3573 - 'portage.checksum:perform_checksum,perform_md5,prelink_capable',
3574 - 'portage.cvstree',
3575 - 'portage.data',
3576 - 'portage.data:lchown,ostype,portage_gid,portage_uid,secpass,' + \
3577 - 'uid,userland,userpriv_groups,wheelgid',
3578 - 'portage.dbapi',
3579 - 'portage.dbapi.bintree:bindbapi,binarytree',
3580 - 'portage.dbapi.cpv_expand:cpv_expand',
3581 - 'portage.dbapi.dep_expand:dep_expand',
3582 - 'portage.dbapi.porttree:close_portdbapi_caches,FetchlistDict,' + \
3583 - 'portagetree,portdbapi',
3584 - 'portage.dbapi.vartree:dblink,merge,unmerge,vardbapi,vartree',
3585 - 'portage.dbapi.virtual:fakedbapi',
3586 - 'portage.debug',
3587 - 'portage.dep',
3588 - 'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \
3589 - 'flatten,get_operator,isjustname,isspecific,isvalidatom,' + \
3590 - 'match_from_list,match_to_list',
3591 - 'portage.dep.dep_check:dep_check,dep_eval,dep_wordreduce,dep_zapdeps',
3592 - 'portage.eclass_cache',
3593 - 'portage.elog',
3594 - 'portage.exception',
3595 - 'portage.getbinpkg',
3596 - 'portage.locks',
3597 - 'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
3598 - 'portage.mail',
3599 - 'portage.manifest:Manifest',
3600 - 'portage.output',
3601 - 'portage.output:bold,colorize',
3602 - 'portage.package.ebuild.doebuild:doebuild,' + \
3603 - 'doebuild_environment,spawn,spawnebuild',
3604 - 'portage.package.ebuild.config:autouse,best_from_dict,' + \
3605 - 'check_config_instance,config',
3606 - 'portage.package.ebuild.deprecated_profile_check:' + \
3607 - 'deprecated_profile_check',
3608 - 'portage.package.ebuild.digestcheck:digestcheck',
3609 - 'portage.package.ebuild.digestgen:digestgen',
3610 - 'portage.package.ebuild.fetch:fetch',
3611 - 'portage.package.ebuild.getmaskingreason:getmaskingreason',
3612 - 'portage.package.ebuild.getmaskingstatus:getmaskingstatus',
3613 - 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
3614 - 'portage.process',
3615 - 'portage.process:atexit_register,run_exitfuncs',
3616 - 'portage.update:dep_transform,fixdbentries,grab_updates,' + \
3617 - 'parse_updates,update_config_files,update_dbentries,' + \
3618 - 'update_dbentry',
3619 - 'portage.util',
3620 - 'portage.util:atomic_ofstream,apply_secpass_permissions,' + \
3621 - 'apply_recursive_permissions,dump_traceback,getconfig,' + \
3622 - 'grabdict,grabdict_package,grabfile,grabfile_package,' + \
3623 - 'map_dictlist_vals,new_protect_filename,normalize_path,' + \
3624 - 'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
3625 - 'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
3626 - 'writemsg_stdout,write_atomic',
3627 - 'portage.util.digraph:digraph',
3628 - 'portage.util.env_update:env_update',
3629 - 'portage.util.ExtractKernelVersion:ExtractKernelVersion',
3630 - 'portage.util.listdir:cacheddir,listdir',
3631 - 'portage.util.movefile:movefile',
3632 - 'portage.util.mtimedb:MtimeDB',
3633 - 'portage.versions',
3634 - 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,' + \
3635 - 'cpv_getkey@getCPFromCPV,endversion_keys,' + \
3636 - 'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
3637 - 'portage.xpak',
3638 - 'subprocess',
3639 - 'time',
3640 - )
3641 -
3642 - from collections import OrderedDict
3643 -
3644 - import portage.const
3645 - from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
3646 - USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
3647 - PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
3648 - EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
3649 - MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
3650 - DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
3651 - INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
3652 - INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE, \
3653 - EPREFIX, rootuid
3654 + import portage.proxy.lazyimport
3655 + import portage.proxy as proxy
3656 +
3657 + proxy.lazyimport.lazyimport(
3658 + globals(),
3659 + "portage.cache.cache_errors:CacheError",
3660 + "portage.checksum",
3661 + "portage.checksum:perform_checksum,perform_md5,prelink_capable",
3662 + "portage.cvstree",
3663 + "portage.data",
3664 + "portage.data:lchown,ostype,portage_gid,portage_uid,secpass,"
3665 + + "uid,userland,userpriv_groups,wheelgid",
3666 + "portage.dbapi",
3667 + "portage.dbapi.bintree:bindbapi,binarytree",
3668 + "portage.dbapi.cpv_expand:cpv_expand",
3669 + "portage.dbapi.dep_expand:dep_expand",
3670 + "portage.dbapi.porttree:close_portdbapi_caches,FetchlistDict,"
3671 + + "portagetree,portdbapi",
3672 + "portage.dbapi.vartree:dblink,merge,unmerge,vardbapi,vartree",
3673 + "portage.dbapi.virtual:fakedbapi",
3674 + "portage.debug",
3675 + "portage.dep",
3676 + "portage.dep:best_match_to_list,dep_getcpv,dep_getkey,"
3677 + + "flatten,get_operator,isjustname,isspecific,isvalidatom,"
3678 + + "match_from_list,match_to_list",
3679 + "portage.dep.dep_check:dep_check,dep_eval,dep_wordreduce,dep_zapdeps",
3680 + "portage.eclass_cache",
3681 + "portage.elog",
3682 + "portage.exception",
3683 + "portage.getbinpkg",
3684 + "portage.locks",
3685 + "portage.locks:lockdir,lockfile,unlockdir,unlockfile",
3686 + "portage.mail",
3687 + "portage.manifest:Manifest",
3688 + "portage.output",
3689 + "portage.output:bold,colorize",
3690 + "portage.package.ebuild.doebuild:doebuild,"
3691 + + "doebuild_environment,spawn,spawnebuild",
3692 + "portage.package.ebuild.config:autouse,best_from_dict,"
3693 + + "check_config_instance,config",
3694 	+        "portage.package.ebuild.deprecated_profile_check:deprecated_profile_check",
3695 + "portage.package.ebuild.digestcheck:digestcheck",
3696 + "portage.package.ebuild.digestgen:digestgen",
3697 + "portage.package.ebuild.fetch:fetch",
3698 + "portage.package.ebuild.getmaskingreason:getmaskingreason",
3699 + "portage.package.ebuild.getmaskingstatus:getmaskingstatus",
3700 + "portage.package.ebuild.prepare_build_dirs:prepare_build_dirs",
3701 + "portage.process",
3702 + "portage.process:atexit_register,run_exitfuncs",
3703 + "portage.update:dep_transform,fixdbentries,grab_updates,"
3704 + + "parse_updates,update_config_files,update_dbentries,"
3705 + + "update_dbentry",
3706 + "portage.util",
3707 + "portage.util:atomic_ofstream,apply_secpass_permissions,"
3708 + + "apply_recursive_permissions,dump_traceback,getconfig,"
3709 + + "grabdict,grabdict_package,grabfile,grabfile_package,"
3710 + + "map_dictlist_vals,new_protect_filename,normalize_path,"
3711 + + "pickle_read,pickle_write,stack_dictlist,stack_dicts,"
3712 + + "stack_lists,unique_array,varexpand,writedict,writemsg,"
3713 + + "writemsg_stdout,write_atomic",
3714 + "portage.util.digraph:digraph",
3715 + "portage.util.env_update:env_update",
3716 + "portage.util.ExtractKernelVersion:ExtractKernelVersion",
3717 + "portage.util.listdir:cacheddir,listdir",
3718 + "portage.util.movefile:movefile",
3719 + "portage.util.mtimedb:MtimeDB",
3720 + "portage.versions",
3721 + "portage.versions:best,catpkgsplit,catsplit,cpv_getkey,"
3722 + + "cpv_getkey@getCPFromCPV,endversion_keys,"
3723 + + "suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify",
3724 + "portage.xpak",
3725 + "subprocess",
3726 + "time",
3727 + )
3728 +
3729 + from collections import OrderedDict
3730 +
3731 + import portage.const
3732 + from portage.const import (
3733 + VDB_PATH,
3734 + PRIVATE_PATH,
3735 + CACHE_PATH,
3736 + DEPCACHE_PATH,
3737 + USER_CONFIG_PATH,
3738 + MODULES_FILE_PATH,
3739 + CUSTOM_PROFILE_PATH,
3740 + PORTAGE_BASE_PATH,
3741 + PORTAGE_BIN_PATH,
3742 + PORTAGE_PYM_PATH,
3743 + PROFILE_PATH,
3744 + LOCALE_DATA_PATH,
3745 + EBUILD_SH_BINARY,
3746 + SANDBOX_BINARY,
3747 + BASH_BINARY,
3748 + MOVE_BINARY,
3749 + PRELINK_BINARY,
3750 + WORLD_FILE,
3751 + MAKE_CONF_FILE,
3752 + MAKE_DEFAULTS_FILE,
3753 + DEPRECATED_PROFILE_FILE,
3754 + USER_VIRTUALS_FILE,
3755 + EBUILD_SH_ENV_FILE,
3756 + INVALID_ENV_FILE,
3757 + CUSTOM_MIRRORS_FILE,
3758 + CONFIG_MEMORY_FILE,
3759 + INCREMENTALS,
3760 + EAPI,
3761 + MISC_SH_BINARY,
3762 + REPO_NAME_LOC,
3763 + REPO_NAME_FILE,
3764 ++ # BEGIN PREFIX LOCAL
3765 ++ EPREFIX,
3766 ++ rootuid,
3767 ++ # END PREFIX LOCAL
3768 + )
3769
3770 except ImportError as e:
3771 - sys.stderr.write("\n\n")
3772 - sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
3773 - sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
3774 - sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the ebuild\n")
3775 - sys.stderr.write("!!! repository under '/var/db/repos/gentoo/sys-apps/portage/files/' (default).\n")
3776 - sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
3777 - sys.stderr.write("!!! a recovery of portage.\n")
3778 - sys.stderr.write(" "+str(e)+"\n\n")
3779 - raise
3780 + sys.stderr.write("\n\n")
3781 + sys.stderr.write(
3782 + "!!! Failed to complete portage imports. There are internal modules for\n"
3783 + )
3784 + sys.stderr.write(
3785 + "!!! portage and failure here indicates that you have a problem with your\n"
3786 + )
3787 + sys.stderr.write(
3788 + "!!! installation of portage. Please try a rescue portage located in the ebuild\n"
3789 + )
3790 + sys.stderr.write(
3791 + "!!! repository under '/var/db/repos/gentoo/sys-apps/portage/files/' (default).\n"
3792 + )
3793 + sys.stderr.write(
3794 + "!!! There is a README.RESCUE file that details the steps required to perform\n"
3795 + )
3796 + sys.stderr.write("!!! a recovery of portage.\n")
3797 + sys.stderr.write(" " + str(e) + "\n\n")
3798 + raise
3799
3800
3801 # We use utf_8 encoding everywhere. Previously, we used
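
The PREFIX LOCAL block above pins multiprocessing to the "fork" start
method because "spawn" (the macOS default since Python 3.8) pickles the
target and its arguments, and much of what portage passes around is not
picklable. A standalone sketch of the failure mode, assuming a POSIX
platform where "fork" is available:

    # Illustrative only: a lambda target works under "fork" (nothing is
    # pickled) but fails to pickle under "spawn".
    import multiprocessing

    def main():
        try:
            # same guard as above: ignore the error if a start method
            # has already been chosen elsewhere in the process
            multiprocessing.set_start_method("fork")
        except RuntimeError:
            pass
        p = multiprocessing.Process(target=lambda: None)
        p.start()  # under "spawn" this raises a pickling error
        p.join()

    if __name__ == "__main__":
        main()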
3802 diff --cc lib/portage/const.py
3803 index 892766c68,1edc5fcf1..f2c69a4bb
3804 --- a/lib/portage/const.py
3805 +++ b/lib/portage/const.py
3806 @@@ -58,185 -53,176 +58,196 @@@ NEWS_LIB_PATH = "var/lib/gentoo
3807
3808 # these variables get EPREFIX prepended automagically when they are
3809 # translated into their lowercase variants
3810 - DEPCACHE_PATH = "/var/cache/edb/dep"
3811 - GLOBAL_CONFIG_PATH = "/usr/share/portage/config"
3812 + DEPCACHE_PATH = "/var/cache/edb/dep"
3813 + GLOBAL_CONFIG_PATH = "/usr/share/portage/config"
3814
3815 # these variables are not used with target_root or config_root
3816 +PORTAGE_BASE_PATH = PORTAGE_BASE
3817 # NOTE: Use realpath(__file__) so that python module symlinks in site-packages
3818 # are followed back to the real location of the whole portage installation.
3819 	+#PREFIX: below should work, but I'm not sure how it affects other places
3820 # NOTE: Please keep PORTAGE_BASE_PATH in one line to help substitutions.
3821 - #PORTAGE_BASE_PATH = os.path.join(os.sep, os.sep.join(os.path.realpath(__file__.rstrip("co")).split(os.sep)[:-3]))
3822 - PORTAGE_BIN_PATH = PORTAGE_BASE_PATH + "/bin"
3823 - PORTAGE_PYM_PATH = os.path.realpath(os.path.join(__file__, '../..'))
3824 - LOCALE_DATA_PATH = PORTAGE_BASE_PATH + "/locale" # FIXME: not used
3825 - EBUILD_SH_BINARY = PORTAGE_BIN_PATH + "/ebuild.sh"
3826 - MISC_SH_BINARY = PORTAGE_BIN_PATH + "/misc-functions.sh"
3827 - SANDBOX_BINARY = EPREFIX + "/usr/bin/sandbox"
3828 - FAKEROOT_BINARY = EPREFIX + "/usr/bin/fakeroot"
3829 - BASH_BINARY = PORTAGE_BASH
3830 - MOVE_BINARY = PORTAGE_MV
3831 - PRELINK_BINARY = "/usr/sbin/prelink"
3832 + # fmt:off
3833 -PORTAGE_BASE_PATH = os.path.join(os.sep, os.sep.join(os.path.realpath(__file__.rstrip("co")).split(os.sep)[:-3]))
3834 ++# PREFIX LOCAL (from const_autotools)
3835 ++#PORTAGE_BASE_PATH = os.path.join(os.sep, os.sep.join(os.path.realpath(__file__.rstrip("co")).split(os.sep)[:-3]))
3836 + # fmt:on
3837 + PORTAGE_BIN_PATH = PORTAGE_BASE_PATH + "/bin"
3838 + PORTAGE_PYM_PATH = os.path.realpath(os.path.join(__file__, "../.."))
3839 + LOCALE_DATA_PATH = PORTAGE_BASE_PATH + "/locale" # FIXME: not used
3840 + EBUILD_SH_BINARY = PORTAGE_BIN_PATH + "/ebuild.sh"
3841 + MISC_SH_BINARY = PORTAGE_BIN_PATH + "/misc-functions.sh"
3842 -SANDBOX_BINARY = "/usr/bin/sandbox"
3843 -FAKEROOT_BINARY = "/usr/bin/fakeroot"
3844 ++# BEGIN PREFIX LOCAL
3845 ++SANDBOX_BINARY = EPREFIX + "/usr/bin/sandbox"
3846 ++FAKEROOT_BINARY = EPREFIX + "/usr/bin/fakeroot"
3847 ++# END PREFIX LOCAL
3848 + BASH_BINARY = "/bin/bash"
3849 + MOVE_BINARY = "/bin/mv"
3850 + PRELINK_BINARY = "/usr/sbin/prelink"
3851 ++# BEGIN PREFIX LOCAL
3852 +MACOSSANDBOX_BINARY = "/usr/bin/sandbox-exec"
3853 +MACOSSANDBOX_PROFILE = '''(version 1)
3854 +(allow default)
3855 +(deny file-write*)
3856 +(allow file-write* file-write-setugid
3857 +@@MACOSSANDBOX_PATHS@@)
3858 +(allow file-write-data
3859 +@@MACOSSANDBOX_PATHS_CONTENT_ONLY@@)'''
3860 +
3861 +PORTAGE_GROUPNAME = portagegroup
3862 +PORTAGE_USERNAME = portageuser
3863 ++# END PREFIX LOCAL
3864
3865 - INVALID_ENV_FILE = "/etc/spork/is/not/valid/profile.env"
3866 - MERGING_IDENTIFIER = "-MERGING-"
3867 - REPO_NAME_FILE = "repo_name"
3868 - REPO_NAME_LOC = "profiles" + "/" + REPO_NAME_FILE
3869 + INVALID_ENV_FILE = "/etc/spork/is/not/valid/profile.env"
3870 + MERGING_IDENTIFIER = "-MERGING-"
3871 + REPO_NAME_FILE = "repo_name"
3872 + REPO_NAME_LOC = "profiles" + "/" + REPO_NAME_FILE
3873
3874 - PORTAGE_PACKAGE_ATOM = "sys-apps/portage"
3875 - LIBC_PACKAGE_ATOM = "virtual/libc"
3876 - OS_HEADERS_PACKAGE_ATOM = "virtual/os-headers"
3877 - CVS_PACKAGE_ATOM = "dev-vcs/cvs"
3878 - GIT_PACKAGE_ATOM = "dev-vcs/git"
3879 - HG_PACKAGE_ATOM = "dev-vcs/mercurial"
3880 - RSYNC_PACKAGE_ATOM = "net-misc/rsync"
3881 + PORTAGE_PACKAGE_ATOM = "sys-apps/portage"
3882 + LIBC_PACKAGE_ATOM = "virtual/libc"
3883 + OS_HEADERS_PACKAGE_ATOM = "virtual/os-headers"
3884 + CVS_PACKAGE_ATOM = "dev-vcs/cvs"
3885 + GIT_PACKAGE_ATOM = "dev-vcs/git"
3886 + HG_PACKAGE_ATOM = "dev-vcs/mercurial"
3887 + RSYNC_PACKAGE_ATOM = "net-misc/rsync"
3888
3889 - INCREMENTALS = (
3890 - "ACCEPT_KEYWORDS",
3891 - "CONFIG_PROTECT",
3892 - "CONFIG_PROTECT_MASK",
3893 - "ENV_UNSET",
3894 - "FEATURES",
3895 - "IUSE_IMPLICIT",
3896 - "PRELINK_PATH",
3897 - "PRELINK_PATH_MASK",
3898 - "PROFILE_ONLY_VARIABLES",
3899 - "USE",
3900 - "USE_EXPAND",
3901 - "USE_EXPAND_HIDDEN",
3902 - "USE_EXPAND_IMPLICIT",
3903 - "USE_EXPAND_UNPREFIXED",
3904 + INCREMENTALS = (
3905 + "ACCEPT_KEYWORDS",
3906 + "CONFIG_PROTECT",
3907 + "CONFIG_PROTECT_MASK",
3908 + "ENV_UNSET",
3909 + "FEATURES",
3910 + "IUSE_IMPLICIT",
3911 + "PRELINK_PATH",
3912 + "PRELINK_PATH_MASK",
3913 + "PROFILE_ONLY_VARIABLES",
3914 + "USE",
3915 + "USE_EXPAND",
3916 + "USE_EXPAND_HIDDEN",
3917 + "USE_EXPAND_IMPLICIT",
3918 + "USE_EXPAND_UNPREFIXED",
3919 )
3920 - EBUILD_PHASES = (
3921 - "pretend",
3922 - "setup",
3923 - "unpack",
3924 - "prepare",
3925 - "configure",
3926 - "compile",
3927 - "test",
3928 - "install",
3929 - "package",
3930 - "instprep",
3931 - "preinst",
3932 - "postinst",
3933 - "prerm",
3934 - "postrm",
3935 - "nofetch",
3936 - "config",
3937 - "info",
3938 - "other",
3939 + EBUILD_PHASES = (
3940 + "pretend",
3941 + "setup",
3942 + "unpack",
3943 + "prepare",
3944 + "configure",
3945 + "compile",
3946 + "test",
3947 + "install",
3948 + "package",
3949 + "instprep",
3950 + "preinst",
3951 + "postinst",
3952 + "prerm",
3953 + "postrm",
3954 + "nofetch",
3955 + "config",
3956 + "info",
3957 + "other",
3958 + )
3959 + SUPPORTED_FEATURES = frozenset(
3960 + [
3961 + "assume-digests",
3962 + "binpkg-docompress",
3963 + "binpkg-dostrip",
3964 + "binpkg-logs",
3965 + "binpkg-multi-instance",
3966 + "buildpkg",
3967 + "buildpkg-live",
3968 + "buildsyspkg",
3969 + "candy",
3970 + "case-insensitive-fs",
3971 + "ccache",
3972 + "cgroup",
3973 + "chflags",
3974 + "clean-logs",
3975 + "collision-protect",
3976 + "compress-build-logs",
3977 + "compressdebug",
3978 + "compress-index",
3979 + "config-protect-if-modified",
3980 + "digest",
3981 + "distcc",
3982 + "distlocks",
3983 + "downgrade-backup",
3984 + "ebuild-locks",
3985 + "fail-clean",
3986 + "fakeroot",
3987 + "fixlafiles",
3988 + "force-mirror",
3989 + "force-prefix",
3990 + "getbinpkg",
3991 + "icecream",
3992 + "installsources",
3993 + "ipc-sandbox",
3994 + "keeptemp",
3995 + "keepwork",
3996 + "lmirror",
3997 + "merge-sync",
3998 + "metadata-transfer",
3999 + "mirror",
4000 + "mount-sandbox",
4001 + "multilib-strict",
4002 + "network-sandbox",
4003 + "network-sandbox-proxy",
4004 + "news",
4005 + "noauto",
4006 + "noclean",
4007 + "nodoc",
4008 + "noinfo",
4009 + "noman",
4010 + "nostrip",
4011 + "notitles",
4012 + "parallel-fetch",
4013 + "parallel-install",
4014 + "pid-sandbox",
4015 + "pkgdir-index-trusted",
4016 + "prelink-checksums",
4017 + "preserve-libs",
4018 + "protect-owned",
4019 + "python-trace",
4020 + "qa-unresolved-soname-deps",
4021 + "sandbox",
4022 + "selinux",
4023 + "sesandbox",
4024 + "sfperms",
4025 + "sign",
4026 + "skiprocheck",
4027 + "splitdebug",
4028 + "split-elog",
4029 + "split-log",
4030 + "strict",
4031 + "strict-keepdir",
4032 + "stricter",
4033 + "suidctl",
4034 + "test",
4035 + "test-fail-continue",
4036 + "unknown-features-filter",
4037 + "unknown-features-warn",
4038 + "unmerge-backup",
4039 + "unmerge-logs",
4040 + "unmerge-orphans",
4041 + "unprivileged",
4042 + "userfetch",
4043 + "userpriv",
4044 + "usersandbox",
4045 + "usersync",
4046 + "webrsync-gpg",
4047 + "xattr",
4048 ++ # PREFIX LOCAL
4049 ++ "stacked-prefix",
4050 + ]
4051 )
4052 - SUPPORTED_FEATURES = frozenset([
4053 - "assume-digests",
4054 - "binpkg-docompress",
4055 - "binpkg-dostrip",
4056 - "binpkg-logs",
4057 - "binpkg-multi-instance",
4058 - "buildpkg",
4059 - "buildsyspkg",
4060 - "candy",
4061 - "case-insensitive-fs",
4062 - "ccache",
4063 - "cgroup",
4064 - "chflags",
4065 - "clean-logs",
4066 - "collision-protect",
4067 - "compress-build-logs",
4068 - "compressdebug",
4069 - "compress-index",
4070 - "config-protect-if-modified",
4071 - "digest",
4072 - "distcc",
4073 - "distlocks",
4074 - "downgrade-backup",
4075 - "ebuild-locks",
4076 - "fail-clean",
4077 - "fakeroot",
4078 - "fixlafiles",
4079 - "force-mirror",
4080 - "force-prefix",
4081 - "getbinpkg",
4082 - "icecream",
4083 - "installsources",
4084 - "ipc-sandbox",
4085 - "keeptemp",
4086 - "keepwork",
4087 - "lmirror",
4088 - "merge-sync",
4089 - "metadata-transfer",
4090 - "mirror",
4091 - "mount-sandbox",
4092 - "multilib-strict",
4093 - "network-sandbox",
4094 - "network-sandbox-proxy",
4095 - "news",
4096 - "noauto",
4097 - "noclean",
4098 - "nodoc",
4099 - "noinfo",
4100 - "noman",
4101 - "nostrip",
4102 - "notitles",
4103 - "parallel-fetch",
4104 - "parallel-install",
4105 - "pid-sandbox",
4106 - "pkgdir-index-trusted",
4107 - "prelink-checksums",
4108 - "preserve-libs",
4109 - "protect-owned",
4110 - "python-trace",
4111 - "qa-unresolved-soname-deps",
4112 - "sandbox",
4113 - "selinux",
4114 - "sesandbox",
4115 - "sfperms",
4116 - "sign",
4117 - "skiprocheck",
4118 - "splitdebug",
4119 - "split-elog",
4120 - "split-log",
4121 - "stacked-prefix", # PREFIX LOCAL
4122 - "strict",
4123 - "strict-keepdir",
4124 - "stricter",
4125 - "suidctl",
4126 - "test",
4127 - "test-fail-continue",
4128 - "unknown-features-filter",
4129 - "unknown-features-warn",
4130 - "unmerge-backup",
4131 - "unmerge-logs",
4132 - "unmerge-orphans",
4133 - "unprivileged",
4134 - "userfetch",
4135 - "userpriv",
4136 - "usersandbox",
4137 - "usersync",
4138 - "webrsync-gpg",
4139 - "xattr",
4140 - ])
4141
4142 - EAPI = 8
4143 + EAPI = 8
4144
4145 - HASHING_BLOCKSIZE = 32768
4146 + HASHING_BLOCKSIZE = 32768
4147
4148 MANIFEST2_HASH_DEFAULTS = frozenset(["BLAKE2B", "SHA512"])
4149 - MANIFEST2_HASH_DEFAULT = "BLAKE2B"
4150 + MANIFEST2_HASH_DEFAULT = "BLAKE2B"
4151
4152 - MANIFEST2_IDENTIFIERS = ("AUX", "MISC", "DIST", "EBUILD")
4153 + MANIFEST2_IDENTIFIERS = ("AUX", "MISC", "DIST", "EBUILD")
4154
4155 # The EPREFIX for the current install is hardcoded here, but access to this
4156 # constant should be minimal, in favor of access via the EPREFIX setting of
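
The MACOSSANDBOX_PROFILE added above is written in Apple's Scheme-like
sandbox profile language: it allows everything by default, denies
writes, and then re-allows writes under whatever paths are substituted
for the @@MACOSSANDBOX_PATHS@@ placeholders before the result is handed
to /usr/bin/sandbox-exec via -p. A hedged sketch of that substitution;
the helper name and path list are invented for illustration, and
portage's actual call sites may differ:

    # Illustrative only: expand the placeholders in a profile template.
    def expand_sandbox_profile(template, writable_paths):
        paths = "\n".join('(subpath "%s")' % p for p in writable_paths)
        profile = template.replace("@@MACOSSANDBOX_PATHS@@", paths)
        return profile.replace("@@MACOSSANDBOX_PATHS_CONTENT_ONLY@@", "")

sandbox-exec -p with the expanded profile would then confine the
command's writes to the listed subpaths.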
4157 diff --cc lib/portage/data.py
4158 index d2d356f95,09a4dd079..0dac72845
4159 --- a/lib/portage/data.py
4160 +++ b/lib/portage/data.py
4161 @@@ -6,26 -6,24 +6,28 @@@ import gr
4162 import os
4163 import platform
4164 import pwd
4165 +from portage.const import PORTAGE_GROUPNAME, PORTAGE_USERNAME, EPREFIX
4166
4167 import portage
4168 - portage.proxy.lazyimport.lazyimport(globals(),
4169 - 'portage.output:colorize',
4170 - 'portage.util:writemsg',
4171 - 'portage.util.path:first_existing',
4172 - 'subprocess'
4173 +
4174 + portage.proxy.lazyimport.lazyimport(
4175 + globals(),
4176 + "portage.output:colorize",
4177 + "portage.util:writemsg",
4178 + "portage.util.path:first_existing",
4179 + "subprocess",
4180 )
4181 from portage.localization import _
4182
4183 ostype = platform.system()
4184 userland = None
4185 -if ostype == "DragonFly" or ostype.endswith("BSD"):
4186 +# Prefix always has USERLAND=GNU, even on
4187 +# FreeBSD, OpenBSD and Darwin (thank the lord!).
4188 	+# Hopefully this entire USERLAND hack can go away at some point.
4189 +if EPREFIX == "" and (ostype == "DragonFly" or ostype.endswith("BSD")):
4190 - userland = "BSD"
4191 + userland = "BSD"
4192 else:
4193 - userland = "GNU"
4194 + userland = "GNU"
4195
4196 lchown = getattr(os, "lchown", None)
4197
4198 @@@ -119,221 -134,231 +138,236 @@@ except KeyError
4199 # configurations with different constants could be used simultaneously.
4200 _initialized_globals = set()
4201
4202 +
4203 def _get_global(k):
4204 - if k in _initialized_globals:
4205 - return globals()[k]
4206 -
4207 - if k == 'secpass':
4208 -
4209 - unprivileged = False
4210 - if hasattr(portage, 'settings'):
4211 - unprivileged = "unprivileged" in portage.settings.features
4212 - else:
4213 - # The config class has equivalent code, but we also need to
4214 - # do it here if _disable_legacy_globals() has been called.
4215 - eroot_or_parent = first_existing(os.path.join(
4216 - _target_root(), _target_eprefix().lstrip(os.sep)))
4217 - try:
4218 - eroot_st = os.stat(eroot_or_parent)
4219 - except OSError:
4220 - pass
4221 - else:
4222 - unprivileged = _unprivileged_mode(
4223 - eroot_or_parent, eroot_st)
4224 -
4225 - v = 0
4226 - if uid == 0:
4227 - v = 2
4228 - elif unprivileged:
4229 - v = 2
4230 - elif _get_global('portage_gid') in os.getgroups():
4231 - v = 1
4232 -
4233 - elif k in ('portage_gid', 'portage_uid'):
4234 -
4235 - #Discover the uid and gid of the portage user/group
4236 - keyerror = False
4237 - try:
4238 - username = str(_get_global('_portage_username'))
4239 - portage_uid = pwd.getpwnam(username).pw_uid
4240 - except KeyError:
4241 - # PREFIX LOCAL: some sysadmins are insane, bug #344307
4242 - if username.isdigit():
4243 - portage_uid = int(username)
4244 - else:
4245 - keyerror = True
4246 - portage_uid = 0
4247 - # END PREFIX LOCAL
4248 -
4249 - try:
4250 - grpname = str(_get_global('_portage_grpname'))
4251 - portage_gid = grp.getgrnam(grpname).gr_gid
4252 - except KeyError:
4253 - # PREFIX LOCAL: some sysadmins are insane, bug #344307
4254 - if grpname.isdigit():
4255 - portage_gid = int(grpname)
4256 - else:
4257 - keyerror = True
4258 - portage_gid = 0
4259 - # END PREFIX LOCAL
4260 -
4261 - # Suppress this error message if both PORTAGE_GRPNAME and
4262 - # PORTAGE_USERNAME are set to "root", for things like
4263 - # Android (see bug #454060).
4264 - if keyerror and not (_get_global('_portage_username') == "root" and
4265 - _get_global('_portage_grpname') == "root"):
4266 - # PREFIX LOCAL: we need to fix this one day to distinguish prefix vs non-prefix
4267 - writemsg(colorize("BAD",
4268 - _("portage: '%s' user or '%s' group missing." % (_get_global('_portage_username'), _get_global('_portage_grpname')))) + "\n", noiselevel=-1)
4269 - writemsg(colorize("BAD",
4270 - _(" In Prefix Portage this is quite dramatic")) + "\n", noiselevel=-1)
4271 - writemsg(colorize("BAD",
4272 - _(" since it means you have thrown away yourself.")) + "\n", noiselevel=-1)
4273 - writemsg(colorize("BAD",
4274 - _(" Re-add yourself or re-bootstrap Gentoo Prefix.")) + "\n", noiselevel=-1)
4275 - # END PREFIX LOCAL
4276 - portage_group_warning()
4277 -
4278 - globals()['portage_gid'] = portage_gid
4279 - _initialized_globals.add('portage_gid')
4280 - globals()['portage_uid'] = portage_uid
4281 - _initialized_globals.add('portage_uid')
4282 -
4283 - if k == 'portage_gid':
4284 - return portage_gid
4285 - if k == 'portage_uid':
4286 - return portage_uid
4287 - raise AssertionError('unknown name: %s' % k)
4288 -
4289 - elif k == 'userpriv_groups':
4290 - v = [_get_global('portage_gid')]
4291 - if secpass >= 2:
4292 - # Get a list of group IDs for the portage user. Do not use
4293 - # grp.getgrall() since it is known to trigger spurious
4294 - # SIGPIPE problems with nss_ldap.
4295 - cmd = ["id", "-G", _portage_username]
4296 -
4297 - encoding = portage._encodings['content']
4298 - cmd = [portage._unicode_encode(x,
4299 - encoding=encoding, errors='strict') for x in cmd]
4300 - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
4301 - stderr=subprocess.STDOUT)
4302 - myoutput = proc.communicate()[0]
4303 - status = proc.wait()
4304 - if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
4305 - for x in portage._unicode_decode(myoutput,
4306 - encoding=encoding, errors='strict').split():
4307 - try:
4308 - v.append(int(x))
4309 - except ValueError:
4310 - pass
4311 - v = sorted(set(v))
4312 -
4313 - # Avoid instantiating portage.settings when the desired
4314 - # variable is set in os.environ.
4315 - elif k in ('_portage_grpname', '_portage_username'):
4316 - v = None
4317 - if k == '_portage_grpname':
4318 - env_key = 'PORTAGE_GRPNAME'
4319 - else:
4320 - env_key = 'PORTAGE_USERNAME'
4321 -
4322 - if env_key in os.environ:
4323 - v = os.environ[env_key]
4324 - elif hasattr(portage, 'settings'):
4325 - v = portage.settings.get(env_key)
4326 - else:
4327 - # The config class has equivalent code, but we also need to
4328 - # do it here if _disable_legacy_globals() has been called.
4329 - eroot_or_parent = first_existing(os.path.join(
4330 - _target_root(), _target_eprefix().lstrip(os.sep)))
4331 - try:
4332 - eroot_st = os.stat(eroot_or_parent)
4333 - except OSError:
4334 - pass
4335 - else:
4336 - if _unprivileged_mode(eroot_or_parent, eroot_st):
4337 - if k == '_portage_grpname':
4338 - try:
4339 - grp_struct = grp.getgrgid(eroot_st.st_gid)
4340 - except KeyError:
4341 - pass
4342 - else:
4343 - v = grp_struct.gr_name
4344 - else:
4345 - try:
4346 - pwd_struct = pwd.getpwuid(eroot_st.st_uid)
4347 - except KeyError:
4348 - pass
4349 - else:
4350 - v = pwd_struct.pw_name
4351 -
4352 - if v is None:
4353 	-			# PREFIX LOCAL: use var instead of hardwired 'portage'
4354 - if k == '_portage_grpname':
4355 - v = PORTAGE_GROUPNAME
4356 - else:
4357 - v = PORTAGE_USERNAME
4358 - # END PREFIX LOCAL
4359 - else:
4360 - raise AssertionError('unknown name: %s' % k)
4361 -
4362 - globals()[k] = v
4363 - _initialized_globals.add(k)
4364 - return v
4365 + if k in _initialized_globals:
4366 + return globals()[k]
4367 +
4368 + if k == "secpass":
4369 +
4370 + unprivileged = False
4371 + if hasattr(portage, "settings"):
4372 + unprivileged = "unprivileged" in portage.settings.features
4373 + else:
4374 + # The config class has equivalent code, but we also need to
4375 + # do it here if _disable_legacy_globals() has been called.
4376 + eroot_or_parent = first_existing(
4377 + os.path.join(_target_root(), _target_eprefix().lstrip(os.sep))
4378 + )
4379 + try:
4380 + eroot_st = os.stat(eroot_or_parent)
4381 + except OSError:
4382 + pass
4383 + else:
4384 + unprivileged = _unprivileged_mode(eroot_or_parent, eroot_st)
4385 +
4386 + v = 0
4387 + if uid == 0:
4388 + v = 2
4389 + elif unprivileged:
4390 + v = 2
4391 + elif _get_global("portage_gid") in os.getgroups():
4392 + v = 1
4393 +
4394 + elif k in ("portage_gid", "portage_uid"):
4395 +
4396 + # Discover the uid and gid of the portage user/group
4397 + keyerror = False
4398 + try:
4399 + portage_uid = pwd.getpwnam(_get_global("_portage_username")).pw_uid
4400 + except KeyError:
4401 - keyerror = True
4402 - portage_uid = 0
4403 	++            # PREFIX LOCAL: some sysadmins are insane, bug #344307
4404 	++            username = str(_get_global("_portage_username"))
4405 	++            if username.isdigit():
4406 	++                portage_uid = int(username)
4407 	++            else:
4408 	++                keyerror = True
4409 	++                portage_uid = 0
4410 	++            # END PREFIX LOCAL
4410 +
4411 + try:
4412 + portage_gid = grp.getgrnam(_get_global("_portage_grpname")).gr_gid
4413 + except KeyError:
4414 - keyerror = True
4415 - portage_gid = 0
4416 	++            # PREFIX LOCAL: some sysadmins are insane, bug #344307
4417 	++            grpname = str(_get_global("_portage_grpname"))
4418 	++            if grpname.isdigit():
4419 	++                portage_gid = int(grpname)
4420 	++            else:
4421 	++                keyerror = True
4422 	++                portage_gid = 0
4423 	++            # END PREFIX LOCAL
4423 +
4424 + # Suppress this error message if both PORTAGE_GRPNAME and
4425 + # PORTAGE_USERNAME are set to "root", for things like
4426 + # Android (see bug #454060).
4427 - if keyerror and not (
4428 - _get_global("_portage_username") == "root"
4429 - and _get_global("_portage_grpname") == "root"
4430 - ):
4431 - writemsg(
4432 - colorize("BAD", _("portage: 'portage' user or group missing.")) + "\n",
4433 - noiselevel=-1,
4434 - )
4435 - writemsg(
4436 - _(
4437 - " For the defaults, line 1 goes into passwd, "
4438 - "and 2 into group.\n"
4439 - ),
4440 - noiselevel=-1,
4441 - )
4442 - writemsg(
4443 - colorize(
4444 - "GOOD",
4445 - " portage:x:250:250:portage:/var/tmp/portage:/bin/false",
4446 - )
4447 - + "\n",
4448 - noiselevel=-1,
4449 - )
4450 - writemsg(
4451 - colorize("GOOD", " portage::250:portage") + "\n", noiselevel=-1
4452 - )
4453 ++ if keyerror and not (_get_global('_portage_username') == "root" and
4454 ++ _get_global('_portage_grpname') == "root"):
4455 ++ # PREFIX LOCAL: we need to fix this one day to distinguish prefix vs non-prefix
4456 	++            writemsg(colorize("BAD",
4457 	++                _("portage: '%s' user or '%s' group missing.") % (_get_global('_portage_username'), _get_global('_portage_grpname'))) + "\n", noiselevel=-1)
4458 ++ writemsg(colorize("BAD",
4459 ++ _(" In Prefix Portage this is quite dramatic")) + "\n", noiselevel=-1)
4460 ++ writemsg(colorize("BAD",
4461 ++ _(" since it means you have thrown away yourself.")) + "\n", noiselevel=-1)
4462 ++ writemsg(colorize("BAD",
4463 ++ _(" Re-add yourself or re-bootstrap Gentoo Prefix.")) + "\n", noiselevel=-1)
4464 ++ # END PREFIX LOCAL
4465 + portage_group_warning()
4466 +
4467 + globals()["portage_gid"] = portage_gid
4468 + _initialized_globals.add("portage_gid")
4469 + globals()["portage_uid"] = portage_uid
4470 + _initialized_globals.add("portage_uid")
4471 +
4472 + if k == "portage_gid":
4473 + return portage_gid
4474 + if k == "portage_uid":
4475 + return portage_uid
4476 + raise AssertionError("unknown name: %s" % k)
4477 +
4478 + elif k == "userpriv_groups":
4479 + v = [_get_global("portage_gid")]
4480 + if secpass >= 2:
4481 + # Get a list of group IDs for the portage user. Do not use
4482 + # grp.getgrall() since it is known to trigger spurious
4483 + # SIGPIPE problems with nss_ldap.
4484 + cmd = ["id", "-G", _portage_username]
4485 +
4486 + encoding = portage._encodings["content"]
4487 + cmd = [
4488 + portage._unicode_encode(x, encoding=encoding, errors="strict")
4489 + for x in cmd
4490 + ]
4491 + proc = subprocess.Popen(
4492 + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
4493 + )
4494 + myoutput = proc.communicate()[0]
4495 + status = proc.wait()
4496 + if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
4497 + for x in portage._unicode_decode(
4498 + myoutput, encoding=encoding, errors="strict"
4499 + ).split():
4500 + try:
4501 + v.append(int(x))
4502 + except ValueError:
4503 + pass
4504 + v = sorted(set(v))
4505 +
4506 + # Avoid instantiating portage.settings when the desired
4507 + # variable is set in os.environ.
4508 + elif k in ("_portage_grpname", "_portage_username"):
4509 + v = None
4510 + if k == "_portage_grpname":
4511 + env_key = "PORTAGE_GRPNAME"
4512 + else:
4513 + env_key = "PORTAGE_USERNAME"
4514 +
4515 + if env_key in os.environ:
4516 + v = os.environ[env_key]
4517 + elif hasattr(portage, "settings"):
4518 + v = portage.settings.get(env_key)
4519 + else:
4520 + # The config class has equivalent code, but we also need to
4521 + # do it here if _disable_legacy_globals() has been called.
4522 + eroot_or_parent = first_existing(
4523 + os.path.join(_target_root(), _target_eprefix().lstrip(os.sep))
4524 + )
4525 + try:
4526 + eroot_st = os.stat(eroot_or_parent)
4527 + except OSError:
4528 + pass
4529 + else:
4530 + if _unprivileged_mode(eroot_or_parent, eroot_st):
4531 + if k == "_portage_grpname":
4532 + try:
4533 + grp_struct = grp.getgrgid(eroot_st.st_gid)
4534 + except KeyError:
4535 + pass
4536 + else:
4537 + v = grp_struct.gr_name
4538 + else:
4539 + try:
4540 + pwd_struct = pwd.getpwuid(eroot_st.st_uid)
4541 + except KeyError:
4542 + pass
4543 + else:
4544 + v = pwd_struct.pw_name
4545 +
4546 + if v is None:
4547 - v = "portage"
4548 	++            # PREFIX LOCAL: use var instead of hardwired 'portage'
4549 ++ if k == '_portage_grpname':
4550 ++ v = PORTAGE_GROUPNAME
4551 ++ else:
4552 ++ v = PORTAGE_USERNAME
4553 ++ # END PREFIX LOCAL
4554 + else:
4555 + raise AssertionError("unknown name: %s" % k)
4556 +
4557 + globals()[k] = v
4558 + _initialized_globals.add(k)
4559 + return v
4560 +
4561
4562 class _GlobalProxy(portage.proxy.objectproxy.ObjectProxy):
4563
4564 - __slots__ = ('_name',)
4565 + __slots__ = ("_name",)
4566
4567 - def __init__(self, name):
4568 - portage.proxy.objectproxy.ObjectProxy.__init__(self)
4569 - object.__setattr__(self, '_name', name)
4570 + def __init__(self, name):
4571 + portage.proxy.objectproxy.ObjectProxy.__init__(self)
4572 + object.__setattr__(self, "_name", name)
4573
4574 - def _get_target(self):
4575 - return _get_global(object.__getattribute__(self, '_name'))
4576 + def _get_target(self):
4577 + return _get_global(object.__getattribute__(self, "_name"))
4578
4579 - for k in ('portage_gid', 'portage_uid', 'secpass', 'userpriv_groups',
4580 - '_portage_grpname', '_portage_username'):
4581 - globals()[k] = _GlobalProxy(k)
4582 +
4583 + for k in (
4584 + "portage_gid",
4585 + "portage_uid",
4586 + "secpass",
4587 + "userpriv_groups",
4588 + "_portage_grpname",
4589 + "_portage_username",
4590 + ):
4591 + globals()[k] = _GlobalProxy(k)
4592 del k
4593
4594 +
4595 def _init(settings):
4596 - """
4597 - Use config variables like PORTAGE_GRPNAME and PORTAGE_USERNAME to
4598 - initialize global variables. This allows settings to come from make.conf
4599 - instead of requiring them to be set in the calling environment.
4600 - """
4601 - if '_portage_grpname' not in _initialized_globals and \
4602 - '_portage_username' not in _initialized_globals:
4603 -
4604 - # Prevents "TypeError: expected string" errors
4605 - # from grp.getgrnam() with PyPy
4606 - native_string = platform.python_implementation() == 'PyPy'
4607 -
4608 	-		# PREFIX LOCAL: use var instead of hardwired 'portage'
4609 - v = settings.get('PORTAGE_GRPNAME', PORTAGE_GROUPNAME)
4610 - # END PREFIX LOCAL
4611 - if native_string:
4612 - v = portage._native_string(v)
4613 - globals()['_portage_grpname'] = v
4614 - _initialized_globals.add('_portage_grpname')
4615 -
4616 	-		# PREFIX LOCAL: use var instead of hardwired 'portage'
4617 - v = settings.get('PORTAGE_USERNAME', PORTAGE_USERNAME)
4618 - # END PREFIX LOCAL
4619 - if native_string:
4620 - v = portage._native_string(v)
4621 - globals()['_portage_username'] = v
4622 - _initialized_globals.add('_portage_username')
4623 -
4624 - if 'secpass' not in _initialized_globals:
4625 - v = 0
4626 - if uid == 0:
4627 - v = 2
4628 - elif "unprivileged" in settings.features:
4629 - v = 2
4630 - elif portage_gid in os.getgroups():
4631 - v = 1
4632 - globals()['secpass'] = v
4633 - _initialized_globals.add('secpass')
4634 + """
4635 + Use config variables like PORTAGE_GRPNAME and PORTAGE_USERNAME to
4636 + initialize global variables. This allows settings to come from make.conf
4637 + instead of requiring them to be set in the calling environment.
4638 + """
4639 + if (
4640 + "_portage_grpname" not in _initialized_globals
4641 + and "_portage_username" not in _initialized_globals
4642 + ):
4643 +
4644 + # Prevents "TypeError: expected string" errors
4645 + # from grp.getgrnam() with PyPy
4646 + native_string = platform.python_implementation() == "PyPy"
4647 +
4648 - v = settings.get("PORTAGE_GRPNAME", "portage")
4649 	++        # PREFIX LOCAL: use var instead of hardwired 'portage'
4650 ++ v = settings.get('PORTAGE_GRPNAME', PORTAGE_GROUPNAME)
4651 ++ # END PREFIX LOCAL
4652 + if native_string:
4653 + v = portage._native_string(v)
4654 - globals()["_portage_grpname"] = v
4655 - _initialized_globals.add("_portage_grpname")
4656 ++ globals()['_portage_grpname'] = v
4657 ++ _initialized_globals.add('_portage_grpname')
4658 +
4659 - v = settings.get("PORTAGE_USERNAME", "portage")
4660 	++        # PREFIX LOCAL: use var instead of hardwired 'portage'
4661 ++ v = settings.get('PORTAGE_USERNAME', PORTAGE_USERNAME)
4662 ++ # END PREFIX LOCAL
4663 + if native_string:
4664 + v = portage._native_string(v)
4665 - globals()["_portage_username"] = v
4666 - _initialized_globals.add("_portage_username")
4667 ++ globals()['_portage_username'] = v
4668 ++ _initialized_globals.add('_portage_username')
4669 +
4670 + if "secpass" not in _initialized_globals:
4671 + v = 0
4672 + if uid == 0:
4673 + v = 2
4674 + elif "unprivileged" in settings.features:
4675 + v = 2
4676 + elif portage_gid in os.getgroups():
4677 + v = 1
4678 + globals()["secpass"] = v
4679 + _initialized_globals.add("secpass")
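
The _get_global/_GlobalProxy machinery above implements lazy module
globals: names like portage_gid start out bound to proxies and are only
resolved, and then cached via _initialized_globals, on first real
access. A minimal sketch of the pattern with invented names (portage's
ObjectProxy forwards far more operations than this bare __getattr__):

    # Illustrative only: resolve-on-first-use module globals.
    _initialized_globals = set()

    def _get_global(k):
        if k in _initialized_globals:
            return globals()[k]
        if k == "answer":
            v = 42  # stands in for the expensive uid/gid lookups above
        else:
            raise AssertionError("unknown name: %s" % k)
        globals()[k] = v  # rebinds the name, replacing the proxy
        _initialized_globals.add(k)
        return v

    class _LazyProxy:
        def __init__(self, name):
            object.__setattr__(self, "_name", name)

        def __getattr__(self, attr):
            target = _get_global(object.__getattribute__(self, "_name"))
            return getattr(target, attr)

    answer = _LazyProxy("answer")
    # answer.bit_length() resolves and caches the real value on first use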
4680 diff --cc lib/portage/dbapi/bintree.py
4681 index 7e81b7879,9dbf9ee8b..8b008a93d
4682 --- a/lib/portage/dbapi/bintree.py
4683 +++ b/lib/portage/dbapi/bintree.py
4684 @@@ -57,1806 -62,1994 +62,2002 @@@ from urllib.parse import urlpars
4685
4686
4687 class UseCachedCopyOfRemoteIndex(Exception):
4688 - # If the local copy is recent enough
4689 - # then fetching the remote index can be skipped.
4690 - pass
4691 + # If the local copy is recent enough
4692 + # then fetching the remote index can be skipped.
4693 + pass
4694 +
4695
4696 class bindbapi(fakedbapi):
4697 - _known_keys = frozenset(list(fakedbapi._known_keys) + \
4698 - ["CHOST", "repository", "USE"])
4699 - _pkg_str_aux_keys = fakedbapi._pkg_str_aux_keys + ("BUILD_ID", "BUILD_TIME", "_mtime_")
4700 -
4701 - def __init__(self, mybintree=None, **kwargs):
4702 - # Always enable multi_instance mode for bindbapi indexing. This
4703 - # does not affect the local PKGDIR file layout, since that is
4704 - # controlled independently by FEATURES=binpkg-multi-instance.
4705 - # The multi_instance mode is useful for the following reasons:
4706 - # * binary packages with the same cpv from multiple binhosts
4707 - # can be considered simultaneously
4708 - # * if binpkg-multi-instance is disabled, it's still possible
4709 - # to properly access a PKGDIR which has binpkg-multi-instance
4710 - # layout (or mixed layout)
4711 - fakedbapi.__init__(self, exclusive_slots=False,
4712 - multi_instance=True, **kwargs)
4713 - self.bintree = mybintree
4714 - self.move_ent = mybintree.move_ent
4715 - # Selectively cache metadata in order to optimize dep matching.
4716 - self._aux_cache_keys = set(
4717 - ["BDEPEND", "BUILD_ID", "BUILD_TIME", "CHOST", "DEFINED_PHASES",
4718 - "DEPEND", "EAPI", "IDEPEND", "IUSE", "KEYWORDS",
4719 - "LICENSE", "MD5", "PDEPEND", "PROPERTIES",
4720 - "PROVIDES", "RDEPEND", "repository", "REQUIRES", "RESTRICT",
4721 - "SIZE", "SLOT", "USE", "_mtime_", "EPREFIX"
4722 - ])
4723 - self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
4724 - self._aux_cache = {}
4725 -
4726 - @property
4727 - def writable(self):
4728 - """
4729 - Check if PKGDIR is writable, or permissions are sufficient
4730 - to create it if it does not exist yet.
4731 - @rtype: bool
4732 - @return: True if PKGDIR is writable or can be created,
4733 - False otherwise
4734 - """
4735 - return os.access(first_existing(self.bintree.pkgdir), os.W_OK)
4736 -
4737 - def match(self, *pargs, **kwargs):
4738 - if self.bintree and not self.bintree.populated:
4739 - self.bintree.populate()
4740 - return fakedbapi.match(self, *pargs, **kwargs)
4741 -
4742 - def cpv_exists(self, cpv, myrepo=None):
4743 - if self.bintree and not self.bintree.populated:
4744 - self.bintree.populate()
4745 - return fakedbapi.cpv_exists(self, cpv)
4746 -
4747 - def cpv_inject(self, cpv, **kwargs):
4748 - if not self.bintree.populated:
4749 - self.bintree.populate()
4750 - fakedbapi.cpv_inject(self, cpv,
4751 - metadata=cpv._metadata, **kwargs)
4752 -
4753 - def cpv_remove(self, cpv):
4754 - if not self.bintree.populated:
4755 - self.bintree.populate()
4756 - fakedbapi.cpv_remove(self, cpv)
4757 -
4758 - def aux_get(self, mycpv, wants, myrepo=None):
4759 - if self.bintree and not self.bintree.populated:
4760 - self.bintree.populate()
4761 - # Support plain string for backward compatibility with API
4762 - # consumers (including portageq, which passes in a cpv from
4763 - # a command-line argument).
4764 - instance_key = self._instance_key(mycpv,
4765 - support_string=True)
4766 - if not self._known_keys.intersection(
4767 - wants).difference(self._aux_cache_keys):
4768 - aux_cache = self.cpvdict[instance_key]
4769 - if aux_cache is not None:
4770 - return [aux_cache.get(x, "") for x in wants]
4771 - mysplit = mycpv.split("/")
4772 - mylist = []
4773 - add_pkg = self.bintree._additional_pkgs.get(instance_key)
4774 - if add_pkg is not None:
4775 - return add_pkg._db.aux_get(add_pkg, wants)
4776 - if not self.bintree._remotepkgs or \
4777 - not self.bintree.isremote(mycpv):
4778 - try:
4779 - tbz2_path = self.bintree._pkg_paths[instance_key]
4780 - except KeyError:
4781 - raise KeyError(mycpv)
4782 - tbz2_path = os.path.join(self.bintree.pkgdir, tbz2_path)
4783 - try:
4784 - st = os.lstat(tbz2_path)
4785 - except OSError:
4786 - raise KeyError(mycpv)
4787 - metadata_bytes = portage.xpak.tbz2(tbz2_path).get_data()
4788 - def getitem(k):
4789 - if k == "_mtime_":
4790 - return str(st[stat.ST_MTIME])
4791 - if k == "SIZE":
4792 - return str(st.st_size)
4793 - v = metadata_bytes.get(_unicode_encode(k,
4794 - encoding=_encodings['repo.content'],
4795 - errors='backslashreplace'))
4796 - if v is not None:
4797 - v = _unicode_decode(v,
4798 - encoding=_encodings['repo.content'], errors='replace')
4799 - return v
4800 - else:
4801 - getitem = self.cpvdict[instance_key].get
4802 - mydata = {}
4803 - mykeys = wants
4804 - for x in mykeys:
4805 - myval = getitem(x)
4806 - # myval is None if the key doesn't exist
4807 - # or the tbz2 is corrupt.
4808 - if myval:
4809 - mydata[x] = " ".join(myval.split())
4810 -
4811 - if not mydata.setdefault('EAPI', '0'):
4812 - mydata['EAPI'] = '0'
4813 -
4814 - return [mydata.get(x, '') for x in wants]
4815 -
4816 - def aux_update(self, cpv, values):
4817 - if not self.bintree.populated:
4818 - self.bintree.populate()
4819 - build_id = None
4820 - try:
4821 - build_id = cpv.build_id
4822 - except AttributeError:
4823 - if self.bintree._multi_instance:
4824 - # The cpv.build_id attribute is required if we are in
4825 - # multi-instance mode, since otherwise we won't know
4826 - # which instance to update.
4827 - raise
4828 - else:
4829 - cpv = self._instance_key(cpv, support_string=True)[0]
4830 - build_id = cpv.build_id
4831 -
4832 - tbz2path = self.bintree.getname(cpv)
4833 - if not os.path.exists(tbz2path):
4834 - raise KeyError(cpv)
4835 - mytbz2 = portage.xpak.tbz2(tbz2path)
4836 - mydata = mytbz2.get_data()
4837 -
4838 - for k, v in values.items():
4839 - k = _unicode_encode(k,
4840 - encoding=_encodings['repo.content'], errors='backslashreplace')
4841 - v = _unicode_encode(v,
4842 - encoding=_encodings['repo.content'], errors='backslashreplace')
4843 - mydata[k] = v
4844 -
4845 - for k, v in list(mydata.items()):
4846 - if not v:
4847 - del mydata[k]
4848 - mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
4849 - # inject will clear stale caches via cpv_inject.
4850 - self.bintree.inject(cpv)
4851 -
4852 -
4853 - @coroutine
4854 - def unpack_metadata(self, pkg, dest_dir, loop=None):
4855 - """
4856 - Unpack package metadata to a directory. This method is a coroutine.
4857 -
4858 - @param pkg: package to unpack
4859 - @type pkg: _pkg_str or portage.config
4860 - @param dest_dir: destination directory
4861 - @type dest_dir: str
4862 - """
4863 - loop = asyncio._wrap_loop(loop)
4864 - if isinstance(pkg, _pkg_str):
4865 - cpv = pkg
4866 - else:
4867 - cpv = pkg.mycpv
4868 - key = self._instance_key(cpv)
4869 - add_pkg = self.bintree._additional_pkgs.get(key)
4870 - if add_pkg is not None:
4871 - yield add_pkg._db.unpack_metadata(pkg, dest_dir, loop=loop)
4872 - else:
4873 - tbz2_file = self.bintree.getname(cpv)
4874 - yield loop.run_in_executor(ForkExecutor(loop=loop),
4875 - portage.xpak.tbz2(tbz2_file).unpackinfo, dest_dir)
4876 -
4877 - @coroutine
4878 - def unpack_contents(self, pkg, dest_dir, loop=None):
4879 - """
4880 - Unpack package contents to a directory. This method is a coroutine.
4881 -
4882 - @param pkg: package to unpack
4883 - @type pkg: _pkg_str or portage.config
4884 - @param dest_dir: destination directory
4885 - @type dest_dir: str
4886 - """
4887 - loop = asyncio._wrap_loop(loop)
4888 - if isinstance(pkg, _pkg_str):
4889 - settings = self.settings
4890 - cpv = pkg
4891 - else:
4892 - settings = pkg
4893 - cpv = settings.mycpv
4894 -
4895 - pkg_path = self.bintree.getname(cpv)
4896 - if pkg_path is not None:
4897 -
4898 - extractor = BinpkgExtractorAsync(
4899 - background=settings.get('PORTAGE_BACKGROUND') == '1',
4900 - env=settings.environ(),
4901 - features=settings.features,
4902 - image_dir=dest_dir,
4903 - pkg=cpv, pkg_path=pkg_path,
4904 - logfile=settings.get('PORTAGE_LOG_FILE'),
4905 - scheduler=SchedulerInterface(loop))
4906 -
4907 - extractor.start()
4908 - yield extractor.async_wait()
4909 - if extractor.returncode != os.EX_OK:
4910 - raise PortageException("Error Extracting '{}'".format(pkg_path))
4911 -
4912 - else:
4913 - instance_key = self._instance_key(cpv)
4914 - add_pkg = self.bintree._additional_pkgs.get(instance_key)
4915 - if add_pkg is None:
4916 - raise portage.exception.PackageNotFound(cpv)
4917 - yield add_pkg._db.unpack_contents(pkg, dest_dir, loop=loop)
4918 -
4919 - def cp_list(self, *pargs, **kwargs):
4920 - if not self.bintree.populated:
4921 - self.bintree.populate()
4922 - return fakedbapi.cp_list(self, *pargs, **kwargs)
4923 -
4924 - def cp_all(self, sort=False):
4925 - if not self.bintree.populated:
4926 - self.bintree.populate()
4927 - return fakedbapi.cp_all(self, sort=sort)
4928 -
4929 - def cpv_all(self):
4930 - if not self.bintree.populated:
4931 - self.bintree.populate()
4932 - return fakedbapi.cpv_all(self)
4933 -
4934 - def getfetchsizes(self, pkg):
4935 - """
4936 - This will raise MissingSignature if SIZE signature is not available,
4937 - or InvalidSignature if SIZE signature is invalid.
4938 - """
4939 -
4940 - if not self.bintree.populated:
4941 - self.bintree.populate()
4942 -
4943 - pkg = getattr(pkg, 'cpv', pkg)
4944 -
4945 - filesdict = {}
4946 - if not self.bintree.isremote(pkg):
4947 - pass
4948 - else:
4949 - metadata = self.bintree._remotepkgs[self._instance_key(pkg)]
4950 - try:
4951 - size = int(metadata["SIZE"])
4952 - except KeyError:
4953 - raise portage.exception.MissingSignature("SIZE")
4954 - except ValueError:
4955 - raise portage.exception.InvalidSignature(
4956 - "SIZE: %s" % metadata["SIZE"])
4957 - else:
4958 - filesdict[os.path.basename(self.bintree.getname(pkg))] = size
4959 -
4960 - return filesdict
4961 + _known_keys = frozenset(
4962 + list(fakedbapi._known_keys) + ["CHOST", "repository", "USE"]
4963 + )
4964 + _pkg_str_aux_keys = fakedbapi._pkg_str_aux_keys + (
4965 + "BUILD_ID",
4966 + "BUILD_TIME",
4967 + "_mtime_",
4968 + )
4969 +
4970 + def __init__(self, mybintree=None, **kwargs):
4971 + # Always enable multi_instance mode for bindbapi indexing. This
4972 + # does not affect the local PKGDIR file layout, since that is
4973 + # controlled independently by FEATURES=binpkg-multi-instance.
4974 + # The multi_instance mode is useful for the following reasons:
4975 + # * binary packages with the same cpv from multiple binhosts
4976 + # can be considered simultaneously
4977 + # * if binpkg-multi-instance is disabled, it's still possible
4978 + # to properly access a PKGDIR which has binpkg-multi-instance
4979 + # layout (or mixed layout)
4980 + fakedbapi.__init__(self, exclusive_slots=False, multi_instance=True, **kwargs)
4981 + self.bintree = mybintree
4982 + self.move_ent = mybintree.move_ent
4983 + # Selectively cache metadata in order to optimize dep matching.
4984 + self._aux_cache_keys = set(
4985 + [
4986 + "BDEPEND",
4987 + "BUILD_ID",
4988 + "BUILD_TIME",
4989 + "CHOST",
4990 + "DEFINED_PHASES",
4991 + "DEPEND",
4992 + "EAPI",
4993 + "IDEPEND",
4994 + "IUSE",
4995 + "KEYWORDS",
4996 + "LICENSE",
4997 + "MD5",
4998 + "PDEPEND",
4999 + "PROPERTIES",
5000 + "PROVIDES",
5001 + "RDEPEND",
5002 + "repository",
5003 + "REQUIRES",
5004 + "RESTRICT",
5005 + "SIZE",
5006 + "SLOT",
5007 + "USE",
5008 + "_mtime_",
5009 ++ # PREFIX LOCAL
5010 ++ "EPREFIX",
5011 + ]
5012 + )
5013 + self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
5014 + self._aux_cache = {}
5015 +
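A toy illustration of why multi_instance indexing lets same-cpv packages
coexist; the key shape here is an assumption, not portage's actual
_instance_key implementation:

    def toy_instance_key(cpv, build_id=None, multi_instance=True):
        return (cpv, build_id) if multi_instance else cpv

    assert toy_instance_key("app-misc/foo-1", 1) != toy_instance_key("app-misc/foo-1", 2)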
5016 + @property
5017 + def writable(self):
5018 + """
5019 + Check if PKGDIR is writable, or permissions are sufficient
5020 + to create it if it does not exist yet.
5021 + @rtype: bool
5022 + @return: True if PKGDIR is writable or can be created,
5023 + False otherwise
5024 + """
5025 + return os.access(first_existing(self.bintree.pkgdir), os.W_OK)
5026 +
5027 + def match(self, *pargs, **kwargs):
5028 + if self.bintree and not self.bintree.populated:
5029 + self.bintree.populate()
5030 + return fakedbapi.match(self, *pargs, **kwargs)
5031 +
5032 + def cpv_exists(self, cpv, myrepo=None):
5033 + if self.bintree and not self.bintree.populated:
5034 + self.bintree.populate()
5035 + return fakedbapi.cpv_exists(self, cpv)
5036 +
5037 + def cpv_inject(self, cpv, **kwargs):
5038 + if not self.bintree.populated:
5039 + self.bintree.populate()
5040 + fakedbapi.cpv_inject(self, cpv, metadata=cpv._metadata, **kwargs)
5041 +
5042 + def cpv_remove(self, cpv):
5043 + if not self.bintree.populated:
5044 + self.bintree.populate()
5045 + fakedbapi.cpv_remove(self, cpv)
5046 +
5047 + def aux_get(self, mycpv, wants, myrepo=None):
5048 + if self.bintree and not self.bintree.populated:
5049 + self.bintree.populate()
5050 + # Support plain string for backward compatibility with API
5051 + # consumers (including portageq, which passes in a cpv from
5052 + # a command-line argument).
5053 + instance_key = self._instance_key(mycpv, support_string=True)
5054 + if not self._known_keys.intersection(wants).difference(self._aux_cache_keys):
5055 + aux_cache = self.cpvdict[instance_key]
5056 + if aux_cache is not None:
5057 + return [aux_cache.get(x, "") for x in wants]
5058 + mysplit = mycpv.split("/")
5059 + mylist = []
5060 + add_pkg = self.bintree._additional_pkgs.get(instance_key)
5061 + if add_pkg is not None:
5062 + return add_pkg._db.aux_get(add_pkg, wants)
5063 + if not self.bintree._remotepkgs or not self.bintree.isremote(mycpv):
5064 + try:
5065 + tbz2_path = self.bintree._pkg_paths[instance_key]
5066 + except KeyError:
5067 + raise KeyError(mycpv)
5068 + tbz2_path = os.path.join(self.bintree.pkgdir, tbz2_path)
5069 + try:
5070 + st = os.lstat(tbz2_path)
5071 + except OSError:
5072 + raise KeyError(mycpv)
5073 + metadata_bytes = portage.xpak.tbz2(tbz2_path).get_data()
5074 +
5075 + def getitem(k):
5076 + if k == "_mtime_":
5077 + return str(st[stat.ST_MTIME])
5078 + if k == "SIZE":
5079 + return str(st.st_size)
5080 + v = metadata_bytes.get(
5081 + _unicode_encode(
5082 + k,
5083 + encoding=_encodings["repo.content"],
5084 + errors="backslashreplace",
5085 + )
5086 + )
5087 + if v is not None:
5088 + v = _unicode_decode(
5089 + v, encoding=_encodings["repo.content"], errors="replace"
5090 + )
5091 + return v
5092 +
5093 + else:
5094 + getitem = self.cpvdict[instance_key].get
5095 + mydata = {}
5096 + mykeys = wants
5097 + for x in mykeys:
5098 + myval = getitem(x)
5099 + # myval is None if the key doesn't exist
5100 + # or the tbz2 is corrupt.
5101 + if myval:
5102 + mydata[x] = " ".join(myval.split())
5103 +
5104 + if not mydata.setdefault("EAPI", "0"):
5105 + mydata["EAPI"] = "0"
5106 +
5107 + return [mydata.get(x, "") for x in wants]
5108 +
5109 + def aux_update(self, cpv, values):
5110 + if not self.bintree.populated:
5111 + self.bintree.populate()
5112 + build_id = None
5113 + try:
5114 + build_id = cpv.build_id
5115 + except AttributeError:
5116 + if self.bintree._multi_instance:
5117 + # The cpv.build_id attribute is required if we are in
5118 + # multi-instance mode, since otherwise we won't know
5119 + # which instance to update.
5120 + raise
5121 + else:
5122 + cpv = self._instance_key(cpv, support_string=True)[0]
5123 + build_id = cpv.build_id
5124 +
5125 + tbz2path = self.bintree.getname(cpv)
5126 + if not os.path.exists(tbz2path):
5127 + raise KeyError(cpv)
5128 + mytbz2 = portage.xpak.tbz2(tbz2path)
5129 + mydata = mytbz2.get_data()
5130 +
5131 + for k, v in values.items():
5132 + k = _unicode_encode(
5133 + k, encoding=_encodings["repo.content"], errors="backslashreplace"
5134 + )
5135 + v = _unicode_encode(
5136 + v, encoding=_encodings["repo.content"], errors="backslashreplace"
5137 + )
5138 + mydata[k] = v
5139 +
5140 + for k, v in list(mydata.items()):
5141 + if not v:
5142 + del mydata[k]
5143 + mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
5144 + # inject will clear stale caches via cpv_inject.
5145 + self.bintree.inject(cpv)
5146 +
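aux_update() above follows a read-modify-recompose cycle on the xpak
mapping. A hedged sketch of just the dict transformation, with the
encoding choice assumed:

    def apply_updates(mydata, values):
        for k, v in values.items():
            mydata[k.encode("utf_8")] = v.encode("utf_8")
        for k in [k for k, v in list(mydata.items()) if not v]:
            del mydata[k]  # empty values are dropped before recompose
        return mydata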
5147 + async def unpack_metadata(self, pkg, dest_dir, loop=None):
5148 + """
5149 + Unpack package metadata to a directory. This method is a coroutine.
5150 +
5151 + @param pkg: package to unpack
5152 + @type pkg: _pkg_str or portage.config
5153 + @param dest_dir: destination directory
5154 + @type dest_dir: str
5155 + """
5156 + loop = asyncio._wrap_loop(loop)
5157 + if isinstance(pkg, _pkg_str):
5158 + cpv = pkg
5159 + else:
5160 + cpv = pkg.mycpv
5161 + key = self._instance_key(cpv)
5162 + add_pkg = self.bintree._additional_pkgs.get(key)
5163 + if add_pkg is not None:
5164 + await add_pkg._db.unpack_metadata(pkg, dest_dir, loop=loop)
5165 + else:
5166 + tbz2_file = self.bintree.getname(cpv)
5167 + await loop.run_in_executor(
5168 + ForkExecutor(loop=loop),
5169 + portage.xpak.tbz2(tbz2_file).unpackinfo,
5170 + dest_dir,
5171 + )
5172 +
5173 + async def unpack_contents(self, pkg, dest_dir, loop=None):
5174 + """
5175 + Unpack package contents to a directory. This method is a coroutine.
5176 +
5177 + @param pkg: package to unpack
5178 + @type pkg: _pkg_str or portage.config
5179 + @param dest_dir: destination directory
5180 + @type dest_dir: str
5181 + """
5182 + loop = asyncio._wrap_loop(loop)
5183 + if isinstance(pkg, _pkg_str):
5184 + settings = self.settings
5185 + cpv = pkg
5186 + else:
5187 + settings = pkg
5188 + cpv = settings.mycpv
5189 +
5190 + pkg_path = self.bintree.getname(cpv)
5191 + if pkg_path is not None:
5192 +
5193 + extractor = BinpkgExtractorAsync(
5194 + background=settings.get("PORTAGE_BACKGROUND") == "1",
5195 + env=settings.environ(),
5196 + features=settings.features,
5197 + image_dir=dest_dir,
5198 + pkg=cpv,
5199 + pkg_path=pkg_path,
5200 + logfile=settings.get("PORTAGE_LOG_FILE"),
5201 + scheduler=SchedulerInterface(loop),
5202 + )
5203 +
5204 + extractor.start()
5205 + await extractor.async_wait()
5206 + if extractor.returncode != os.EX_OK:
5207 + raise PortageException("Error Extracting '{}'".format(pkg_path))
5208 +
5209 + else:
5210 + instance_key = self._instance_key(cpv)
5211 + add_pkg = self.bintree._additional_pkgs.get(instance_key)
5212 + if add_pkg is None:
5213 + raise portage.exception.PackageNotFound(cpv)
5214 + await add_pkg._db.unpack_contents(pkg, dest_dir, loop=loop)
5215 +
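Both unpack methods were converted from @coroutine generators to native
coroutines in this merge. The executor offload in unpack_metadata() boils
down to the following sketch, using the default executor in place of
portage's ForkExecutor:

    import asyncio

    async def offload(blocking_func, *args):
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, blocking_func, *args)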
5216 + def cp_list(self, *pargs, **kwargs):
5217 + if not self.bintree.populated:
5218 + self.bintree.populate()
5219 + return fakedbapi.cp_list(self, *pargs, **kwargs)
5220 +
5221 + def cp_all(self, sort=False):
5222 + if not self.bintree.populated:
5223 + self.bintree.populate()
5224 + return fakedbapi.cp_all(self, sort=sort)
5225 +
5226 + def cpv_all(self):
5227 + if not self.bintree.populated:
5228 + self.bintree.populate()
5229 + return fakedbapi.cpv_all(self)
5230 +
5231 + def getfetchsizes(self, pkg):
5232 + """
5233 + This will raise MissingSignature if SIZE signature is not available,
5234 + or InvalidSignature if SIZE signature is invalid.
5235 + """
5236 +
5237 + if not self.bintree.populated:
5238 + self.bintree.populate()
5239 +
5240 + pkg = getattr(pkg, "cpv", pkg)
5241 +
5242 + filesdict = {}
5243 + if not self.bintree.isremote(pkg):
5244 + pass
5245 + else:
5246 + metadata = self.bintree._remotepkgs[self._instance_key(pkg)]
5247 + try:
5248 + size = int(metadata["SIZE"])
5249 + except KeyError:
5250 + raise portage.exception.MissingSignature("SIZE")
5251 + except ValueError:
5252 + raise portage.exception.InvalidSignature("SIZE: %s" % metadata["SIZE"])
5253 + else:
5254 + filesdict[os.path.basename(self.bintree.getname(pkg))] = size
5255 +
5256 + return filesdict
5257
5258
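Nearly every bindbapi method opens with the same populate-on-first-use
guard. A hypothetical decorator capturing the idiom (not part of portage):

    import functools

    def ensure_populated(method):
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            if self.bintree and not self.bintree.populated:
                self.bintree.populate()
            return method(self, *args, **kwargs)
        return wrapper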
5259 class binarytree:
5260 - "this tree scans for a list of all packages available in PKGDIR"
5261 - def __init__(self, _unused=DeprecationWarning, pkgdir=None,
5262 - virtual=DeprecationWarning, settings=None):
5263 -
5264 - if pkgdir is None:
5265 - raise TypeError("pkgdir parameter is required")
5266 -
5267 - if settings is None:
5268 - raise TypeError("settings parameter is required")
5269 -
5270 - if _unused is not DeprecationWarning:
5271 - warnings.warn("The first parameter of the "
5272 - "portage.dbapi.bintree.binarytree"
5273 - " constructor is now unused. Instead "
5274 - "settings['ROOT'] is used.",
5275 - DeprecationWarning, stacklevel=2)
5276 -
5277 - if virtual is not DeprecationWarning:
5278 - warnings.warn("The 'virtual' parameter of the "
5279 - "portage.dbapi.bintree.binarytree"
5280 - " constructor is unused",
5281 - DeprecationWarning, stacklevel=2)
5282 -
5283 - if True:
5284 - self.pkgdir = normalize_path(pkgdir)
5285 - # NOTE: Even if binpkg-multi-instance is disabled, it's
5286 - # still possible to access a PKGDIR which uses the
5287 - # binpkg-multi-instance layout (or mixed layout).
5288 - self._multi_instance = ("binpkg-multi-instance" in
5289 - settings.features)
5290 - if self._multi_instance:
5291 - self._allocate_filename = self._allocate_filename_multi
5292 - self.dbapi = bindbapi(self, settings=settings)
5293 - self.update_ents = self.dbapi.update_ents
5294 - self.move_slot_ent = self.dbapi.move_slot_ent
5295 - self.populated = 0
5296 - self.tree = {}
5297 - self._binrepos_conf = None
5298 - self._remote_has_index = False
5299 - self._remotepkgs = None # remote metadata indexed by cpv
5300 - self._additional_pkgs = {}
5301 - self.invalids = []
5302 - self.settings = settings
5303 - self._pkg_paths = {}
5304 - self._populating = False
5305 - self._all_directory = os.path.isdir(
5306 - os.path.join(self.pkgdir, "All"))
5307 - self._pkgindex_version = 0
5308 - self._pkgindex_hashes = ["MD5","SHA1"]
5309 - self._pkgindex_file = os.path.join(self.pkgdir, "Packages")
5310 - self._pkgindex_keys = self.dbapi._aux_cache_keys.copy()
5311 - self._pkgindex_keys.update(["CPV", "SIZE"])
5312 - self._pkgindex_aux_keys = \
5313 - ["BASE_URI", "BDEPEND", "BUILD_ID", "BUILD_TIME", "CHOST",
5314 - "DEFINED_PHASES", "DEPEND", "DESCRIPTION", "EAPI", "FETCHCOMMAND",
5315 - "IDEPEND", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND",
5316 - "PKGINDEX_URI", "PROPERTIES", "PROVIDES",
5317 - "RDEPEND", "repository", "REQUIRES", "RESTRICT", "RESUMECOMMAND",
5318 - "SIZE", "SLOT", "USE", "EPREFIX"]
5319 - self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
5320 - self._pkgindex_use_evaluated_keys = \
5321 - ("BDEPEND", "DEPEND", "IDEPEND", "LICENSE", "RDEPEND",
5322 - "PDEPEND", "PROPERTIES", "RESTRICT")
5323 - self._pkgindex_header = None
5324 - self._pkgindex_header_keys = set([
5325 - "ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
5326 - "ACCEPT_PROPERTIES", "ACCEPT_RESTRICT", "CBUILD",
5327 - "CONFIG_PROTECT", "CONFIG_PROTECT_MASK", "FEATURES",
5328 - "GENTOO_MIRRORS", "INSTALL_MASK", "IUSE_IMPLICIT", "USE",
5329 - "USE_EXPAND", "USE_EXPAND_HIDDEN", "USE_EXPAND_IMPLICIT",
5330 - "USE_EXPAND_UNPREFIXED",
5331 - "EPREFIX"])
5332 - self._pkgindex_default_pkg_data = {
5333 - "BDEPEND" : "",
5334 - "BUILD_ID" : "",
5335 - "BUILD_TIME" : "",
5336 - "DEFINED_PHASES" : "",
5337 - "DEPEND" : "",
5338 - "EAPI" : "0",
5339 - "IDEPEND" : "",
5340 - "IUSE" : "",
5341 - "KEYWORDS": "",
5342 - "LICENSE" : "",
5343 - "PATH" : "",
5344 - "PDEPEND" : "",
5345 - "PROPERTIES" : "",
5346 - "PROVIDES": "",
5347 - "RDEPEND" : "",
5348 - "REQUIRES": "",
5349 - "RESTRICT": "",
5350 - "SLOT" : "0",
5351 - "USE" : "",
5352 - }
5353 - self._pkgindex_inherited_keys = ["CHOST", "repository", "EPREFIX"]
5354 -
5355 - # Populate the header with appropriate defaults.
5356 - self._pkgindex_default_header_data = {
5357 - "CHOST" : self.settings.get("CHOST", ""),
5358 - "repository" : "",
5359 - }
5360 -
5361 - self._pkgindex_translated_keys = (
5362 - ("DESCRIPTION" , "DESC"),
5363 - ("_mtime_" , "MTIME"),
5364 - ("repository" , "REPO"),
5365 - )
5366 -
5367 - self._pkgindex_allowed_pkg_keys = set(chain(
5368 - self._pkgindex_keys,
5369 - self._pkgindex_aux_keys,
5370 - self._pkgindex_hashes,
5371 - self._pkgindex_default_pkg_data,
5372 - self._pkgindex_inherited_keys,
5373 - chain(*self._pkgindex_translated_keys)
5374 - ))
5375 -
5376 - @property
5377 - def root(self):
5378 - warnings.warn("The root attribute of "
5379 - "portage.dbapi.bintree.binarytree"
5380 - " is deprecated. Use "
5381 - "settings['ROOT'] instead.",
5382 - DeprecationWarning, stacklevel=3)
5383 - return self.settings['ROOT']
5384 -
5385 - def move_ent(self, mylist, repo_match=None):
5386 - if not self.populated:
5387 - self.populate()
5388 - origcp = mylist[1]
5389 - newcp = mylist[2]
5390 - # sanity check
5391 - for atom in (origcp, newcp):
5392 - if not isjustname(atom):
5393 - raise InvalidPackageName(str(atom))
5394 - mynewcat = catsplit(newcp)[0]
5395 - origmatches=self.dbapi.cp_list(origcp)
5396 - moves = 0
5397 - if not origmatches:
5398 - return moves
5399 - for mycpv in origmatches:
5400 - mycpv_cp = mycpv.cp
5401 - if mycpv_cp != origcp:
5402 - # Ignore PROVIDE virtual match.
5403 - continue
5404 - if repo_match is not None \
5405 - and not repo_match(mycpv.repo):
5406 - continue
5407 -
5408 - # Use isvalidatom() to check if this move is valid for the
5409 - # EAPI (characters allowed in package names may vary).
5410 - if not isvalidatom(newcp, eapi=mycpv.eapi):
5411 - continue
5412 -
5413 - mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
5414 - myoldpkg = catsplit(mycpv)[1]
5415 - mynewpkg = catsplit(mynewcpv)[1]
5416 -
5417 - # If this update has already been applied to the same
5418 - # package build then silently continue.
5419 - applied = False
5420 - for maybe_applied in self.dbapi.match('={}'.format(mynewcpv)):
5421 - if maybe_applied.build_time == mycpv.build_time:
5422 - applied = True
5423 - break
5424 -
5425 - if applied:
5426 - continue
5427 -
5428 - if (mynewpkg != myoldpkg) and self.dbapi.cpv_exists(mynewcpv):
5429 - writemsg(_("!!! Cannot update binary: Destination exists.\n"),
5430 - noiselevel=-1)
5431 - writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
5432 - continue
5433 -
5434 - tbz2path = self.getname(mycpv)
5435 - if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
5436 - writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
5437 - noiselevel=-1)
5438 - continue
5439 -
5440 - moves += 1
5441 - mytbz2 = portage.xpak.tbz2(tbz2path)
5442 - mydata = mytbz2.get_data()
5443 - updated_items = update_dbentries([mylist], mydata, parent=mycpv)
5444 - mydata.update(updated_items)
5445 - mydata[b'PF'] = \
5446 - _unicode_encode(mynewpkg + "\n",
5447 - encoding=_encodings['repo.content'])
5448 - mydata[b'CATEGORY'] = \
5449 - _unicode_encode(mynewcat + "\n",
5450 - encoding=_encodings['repo.content'])
5451 - if mynewpkg != myoldpkg:
5452 - ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
5453 - encoding=_encodings['repo.content']), None)
5454 - if ebuild_data is not None:
5455 - mydata[_unicode_encode(mynewpkg + '.ebuild',
5456 - encoding=_encodings['repo.content'])] = ebuild_data
5457 -
5458 - metadata = self.dbapi._aux_cache_slot_dict()
5459 - for k in self.dbapi._aux_cache_keys:
5460 - v = mydata.get(_unicode_encode(k))
5461 - if v is not None:
5462 - v = _unicode_decode(v)
5463 - metadata[k] = " ".join(v.split())
5464 -
5465 - # Create a copy of the old version of the package and
5466 - # apply the update to it. Leave behind the old version,
5467 - # assuming that it will be deleted by eclean-pkg when its
5468 - # time comes.
5469 - mynewcpv = _pkg_str(mynewcpv, metadata=metadata, db=self.dbapi)
5470 - update_path = self.getname(mynewcpv, allocate_new=True) + ".partial"
5471 - self._ensure_dir(os.path.dirname(update_path))
5472 - update_path_lock = None
5473 - try:
5474 - update_path_lock = lockfile(update_path, wantnewlockfile=True)
5475 - copyfile(tbz2path, update_path)
5476 - mytbz2 = portage.xpak.tbz2(update_path)
5477 - mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
5478 - self.inject(mynewcpv, filename=update_path)
5479 - finally:
5480 - if update_path_lock is not None:
5481 - try:
5482 - os.unlink(update_path)
5483 - except OSError:
5484 - pass
5485 - unlockfile(update_path_lock)
5486 -
5487 - return moves
5488 -
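move_ent() above treats a move as already applied when a package with the
new name shares the old package's BUILD_TIME; as a sketch (attribute
access assumed):

    def already_applied(existing_matches, build_time):
        return any(pkg.build_time == build_time for pkg in existing_matches)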
5489 - def prevent_collision(self, cpv):
5490 - warnings.warn("The "
5491 - "portage.dbapi.bintree.binarytree.prevent_collision "
5492 - "method is deprecated.",
5493 - DeprecationWarning, stacklevel=2)
5494 -
5495 - def _ensure_dir(self, path):
5496 - """
5497 - Create the specified directory. Also, copy gid and group mode
5498 - bits from self.pkgdir if possible.
5499 - @param path: Absolute path of the directory to be created.
5500 - @type path: String
5501 - """
5502 - try:
5503 - pkgdir_st = os.stat(self.pkgdir)
5504 - except OSError:
5505 - ensure_dirs(path)
5506 - return
5507 - pkgdir_gid = pkgdir_st.st_gid
5508 - pkgdir_grp_mode = 0o2070 & pkgdir_st.st_mode
5509 - try:
5510 - ensure_dirs(path, gid=pkgdir_gid, mode=pkgdir_grp_mode, mask=0)
5511 - except PortageException:
5512 - if not os.path.isdir(path):
5513 - raise
5514 -
5515 - def _file_permissions(self, path):
5516 - try:
5517 - pkgdir_st = os.stat(self.pkgdir)
5518 - except OSError:
5519 - pass
5520 - else:
5521 - pkgdir_gid = pkgdir_st.st_gid
5522 - pkgdir_grp_mode = 0o0060 & pkgdir_st.st_mode
5523 - try:
5524 - portage.util.apply_permissions(path, gid=pkgdir_gid,
5525 - mode=pkgdir_grp_mode, mask=0)
5526 - except PortageException:
5527 - pass
5528 -
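_ensure_dir() and _file_permissions() both inherit group ownership from
PKGDIR, but with different masks: 0o2070 keeps setgid plus group rwx for
directories, while 0o0060 keeps group rw for files. A sketch (helper
hypothetical):

    import os

    def inherited_bits(pkgdir, for_dir=True):
        st = os.stat(pkgdir)
        mask = 0o2070 if for_dir else 0o0060
        return st.st_gid, st.st_mode & mask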
5529 - def populate(self, getbinpkgs=False, getbinpkg_refresh=True, add_repos=()):
5530 - """
5531 - Populates the binarytree with package metadata.
5532 -
5533 - @param getbinpkgs: include remote packages
5534 - @type getbinpkgs: bool
5535 - @param getbinpkg_refresh: attempt to refresh the cache
5536 - of remote package metadata if getbinpkgs is also True
5537 - @type getbinpkg_refresh: bool
5538 - @param add_repos: additional binary package repositories
5539 - @type add_repos: sequence
5540 - """
5541 -
5542 - if self._populating:
5543 - return
5544 -
5545 - if not os.path.isdir(self.pkgdir) and not (getbinpkgs or add_repos):
5546 - self.populated = True
5547 - return
5548 -
5549 - # Clear all caches in case populate is called multiple times
5550 - # as may be the case when _global_updates calls populate()
5551 - # prior to performing package moves since it only wants to
5552 - # operate on local packages (getbinpkgs=0).
5553 - self._remotepkgs = None
5554 -
5555 - self._populating = True
5556 - try:
5557 - update_pkgindex = self._populate_local(
5558 - reindex='pkgdir-index-trusted' not in self.settings.features)
5559 -
5560 - if update_pkgindex and self.dbapi.writable:
5561 - # If the Packages file needs to be updated, then _populate_local
5562 - # needs to be called once again while the file is locked, so
5563 - # that changes made by a concurrent process cannot be lost. This
5564 - # case is avoided when possible, in order to minimize lock
5565 - # contention.
5566 - pkgindex_lock = None
5567 - try:
5568 - pkgindex_lock = lockfile(self._pkgindex_file,
5569 - wantnewlockfile=True)
5570 - update_pkgindex = self._populate_local()
5571 - if update_pkgindex:
5572 - self._pkgindex_write(update_pkgindex)
5573 - finally:
5574 - if pkgindex_lock:
5575 - unlockfile(pkgindex_lock)
5576 -
5577 - if add_repos:
5578 - self._populate_additional(add_repos)
5579 -
5580 - if getbinpkgs:
5581 - config_path = os.path.join(self.settings['PORTAGE_CONFIGROOT'], BINREPOS_CONF_FILE)
5582 - self._binrepos_conf = BinRepoConfigLoader((config_path,), self.settings)
5583 - if not self._binrepos_conf:
5584 - writemsg(_("!!! %s is missing (or PORTAGE_BINHOST is unset), but use is requested.\n") % (config_path,),
5585 - noiselevel=-1)
5586 - else:
5587 - self._populate_remote(getbinpkg_refresh=getbinpkg_refresh)
5588 -
5589 - finally:
5590 - self._populating = False
5591 -
5592 - self.populated = True
5593 -
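populate() rescans under the Packages lock only when a write is pending,
which keeps lock contention low. A hedged outline of that pattern, with
all callables passed in as assumptions:

    def write_index_safely(scan, write, lock, unlock, path):
        update = scan()
        if update:
            token = lock(path)
            try:
                update = scan()  # rescan under lock so concurrent edits survive
                if update:
                    write(update)
            finally:
                unlock(token)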
5594 - def _populate_local(self, reindex=True):
5595 - """
5596 - Populates the binarytree with local package metadata.
5597 -
5598 - @param reindex: detect added / modified / removed packages and
5599 - regenerate the index file if necessary
5600 - @type reindex: bool
5601 - """
5602 - self.dbapi.clear()
5603 - _instance_key = self.dbapi._instance_key
5604 - # In order to minimize disk I/O, we never compute digests here.
5605 - # Therefore we exclude hashes from the minimum_keys, so that
5606 - # the Packages file will not be needlessly re-written due to
5607 - # missing digests.
5608 - minimum_keys = self._pkgindex_keys.difference(self._pkgindex_hashes)
5609 - if True:
5610 - pkg_paths = {}
5611 - self._pkg_paths = pkg_paths
5612 - dir_files = {}
5613 - if reindex:
5614 - for parent, dir_names, file_names in os.walk(self.pkgdir):
5615 - relative_parent = parent[len(self.pkgdir)+1:]
5616 - dir_files[relative_parent] = file_names
5617 -
5618 - pkgindex = self._load_pkgindex()
5619 - if not self._pkgindex_version_supported(pkgindex):
5620 - pkgindex = self._new_pkgindex()
5621 - metadata = {}
5622 - basename_index = {}
5623 - for d in pkgindex.packages:
5624 - cpv = _pkg_str(d["CPV"], metadata=d,
5625 - settings=self.settings, db=self.dbapi)
5626 - d["CPV"] = cpv
5627 - metadata[_instance_key(cpv)] = d
5628 - path = d.get("PATH")
5629 - if not path:
5630 - path = cpv + ".tbz2"
5631 -
5632 - if reindex:
5633 - basename = os.path.basename(path)
5634 - basename_index.setdefault(basename, []).append(d)
5635 - else:
5636 - instance_key = _instance_key(cpv)
5637 - pkg_paths[instance_key] = path
5638 - self.dbapi.cpv_inject(cpv)
5639 -
5640 - update_pkgindex = False
5641 - for mydir, file_names in dir_files.items():
5642 - try:
5643 - mydir = _unicode_decode(mydir,
5644 - encoding=_encodings["fs"], errors="strict")
5645 - except UnicodeDecodeError:
5646 - continue
5647 - for myfile in file_names:
5648 - try:
5649 - myfile = _unicode_decode(myfile,
5650 - encoding=_encodings["fs"], errors="strict")
5651 - except UnicodeDecodeError:
5652 - continue
5653 - if not myfile.endswith(SUPPORTED_XPAK_EXTENSIONS):
5654 - continue
5655 - mypath = os.path.join(mydir, myfile)
5656 - full_path = os.path.join(self.pkgdir, mypath)
5657 - s = os.lstat(full_path)
5658 -
5659 - if not stat.S_ISREG(s.st_mode):
5660 - continue
5661 -
5662 - # Validate data from the package index and try to avoid
5663 - # reading the xpak if possible.
5664 - possibilities = basename_index.get(myfile)
5665 - if possibilities:
5666 - match = None
5667 - for d in possibilities:
5668 - try:
5669 - if int(d["_mtime_"]) != s[stat.ST_MTIME]:
5670 - continue
5671 - except (KeyError, ValueError):
5672 - continue
5673 - try:
5674 - if int(d["SIZE"]) != int(s.st_size):
5675 - continue
5676 - except (KeyError, ValueError):
5677 - continue
5678 - if not minimum_keys.difference(d):
5679 - match = d
5680 - break
5681 - if match:
5682 - mycpv = match["CPV"]
5683 - instance_key = _instance_key(mycpv)
5684 - pkg_paths[instance_key] = mypath
5685 - # update the path if the package has been moved
5686 - oldpath = d.get("PATH")
5687 - if oldpath and oldpath != mypath:
5688 - update_pkgindex = True
5689 - # Omit PATH if it is the default path for
5690 - # the current Packages format version.
5691 - if mypath != mycpv + ".tbz2":
5692 - d["PATH"] = mypath
5693 - if not oldpath:
5694 - update_pkgindex = True
5695 - else:
5696 - d.pop("PATH", None)
5697 - if oldpath:
5698 - update_pkgindex = True
5699 - self.dbapi.cpv_inject(mycpv)
5700 - continue
5701 - if not os.access(full_path, os.R_OK):
5702 - writemsg(_("!!! Permission denied to read " \
5703 - "binary package: '%s'\n") % full_path,
5704 - noiselevel=-1)
5705 - self.invalids.append(myfile[:-5])
5706 - continue
5707 - pkg_metadata = self._read_metadata(full_path, s,
5708 - keys=chain(self.dbapi._aux_cache_keys,
5709 - ("PF", "CATEGORY")))
5710 - mycat = pkg_metadata.get("CATEGORY", "")
5711 - mypf = pkg_metadata.get("PF", "")
5712 - slot = pkg_metadata.get("SLOT", "")
5713 - mypkg = myfile[:-5]
5714 - if not mycat or not mypf or not slot:
5715 - # old-style or corrupt package
5716 - writemsg(_("\n!!! Invalid binary package: '%s'\n") % full_path,
5717 - noiselevel=-1)
5718 - missing_keys = []
5719 - if not mycat:
5720 - missing_keys.append("CATEGORY")
5721 - if not mypf:
5722 - missing_keys.append("PF")
5723 - if not slot:
5724 - missing_keys.append("SLOT")
5725 - msg = []
5726 - if missing_keys:
5727 - missing_keys.sort()
5728 - msg.append(_("Missing metadata key(s): %s.") % \
5729 - ", ".join(missing_keys))
5730 - msg.append(_(" This binary package is not " \
5731 - "recoverable and should be deleted."))
5732 - for line in textwrap.wrap("".join(msg), 72):
5733 - writemsg("!!! %s\n" % line, noiselevel=-1)
5734 - self.invalids.append(mypkg)
5735 - continue
5736 -
5737 - multi_instance = False
5738 - invalid_name = False
5739 - build_id = None
5740 - if myfile.endswith(".xpak"):
5741 - multi_instance = True
5742 - build_id = self._parse_build_id(myfile)
5743 - if build_id < 1:
5744 - invalid_name = True
5745 - elif myfile != "%s-%s.xpak" % (
5746 - mypf, build_id):
5747 - invalid_name = True
5748 - else:
5749 - mypkg = mypkg[:-len(str(build_id))-1]
5750 - elif myfile != mypf + ".tbz2":
5751 - invalid_name = True
5752 -
5753 - if invalid_name:
5754 - writemsg(_("\n!!! Binary package name is "
5755 - "invalid: '%s'\n") % full_path,
5756 - noiselevel=-1)
5757 - continue
5758 -
5759 - if pkg_metadata.get("BUILD_ID"):
5760 - try:
5761 - build_id = int(pkg_metadata["BUILD_ID"])
5762 - except ValueError:
5763 - writemsg(_("!!! Binary package has "
5764 - "invalid BUILD_ID: '%s'\n") %
5765 - full_path, noiselevel=-1)
5766 - continue
5767 - else:
5768 - build_id = None
5769 -
5770 - if multi_instance:
5771 - name_split = catpkgsplit("%s/%s" %
5772 - (mycat, mypf))
5773 - if (name_split is None or
5774 - tuple(catsplit(mydir)) != name_split[:2]):
5775 - continue
5776 - elif mycat != mydir and mydir != "All":
5777 - continue
5778 - if mypkg != mypf.strip():
5779 - continue
5780 - mycpv = mycat + "/" + mypkg
5781 - if not self.dbapi._category_re.match(mycat):
5782 - writemsg(_("!!! Binary package has an " \
5783 - "unrecognized category: '%s'\n") % full_path,
5784 - noiselevel=-1)
5785 - writemsg(_("!!! '%s' has a category that is not" \
5786 - " listed in %setc/portage/categories\n") % \
5787 - (mycpv, self.settings["PORTAGE_CONFIGROOT"]),
5788 - noiselevel=-1)
5789 - continue
5790 - if build_id is not None:
5791 - pkg_metadata["BUILD_ID"] = str(build_id)
5792 - pkg_metadata["SIZE"] = str(s.st_size)
5793 - # Discard items used only for validation above.
5794 - pkg_metadata.pop("CATEGORY")
5795 - pkg_metadata.pop("PF")
5796 - mycpv = _pkg_str(mycpv,
5797 - metadata=self.dbapi._aux_cache_slot_dict(pkg_metadata),
5798 - db=self.dbapi)
5799 - pkg_paths[_instance_key(mycpv)] = mypath
5800 - self.dbapi.cpv_inject(mycpv)
5801 - update_pkgindex = True
5802 - d = metadata.get(_instance_key(mycpv),
5803 - pkgindex._pkg_slot_dict())
5804 - if d:
5805 - try:
5806 - if int(d["_mtime_"]) != s[stat.ST_MTIME]:
5807 - d.clear()
5808 - except (KeyError, ValueError):
5809 - d.clear()
5810 - if d:
5811 - try:
5812 - if int(d["SIZE"]) != int(s.st_size):
5813 - d.clear()
5814 - except (KeyError, ValueError):
5815 - d.clear()
5816 -
5817 - for k in self._pkgindex_allowed_pkg_keys:
5818 - v = pkg_metadata.get(k)
5819 - if v:
5820 - d[k] = v
5821 - d["CPV"] = mycpv
5822 -
5823 - try:
5824 - self._eval_use_flags(mycpv, d)
5825 - except portage.exception.InvalidDependString:
5826 - writemsg(_("!!! Invalid binary package: '%s'\n") % \
5827 - self.getname(mycpv), noiselevel=-1)
5828 - self.dbapi.cpv_remove(mycpv)
5829 - del pkg_paths[_instance_key(mycpv)]
5830 -
5831 - # record location if it's non-default
5832 - if mypath != mycpv + ".tbz2":
5833 - d["PATH"] = mypath
5834 - else:
5835 - d.pop("PATH", None)
5836 - metadata[_instance_key(mycpv)] = d
5837 -
5838 - if reindex:
5839 - for instance_key in list(metadata):
5840 - if instance_key not in pkg_paths:
5841 - del metadata[instance_key]
5842 -
5843 - if update_pkgindex:
5844 - del pkgindex.packages[:]
5845 - pkgindex.packages.extend(iter(metadata.values()))
5846 - self._update_pkgindex_header(pkgindex.header)
5847 -
5848 - self._pkgindex_header = {}
5849 - self._merge_pkgindex_header(pkgindex.header,
5850 - self._pkgindex_header)
5851 -
5852 - return pkgindex if update_pkgindex else None
5853 -
5854 - def _populate_remote(self, getbinpkg_refresh=True):
5855 -
5856 - self._remote_has_index = False
5857 - self._remotepkgs = {}
5858 - # Order by descending priority.
5859 - for repo in reversed(list(self._binrepos_conf.values())):
5860 - base_url = repo.sync_uri
5861 - parsed_url = urlparse(base_url)
5862 - host = parsed_url.netloc
5863 - port = parsed_url.port
5864 - user = None
5865 - passwd = None
5866 - user_passwd = ""
5867 - if "@" in host:
5868 - user, host = host.split("@", 1)
5869 - user_passwd = user + "@"
5870 - if ":" in user:
5871 - user, passwd = user.split(":", 1)
5872 -
5873 - if port is not None:
5874 - port_str = ":%s" % (port,)
5875 - if host.endswith(port_str):
5876 - host = host[:-len(port_str)]
5877 - pkgindex_file = os.path.join(self.settings["EROOT"], CACHE_PATH, "binhost",
5878 - host, parsed_url.path.lstrip("/"), "Packages")
5879 - pkgindex = self._new_pkgindex()
5880 - try:
5881 - f = io.open(_unicode_encode(pkgindex_file,
5882 - encoding=_encodings['fs'], errors='strict'),
5883 - mode='r', encoding=_encodings['repo.content'],
5884 - errors='replace')
5885 - try:
5886 - pkgindex.read(f)
5887 - finally:
5888 - f.close()
5889 - except EnvironmentError as e:
5890 - if e.errno != errno.ENOENT:
5891 - raise
5892 - local_timestamp = pkgindex.header.get("TIMESTAMP", None)
5893 - try:
5894 - download_timestamp = \
5895 - float(pkgindex.header.get("DOWNLOAD_TIMESTAMP", 0))
5896 - except ValueError:
5897 - download_timestamp = 0
5898 - remote_timestamp = None
5899 - rmt_idx = self._new_pkgindex()
5900 - proc = None
5901 - tmp_filename = None
5902 - try:
5903 - # urlparse.urljoin() only works correctly with recognized
5904 - # protocols and requires the base url to have a trailing
5905 - # slash, so join manually...
5906 - url = base_url.rstrip("/") + "/Packages"
5907 - f = None
5908 -
5909 - if not getbinpkg_refresh and local_timestamp:
5910 - raise UseCachedCopyOfRemoteIndex()
5911 -
5912 - try:
5913 - ttl = float(pkgindex.header.get("TTL", 0))
5914 - except ValueError:
5915 - pass
5916 - else:
5917 - if download_timestamp and ttl and \
5918 - download_timestamp + ttl > time.time():
5919 - raise UseCachedCopyOfRemoteIndex()
5920 -
5921 - # Set proxy settings for _urlopen -> urllib_request
5922 - proxies = {}
5923 - for proto in ('http', 'https'):
5924 - value = self.settings.get(proto + '_proxy')
5925 - if value is not None:
5926 - proxies[proto] = value
5927 -
5928 - # Don't use urlopen for https, unless
5929 - # PEP 476 is supported (bug #469888).
5930 - if repo.fetchcommand is None and (parsed_url.scheme not in ('https',) or _have_pep_476()):
5931 - try:
5932 - f = _urlopen(url, if_modified_since=local_timestamp, proxies=proxies)
5933 - if hasattr(f, 'headers') and f.headers.get('timestamp', ''):
5934 - remote_timestamp = f.headers.get('timestamp')
5935 - except IOError as err:
5936 - if hasattr(err, 'code') and err.code == 304: # not modified (since local_timestamp)
5937 - raise UseCachedCopyOfRemoteIndex()
5938 -
5939 - if parsed_url.scheme in ('ftp', 'http', 'https'):
5940 - # This protocol is supposedly supported by urlopen,
5941 - # so apparently there's a problem with the url
5942 - # or a bug in urlopen.
5943 - if self.settings.get("PORTAGE_DEBUG", "0") != "0":
5944 - traceback.print_exc()
5945 -
5946 - raise
5947 - except ValueError:
5948 - raise ParseError("Invalid Portage BINHOST value '%s'"
5949 - % url.lstrip())
5950 -
5951 - if f is None:
5952 -
5953 - path = parsed_url.path.rstrip("/") + "/Packages"
5954 -
5955 - if repo.fetchcommand is None and parsed_url.scheme == 'ssh':
5956 - # Use a pipe so that we can terminate the download
5957 - # early if we detect that the TIMESTAMP header
5958 - # matches that of the cached Packages file.
5959 - ssh_args = ['ssh']
5960 - if port is not None:
5961 - ssh_args.append("-p%s" % (port,))
5962 - # NOTE: shlex evaluates embedded quotes
5963 - ssh_args.extend(portage.util.shlex_split(
5964 - self.settings.get("PORTAGE_SSH_OPTS", "")))
5965 - ssh_args.append(user_passwd + host)
5966 - ssh_args.append('--')
5967 - ssh_args.append('cat')
5968 - ssh_args.append(path)
5969 -
5970 - proc = subprocess.Popen(ssh_args,
5971 - stdout=subprocess.PIPE)
5972 - f = proc.stdout
5973 - else:
5974 - if repo.fetchcommand is None:
5975 - setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
5976 - fcmd = self.settings.get(setting)
5977 - if not fcmd:
5978 - fcmd = self.settings.get('FETCHCOMMAND')
5979 - if not fcmd:
5980 - raise EnvironmentError("FETCHCOMMAND is unset")
5981 - else:
5982 - fcmd = repo.fetchcommand
5983 -
5984 - fd, tmp_filename = tempfile.mkstemp()
5985 - tmp_dirname, tmp_basename = os.path.split(tmp_filename)
5986 - os.close(fd)
5987 -
5988 - fcmd_vars = {
5989 - "DISTDIR": tmp_dirname,
5990 - "FILE": tmp_basename,
5991 - "URI": url
5992 - }
5993 -
5994 - for k in ("PORTAGE_SSH_OPTS",):
5995 - v = self.settings.get(k)
5996 - if v is not None:
5997 - fcmd_vars[k] = v
5998 -
5999 - success = portage.getbinpkg.file_get(
6000 - fcmd=fcmd, fcmd_vars=fcmd_vars)
6001 - if not success:
6002 - raise EnvironmentError("%s failed" % (setting,))
6003 - f = open(tmp_filename, 'rb')
6004 -
6005 - f_dec = codecs.iterdecode(f,
6006 - _encodings['repo.content'], errors='replace')
6007 - try:
6008 - rmt_idx.readHeader(f_dec)
6009 - if not remote_timestamp: # in case it had not been read from HTTP header
6010 - remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
6011 - if not remote_timestamp:
6012 - # no timestamp in the header, something's wrong
6013 - pkgindex = None
6014 - writemsg(_("\n\n!!! Binhost package index" \
6015 - " has no TIMESTAMP field.\n"), noiselevel=-1)
6016 - else:
6017 - if not self._pkgindex_version_supported(rmt_idx):
6018 - writemsg(_("\n\n!!! Binhost package index version" \
6019 - " is not supported: '%s'\n") % \
6020 - rmt_idx.header.get("VERSION"), noiselevel=-1)
6021 - pkgindex = None
6022 - elif local_timestamp != remote_timestamp:
6023 - rmt_idx.readBody(f_dec)
6024 - pkgindex = rmt_idx
6025 - finally:
6026 - # Timeout after 5 seconds, in case close() blocks
6027 - # indefinitely (see bug #350139).
6028 - try:
6029 - try:
6030 - AlarmSignal.register(5)
6031 - f.close()
6032 - finally:
6033 - AlarmSignal.unregister()
6034 - except AlarmSignal:
6035 - writemsg("\n\n!!! %s\n" % \
6036 - _("Timed out while closing connection to binhost"),
6037 - noiselevel=-1)
6038 - except UseCachedCopyOfRemoteIndex:
6039 - writemsg_stdout("\n")
6040 - writemsg_stdout(
6041 - colorize("GOOD", _("Local copy of remote index is up-to-date and will be used.")) + \
6042 - "\n")
6043 - rmt_idx = pkgindex
6044 - except EnvironmentError as e:
6045 - # This includes URLError which is raised for SSL
6046 - # certificate errors when PEP 476 is supported.
6047 - writemsg(_("\n\n!!! Error fetching binhost package" \
6048 - " info from '%s'\n") % _hide_url_passwd(base_url))
6049 - # With Python 2, the EnvironmentError message may
6050 - # contain bytes or unicode, so use str to ensure
6051 - # safety with all locales (bug #532784).
6052 - try:
6053 - error_msg = str(e)
6054 - except UnicodeDecodeError as uerror:
6055 - error_msg = str(uerror.object,
6056 - encoding='utf_8', errors='replace')
6057 - writemsg("!!! %s\n\n" % error_msg)
6058 - del e
6059 - pkgindex = None
6060 - if proc is not None:
6061 - if proc.poll() is None:
6062 - proc.kill()
6063 - proc.wait()
6064 - proc = None
6065 - if tmp_filename is not None:
6066 - try:
6067 - os.unlink(tmp_filename)
6068 - except OSError:
6069 - pass
6070 - if pkgindex is rmt_idx:
6071 - pkgindex.modified = False # don't update the header
6072 - pkgindex.header["DOWNLOAD_TIMESTAMP"] = "%d" % time.time()
6073 - try:
6074 - ensure_dirs(os.path.dirname(pkgindex_file))
6075 - f = atomic_ofstream(pkgindex_file)
6076 - pkgindex.write(f)
6077 - f.close()
6078 - except (IOError, PortageException):
6079 - if os.access(os.path.dirname(pkgindex_file), os.W_OK):
6080 - raise
6081 - # The current user doesn't have permission to cache the
6082 - # file, but that's alright.
6083 - if pkgindex:
6084 - remote_base_uri = pkgindex.header.get("URI", base_url)
6085 - for d in pkgindex.packages:
6086 - cpv = _pkg_str(d["CPV"], metadata=d,
6087 - settings=self.settings, db=self.dbapi)
6088 - # Local package instances override remote instances
6089 - # with the same instance_key.
6090 - if self.dbapi.cpv_exists(cpv):
6091 - continue
6092 -
6093 - d["CPV"] = cpv
6094 - d["BASE_URI"] = remote_base_uri
6095 - d["PKGINDEX_URI"] = url
6096 - # FETCHCOMMAND and RESUMECOMMAND may be specified
6097 - # by binrepos.conf, and otherwise ensure that they
6098 - # do not propagate from the Packages index since
6099 - # it may be unsafe to execute remotely specified
6100 - # commands.
6101 - if repo.fetchcommand is None:
6102 - d.pop('FETCHCOMMAND', None)
6103 - else:
6104 - d['FETCHCOMMAND'] = repo.fetchcommand
6105 - if repo.resumecommand is None:
6106 - d.pop('RESUMECOMMAND', None)
6107 - else:
6108 - d['RESUMECOMMAND'] = repo.resumecommand
6109 - self._remotepkgs[self.dbapi._instance_key(cpv)] = d
6110 - self.dbapi.cpv_inject(cpv)
6111 -
6112 - self._remote_has_index = True
6113 - self._merge_pkgindex_header(pkgindex.header,
6114 - self._pkgindex_header)
6115 -
6116 - def _populate_additional(self, repos):
6117 - for repo in repos:
6118 - aux_keys = list(set(chain(repo._aux_cache_keys, repo._pkg_str_aux_keys)))
6119 - for cpv in repo.cpv_all():
6120 - metadata = dict(zip(aux_keys, repo.aux_get(cpv, aux_keys)))
6121 - pkg = _pkg_str(cpv, metadata=metadata, settings=repo.settings, db=repo)
6122 - instance_key = self.dbapi._instance_key(pkg)
6123 - self._additional_pkgs[instance_key] = pkg
6124 - self.dbapi.cpv_inject(pkg)
6125 -
6126 - def inject(self, cpv, filename=None):
6127 - """Add a freshly built package to the database. This updates
6128 - $PKGDIR/Packages with the new package metadata (including MD5).
6129 - @param cpv: The cpv of the new package to inject
6130 - @type cpv: string
6131 - @param filename: File path of the package to inject, or None if it's
6132 - already in the location returned by getname()
6133 - @type filename: string
6134 - @rtype: _pkg_str or None
6135 - @return: A _pkg_str instance on success, or None on failure.
6136 - """
6137 - mycat, mypkg = catsplit(cpv)
6138 - if not self.populated:
6139 - self.populate()
6140 - if filename is None:
6141 - full_path = self.getname(cpv)
6142 - else:
6143 - full_path = filename
6144 - try:
6145 - s = os.stat(full_path)
6146 - except OSError as e:
6147 - if e.errno != errno.ENOENT:
6148 - raise
6149 - del e
6150 - writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
6151 - noiselevel=-1)
6152 - return
6153 - metadata = self._read_metadata(full_path, s)
6154 - invalid_depend = False
6155 - try:
6156 - self._eval_use_flags(cpv, metadata)
6157 - except portage.exception.InvalidDependString:
6158 - invalid_depend = True
6159 - if invalid_depend or not metadata.get("SLOT"):
6160 - writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
6161 - noiselevel=-1)
6162 - return
6163 -
6164 - fetched = False
6165 - try:
6166 - build_id = cpv.build_id
6167 - except AttributeError:
6168 - build_id = None
6169 - else:
6170 - instance_key = self.dbapi._instance_key(cpv)
6171 - if instance_key in self.dbapi.cpvdict:
6172 - # This means we've been called by aux_update (or
6173 - # similar). The instance key typically changes (due to
6174 - # file modification), so we need to discard existing
6175 - # instance key references.
6176 - self.dbapi.cpv_remove(cpv)
6177 - self._pkg_paths.pop(instance_key, None)
6178 - if self._remotepkgs is not None:
6179 - fetched = self._remotepkgs.pop(instance_key, None)
6180 -
6181 - cpv = _pkg_str(cpv, metadata=metadata, settings=self.settings,
6182 - db=self.dbapi)
6183 -
6184 - # Reread the Packages index (in case it's been changed by another
6185 - # process) and then updated it, all while holding a lock.
6186 - pkgindex_lock = None
6187 - try:
6188 - os.makedirs(self.pkgdir, exist_ok=True)
6189 - pkgindex_lock = lockfile(self._pkgindex_file,
6190 - wantnewlockfile=1)
6191 - if filename is not None:
6192 - new_filename = self.getname(cpv, allocate_new=True)
6193 - try:
6194 - samefile = os.path.samefile(filename, new_filename)
6195 - except OSError:
6196 - samefile = False
6197 - if not samefile:
6198 - self._ensure_dir(os.path.dirname(new_filename))
6199 - _movefile(filename, new_filename, mysettings=self.settings)
6200 - full_path = new_filename
6201 -
6202 - basename = os.path.basename(full_path)
6203 - pf = catsplit(cpv)[1]
6204 - if (build_id is None and not fetched and
6205 - basename.endswith(".xpak")):
6206 - # Apply the newly assigned BUILD_ID. This is intended
6207 - # to occur only for locally built packages. If the
6208 - # package was fetched, we want to preserve its
6209 - # attributes, so that we can later distinguish that it
6210 - # is identical to its remote counterpart.
6211 - build_id = self._parse_build_id(basename)
6212 - metadata["BUILD_ID"] = str(build_id)
6213 - cpv = _pkg_str(cpv, metadata=metadata,
6214 - settings=self.settings, db=self.dbapi)
6215 - binpkg = portage.xpak.tbz2(full_path)
6216 - binary_data = binpkg.get_data()
6217 - binary_data[b"BUILD_ID"] = _unicode_encode(
6218 - metadata["BUILD_ID"])
6219 - binpkg.recompose_mem(portage.xpak.xpak_mem(binary_data))
6220 -
6221 - self._file_permissions(full_path)
6222 - pkgindex = self._load_pkgindex()
6223 - if not self._pkgindex_version_supported(pkgindex):
6224 - pkgindex = self._new_pkgindex()
6225 -
6226 - d = self._inject_file(pkgindex, cpv, full_path)
6227 - self._update_pkgindex_header(pkgindex.header)
6228 - self._pkgindex_write(pkgindex)
6229 -
6230 - finally:
6231 - if pkgindex_lock:
6232 - unlockfile(pkgindex_lock)
6233 -
6234 - # This is used to record BINPKGMD5 in the installed package
6235 - # database, for a package that has just been built.
6236 - cpv._metadata["MD5"] = d["MD5"]
6237 -
6238 - return cpv
6239 -
6240 - def _read_metadata(self, filename, st, keys=None):
6241 - """
6242 - Read metadata from a binary package. The returned metadata
6243 - dictionary will contain empty strings for any values that
6244 - are undefined (this is important because the _pkg_str class
6245 - distinguishes between missing and undefined values).
6246 -
6247 - @param filename: File path of the binary package
6248 - @type filename: string
6249 - @param st: stat result for the binary package
6250 - @type st: os.stat_result
6251 - @param keys: optional list of specific metadata keys to retrieve
6252 - @type keys: iterable
6253 - @rtype: dict
6254 - @return: package metadata
6255 - """
6256 - if keys is None:
6257 - keys = self.dbapi._aux_cache_keys
6258 - metadata = self.dbapi._aux_cache_slot_dict()
6259 - else:
6260 - metadata = {}
6261 - binary_metadata = portage.xpak.tbz2(filename).get_data()
6262 - for k in keys:
6263 - if k == "_mtime_":
6264 - metadata[k] = str(st[stat.ST_MTIME])
6265 - elif k == "SIZE":
6266 - metadata[k] = str(st.st_size)
6267 - else:
6268 - v = binary_metadata.get(_unicode_encode(k))
6269 - if v is None:
6270 - if k == "EAPI":
6271 - metadata[k] = "0"
6272 - else:
6273 - metadata[k] = ""
6274 - else:
6275 - v = _unicode_decode(v)
6276 - metadata[k] = " ".join(v.split())
6277 - return metadata
6278 -
6279 - def _inject_file(self, pkgindex, cpv, filename):
6280 - """
6281 - Add a package to internal data structures, and add an
6282 - entry to the given pkgindex.
6283 - @param pkgindex: The PackageIndex instance to which an entry
6284 - will be added.
6285 - @type pkgindex: PackageIndex
6286 - @param cpv: A _pkg_str instance corresponding to the package
6287 - being injected.
6288 - @type cpv: _pkg_str
6289 - @param filename: Absolute file path of the package to inject.
6290 - @type filename: string
6291 - @rtype: dict
6292 - @return: A dict corresponding to the new entry which has been
6293 - added to pkgindex. This may be used to access the checksums
6294 - which have just been generated.
6295 - """
6296 - # Update state for future isremote calls.
6297 - instance_key = self.dbapi._instance_key(cpv)
6298 - if self._remotepkgs is not None:
6299 - self._remotepkgs.pop(instance_key, None)
6300 -
6301 - self.dbapi.cpv_inject(cpv)
6302 - self._pkg_paths[instance_key] = filename[len(self.pkgdir)+1:]
6303 - d = self._pkgindex_entry(cpv)
6304 -
6305 - # If found, remove package(s) with duplicate path.
6306 - path = d.get("PATH", "")
6307 - for i in range(len(pkgindex.packages) - 1, -1, -1):
6308 - d2 = pkgindex.packages[i]
6309 - if path and path == d2.get("PATH"):
6310 - # Handle path collisions in $PKGDIR/All
6311 - # when CPV is not identical.
6312 - del pkgindex.packages[i]
6313 - elif cpv == d2.get("CPV"):
6314 - if path == d2.get("PATH", ""):
6315 - del pkgindex.packages[i]
6316 -
6317 - pkgindex.packages.append(d)
6318 - return d
6319 -
6320 - def _pkgindex_write(self, pkgindex):
6321 - contents = codecs.getwriter(_encodings['repo.content'])(io.BytesIO())
6322 - pkgindex.write(contents)
6323 - contents = contents.getvalue()
6324 - atime = mtime = int(pkgindex.header["TIMESTAMP"])
6325 - output_files = [(atomic_ofstream(self._pkgindex_file, mode="wb"),
6326 - self._pkgindex_file, None)]
6327 -
6328 - if "compress-index" in self.settings.features:
6329 - gz_fname = self._pkgindex_file + ".gz"
6330 - fileobj = atomic_ofstream(gz_fname, mode="wb")
6331 - output_files.append((GzipFile(filename='', mode="wb",
6332 - fileobj=fileobj, mtime=mtime), gz_fname, fileobj))
6333 -
6334 - for f, fname, f_close in output_files:
6335 - f.write(contents)
6336 - f.close()
6337 - if f_close is not None:
6338 - f_close.close()
6339 - self._file_permissions(fname)
6340 - # some seconds might have elapsed since TIMESTAMP
6341 - os.utime(fname, (atime, mtime))
6342 -
6343 - def _pkgindex_entry(self, cpv):
6344 - """
6345 - Performs checksums, and gets size and mtime via lstat.
6346 - Raises InvalidDependString if necessary.
6347 - @rtype: dict
6348 - @return: a dict containing entry for the give cpv.
6349 - """
6350 -
6351 - pkg_path = self.getname(cpv)
6352 -
6353 - d = dict(cpv._metadata.items())
6354 - d.update(perform_multiple_checksums(
6355 - pkg_path, hashes=self._pkgindex_hashes))
6356 -
6357 - d["CPV"] = cpv
6358 - st = os.lstat(pkg_path)
6359 - d["_mtime_"] = str(st[stat.ST_MTIME])
6360 - d["SIZE"] = str(st.st_size)
6361 -
6362 - rel_path = pkg_path[len(self.pkgdir)+1:]
6363 - # record location if it's non-default
6364 - if rel_path != cpv + ".tbz2":
6365 - d["PATH"] = rel_path
6366 -
6367 - return d
6368 -
6369 - def _new_pkgindex(self):
6370 - return portage.getbinpkg.PackageIndex(
6371 - allowed_pkg_keys=self._pkgindex_allowed_pkg_keys,
6372 - default_header_data=self._pkgindex_default_header_data,
6373 - default_pkg_data=self._pkgindex_default_pkg_data,
6374 - inherited_keys=self._pkgindex_inherited_keys,
6375 - translated_keys=self._pkgindex_translated_keys)
6376 -
6377 - @staticmethod
6378 - def _merge_pkgindex_header(src, dest):
6379 - """
6380 - Merge Packages header settings from src to dest, in order to
6381 - propagate implicit IUSE and USE_EXPAND settings for use with
6382 - binary and installed packages. Values are appended, so the
6383 - result is a union of elements from src and dest.
6384 -
6385 - Pull in ARCH if it's not defined, since it's used for validation
6386 - by emerge's profile_check function, and also for KEYWORDS logic
6387 - in the _getmaskingstatus function.
6388 -
6389 - @param src: source mapping (read only)
6390 - @type src: Mapping
6391 - @param dest: destination mapping
6392 - @type dest: MutableMapping
6393 - """
6394 - for k, v in iter_iuse_vars(src):
6395 - v_before = dest.get(k)
6396 - if v_before is not None:
6397 - merged_values = set(v_before.split())
6398 - merged_values.update(v.split())
6399 - v = ' '.join(sorted(merged_values))
6400 - dest[k] = v
6401 -
6402 - if 'ARCH' not in dest and 'ARCH' in src:
6403 - dest['ARCH'] = src['ARCH']
6404 -
6405 - def _propagate_config(self, config):
6406 - """
6407 - Propagate implicit IUSE and USE_EXPAND settings from the binary
6408 - package database to a config instance. If settings are not
6409 - available to propagate, then this will do nothing and return
6410 - False.
6411 -
6412 - @param config: config instance
6413 - @type config: portage.config
6414 - @rtype: bool
6415 - @return: True if settings successfully propagated, False if settings
6416 - were not available to propagate.
6417 - """
6418 - if self._pkgindex_header is None:
6419 - return False
6420 -
6421 - self._merge_pkgindex_header(self._pkgindex_header,
6422 - config.configdict['defaults'])
6423 - config.regenerate()
6424 - config._init_iuse()
6425 - return True
6426 -
6427 - def _update_pkgindex_header(self, header):
6428 - """
6429 - Add useful settings to the Packages file header, for use by
6430 - binhost clients.
6431 -
6432 - This will return silently if the current profile is invalid or
6433 - does not have an IUSE_IMPLICIT variable, since it's useful to
6434 - maintain a cache of implicit IUSE settings for use with binary
6435 - packages.
6436 - """
6437 - if not (self.settings.profile_path and
6438 - "IUSE_IMPLICIT" in self.settings):
6439 - header.setdefault("VERSION", str(self._pkgindex_version))
6440 - return
6441 -
6442 - portdir = normalize_path(os.path.realpath(self.settings["PORTDIR"]))
6443 - profiles_base = os.path.join(portdir, "profiles") + os.path.sep
6444 - if self.settings.profile_path:
6445 - profile_path = normalize_path(
6446 - os.path.realpath(self.settings.profile_path))
6447 - if profile_path.startswith(profiles_base):
6448 - profile_path = profile_path[len(profiles_base):]
6449 - header["PROFILE"] = profile_path
6450 - header["VERSION"] = str(self._pkgindex_version)
6451 - base_uri = self.settings.get("PORTAGE_BINHOST_HEADER_URI")
6452 - if base_uri:
6453 - header["URI"] = base_uri
6454 - else:
6455 - header.pop("URI", None)
6456 - for k in list(self._pkgindex_header_keys) + \
6457 - self.settings.get("USE_EXPAND_IMPLICIT", "").split() + \
6458 - self.settings.get("USE_EXPAND_UNPREFIXED", "").split():
6459 - v = self.settings.get(k, None)
6460 - if v:
6461 - header[k] = v
6462 - else:
6463 - header.pop(k, None)
6464 -
6465 - # These values may be useful for using a binhost without
6466 - # having a local copy of the profile (bug #470006).
6467 - for k in self.settings.get("USE_EXPAND_IMPLICIT", "").split():
6468 - k = "USE_EXPAND_VALUES_" + k
6469 - v = self.settings.get(k)
6470 - if v:
6471 - header[k] = v
6472 - else:
6473 - header.pop(k, None)
6474 -
6475 - def _pkgindex_version_supported(self, pkgindex):
6476 - version = pkgindex.header.get("VERSION")
6477 - if version:
6478 - try:
6479 - if int(version) <= self._pkgindex_version:
6480 - return True
6481 - except ValueError:
6482 - pass
6483 - return False
6484 -
6485 - def _eval_use_flags(self, cpv, metadata):
6486 - use = frozenset(metadata.get("USE", "").split())
6487 - for k in self._pkgindex_use_evaluated_keys:
6488 - if k.endswith('DEPEND'):
6489 - token_class = Atom
6490 - else:
6491 - token_class = None
6492 -
6493 - deps = metadata.get(k)
6494 - if deps is None:
6495 - continue
6496 - try:
6497 - deps = use_reduce(deps, uselist=use, token_class=token_class)
6498 - deps = paren_enclose(deps)
6499 - except portage.exception.InvalidDependString as e:
6500 - writemsg("%s: %s\n" % (k, e), noiselevel=-1)
6501 - raise
6502 - metadata[k] = deps
6503 -
6504 - def exists_specific(self, cpv):
6505 - if not self.populated:
6506 - self.populate()
6507 - return self.dbapi.match(
6508 - dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
6509 -
6510 - def dep_bestmatch(self, mydep):
6511 - "compatibility method -- all matches, not just visible ones"
6512 - if not self.populated:
6513 - self.populate()
6514 - writemsg("\n\n", 1)
6515 - writemsg("mydep: %s\n" % mydep, 1)
6516 - mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
6517 - writemsg("mydep: %s\n" % mydep, 1)
6518 - mykey = dep_getkey(mydep)
6519 - writemsg("mykey: %s\n" % mykey, 1)
6520 - mymatch = best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
6521 - writemsg("mymatch: %s\n" % mymatch, 1)
6522 - if mymatch is None:
6523 - return ""
6524 - return mymatch
6525 -
6526 - def getname(self, cpv, allocate_new=None):
6527 - """Returns a file location for this package.
6528 - If cpv has both build_time and build_id attributes, then the
6529 - path to the specific corresponding instance is returned.
6530 - Otherwise, allocate a new path and return that. When allocating
6531 - a new path, behavior depends on the binpkg-multi-instance
6532 - FEATURES setting.
6533 - """
6534 - if not self.populated:
6535 - self.populate()
6536 -
6537 - try:
6538 - cpv.cp
6539 - except AttributeError:
6540 - cpv = _pkg_str(cpv)
6541 -
6542 - filename = None
6543 - if allocate_new:
6544 - filename = self._allocate_filename(cpv)
6545 - elif self._is_specific_instance(cpv):
6546 - instance_key = self.dbapi._instance_key(cpv)
6547 - path = self._pkg_paths.get(instance_key)
6548 - if path is not None:
6549 - filename = os.path.join(self.pkgdir, path)
6550 -
6551 - if filename is None and not allocate_new:
6552 - try:
6553 - instance_key = self.dbapi._instance_key(cpv,
6554 - support_string=True)
6555 - except KeyError:
6556 - pass
6557 - else:
6558 - filename = self._pkg_paths.get(instance_key)
6559 - if filename is not None:
6560 - filename = os.path.join(self.pkgdir, filename)
6561 - elif instance_key in self._additional_pkgs:
6562 - return None
6563 -
6564 - if filename is None:
6565 - if self._multi_instance:
6566 - pf = catsplit(cpv)[1]
6567 - filename = "%s-%s.xpak" % (
6568 - os.path.join(self.pkgdir, cpv.cp, pf), "1")
6569 - else:
6570 - filename = os.path.join(self.pkgdir, cpv + ".tbz2")
6571 -
6572 - return filename
6573 -
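
For orientation, a minimal sketch of the two on-disk layouts that getname() resolves to, using assumed illustrative values for PKGDIR and the package (the real paths come from self.pkgdir and the _pkg_str attributes):

    # Sketch only: default single-instance layout vs. the
    # binpkg-multi-instance layout handled above. All values assumed.
    import os

    pkgdir = "/var/cache/binpkgs"   # assumed PKGDIR
    cp = "app-misc/hello"           # category/package
    pf = "hello-1.0"                # package-version
    cpv = cp.rsplit("/", 1)[0] + "/" + pf

    single_instance = os.path.join(pkgdir, cpv + ".tbz2")
    # -> /var/cache/binpkgs/app-misc/hello-1.0.tbz2
    multi_instance = "%s-%s.xpak" % (os.path.join(pkgdir, cp, pf), 1)
    # -> /var/cache/binpkgs/app-misc/hello/hello-1.0-1.xpak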
6574 - def _is_specific_instance(self, cpv):
6575 - specific = True
6576 - try:
6577 - build_time = cpv.build_time
6578 - build_id = cpv.build_id
6579 - except AttributeError:
6580 - specific = False
6581 - else:
6582 - if build_time is None or build_id is None:
6583 - specific = False
6584 - return specific
6585 -
6586 - def _max_build_id(self, cpv):
6587 - max_build_id = 0
6588 - for x in self.dbapi.cp_list(cpv.cp):
6589 - if (x == cpv and x.build_id is not None and
6590 - x.build_id > max_build_id):
6591 - max_build_id = x.build_id
6592 - return max_build_id
6593 -
6594 - def _allocate_filename(self, cpv):
6595 - return os.path.join(self.pkgdir, cpv + ".tbz2")
6596 -
6597 - def _allocate_filename_multi(self, cpv):
6598 -
6599 - # First, get the max build_id found when _populate was
6600 - # called.
6601 - max_build_id = self._max_build_id(cpv)
6602 -
6603 - # A new package may have been added concurrently since the
6604 - # last _populate call, so increment build_id until
6605 - # we locate an unused id.
6606 - pf = catsplit(cpv)[1]
6607 - build_id = max_build_id + 1
6608 -
6609 - while True:
6610 - filename = "%s-%s.xpak" % (
6611 - os.path.join(self.pkgdir, cpv.cp, pf), build_id)
6612 - if os.path.exists(filename):
6613 - build_id += 1
6614 - else:
6615 - return filename
6616 -
6617 - @staticmethod
6618 - def _parse_build_id(filename):
6619 - build_id = -1
6620 - suffixlen = len(".xpak")
6621 - hyphen = filename.rfind("-", 0, -(suffixlen + 1))
6622 - if hyphen != -1:
6623 - build_id = filename[hyphen+1:-suffixlen]
6624 - try:
6625 - build_id = int(build_id)
6626 - except ValueError:
6627 - pass
6628 - return build_id
6629 -
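
A standalone sketch of the suffix parsing above, with an assumed multi-instance basename:

    # Sketch of _parse_build_id(): recover the trailing build id.
    filename = "hello-1.0-3.xpak"   # assumed basename
    suffixlen = len(".xpak")
    hyphen = filename.rfind("-", 0, -(suffixlen + 1))
    build_id = -1
    if hyphen != -1:
        try:
            build_id = int(filename[hyphen + 1:-suffixlen])
        except ValueError:
            pass
    assert build_id == 3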
6630 - def isremote(self, pkgname):
6631 - """Returns true if the package is kept remotely and it has not been
6632 - downloaded (or it is only partially downloaded)."""
6633 - if self._remotepkgs is None:
6634 - return False
6635 - instance_key = self.dbapi._instance_key(pkgname)
6636 - if instance_key not in self._remotepkgs:
6637 - return False
6638 - if instance_key in self._additional_pkgs:
6639 - return False
6640 - # Presence in self._remotepkgs implies that it's remote. When a
6641 - # package is downloaded, state is updated by self.inject().
6642 - return True
6643 -
6644 - def get_pkgindex_uri(self, cpv):
6645 - """Returns the URI to the Packages file for a given package."""
6646 - uri = None
6647 - if self._remotepkgs is not None:
6648 - metadata = self._remotepkgs.get(self.dbapi._instance_key(cpv))
6649 - if metadata is not None:
6650 - uri = metadata["PKGINDEX_URI"]
6651 - return uri
6652 -
6653 - def gettbz2(self, pkgname):
6654 - """Fetches the package from a remote site, if necessary. Attempts to
6655 - resume if the file appears to be partially downloaded."""
6656 - instance_key = self.dbapi._instance_key(pkgname)
6657 - tbz2_path = self.getname(pkgname)
6658 - tbz2name = os.path.basename(tbz2_path)
6659 - resume = False
6660 - if os.path.exists(tbz2_path):
6661 - if tbz2name[:-5] not in self.invalids:
6662 - return
6663 -
6664 - resume = True
6665 - writemsg(_("Resuming download of this tbz2, but it is possible that it is corrupt.\n"),
6666 - noiselevel=-1)
6667 -
6668 - mydest = os.path.dirname(self.getname(pkgname))
6669 - self._ensure_dir(mydest)
6670 - # urljoin doesn't work correctly with unrecognized protocols like sftp
6671 - if self._remote_has_index:
6672 - rel_url = self._remotepkgs[instance_key].get("PATH")
6673 - if not rel_url:
6674 - rel_url = pkgname + ".tbz2"
6675 - remote_base_uri = self._remotepkgs[instance_key]["BASE_URI"]
6676 - url = remote_base_uri.rstrip("/") + "/" + rel_url.lstrip("/")
6677 - else:
6678 - url = self.settings["PORTAGE_BINHOST"].rstrip("/") + "/" + tbz2name
6679 - protocol = urlparse(url)[0]
6680 - fcmd_prefix = "FETCHCOMMAND"
6681 - if resume:
6682 - fcmd_prefix = "RESUMECOMMAND"
6683 - fcmd = self.settings.get(fcmd_prefix + "_" + protocol.upper())
6684 - if not fcmd:
6685 - fcmd = self.settings.get(fcmd_prefix)
6686 - success = portage.getbinpkg.file_get(url, mydest, fcmd=fcmd)
6687 - if not success:
6688 - try:
6689 - os.unlink(self.getname(pkgname))
6690 - except OSError:
6691 - pass
6692 - raise portage.exception.FileNotFound(mydest)
6693 - self.inject(pkgname)
6694 -
6695 - def _load_pkgindex(self):
6696 - pkgindex = self._new_pkgindex()
6697 - try:
6698 - f = io.open(_unicode_encode(self._pkgindex_file,
6699 - encoding=_encodings['fs'], errors='strict'),
6700 - mode='r', encoding=_encodings['repo.content'],
6701 - errors='replace')
6702 - except EnvironmentError:
6703 - pass
6704 - else:
6705 - try:
6706 - pkgindex.read(f)
6707 - finally:
6708 - f.close()
6709 - return pkgindex
6710 -
6711 - def _get_digests(self, pkg):
6712 -
6713 - try:
6714 - cpv = pkg.cpv
6715 - except AttributeError:
6716 - cpv = pkg
6717 -
6718 - _instance_key = self.dbapi._instance_key
6719 - instance_key = _instance_key(cpv)
6720 - digests = {}
6721 - metadata = (None if self._remotepkgs is None else
6722 - self._remotepkgs.get(instance_key))
6723 - if metadata is None:
6724 - for d in self._load_pkgindex().packages:
6725 - if (d["CPV"] == cpv and
6726 - instance_key == _instance_key(_pkg_str(d["CPV"],
6727 - metadata=d, settings=self.settings))):
6728 - metadata = d
6729 - break
6730 -
6731 - if metadata is None:
6732 - return digests
6733 -
6734 - for k in get_valid_checksum_keys():
6735 - v = metadata.get(k)
6736 - if not v:
6737 - continue
6738 - digests[k] = v
6739 -
6740 - if "SIZE" in metadata:
6741 - try:
6742 - digests["size"] = int(metadata["SIZE"])
6743 - except ValueError:
6744 - writemsg(_("!!! Malformed SIZE attribute in remote " \
6745 - "metadata for '%s'\n") % cpv)
6746 -
6747 - return digests
6748 -
6749 - def digestCheck(self, pkg):
6750 - """
6751 - Verify digests for the given package and raise DigestException
6752 - if verification fails.
6753 - @rtype: bool
6754 - @return: True if digests could be located, False otherwise.
6755 - """
6756 -
6757 - digests = self._get_digests(pkg)
6758 -
6759 - if not digests:
6760 - return False
6761 -
6762 - try:
6763 - cpv = pkg.cpv
6764 - except AttributeError:
6765 - cpv = pkg
6766 -
6767 - pkg_path = self.getname(cpv)
6768 - hash_filter = _hash_filter(
6769 - self.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
6770 - if not hash_filter.transparent:
6771 - digests = _apply_hash_filter(digests, hash_filter)
6772 - eout = EOutput()
6773 - eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
6774 - ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
6775 - if not ok:
6776 - ok, reason = verify_all(pkg_path, digests)
6777 - if not ok:
6778 - raise portage.exception.DigestException(
6779 - (pkg_path,) + tuple(reason))
6780 -
6781 - return True
6782 -
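
A hedged usage sketch of the verification flow, assuming a populated binarytree instance bt and a package (or cpv string) pkg:

    # Illustrative only; bt and pkg are assumed to exist.
    try:
        have_digests = bt.digestCheck(pkg)
    except portage.exception.DigestException as e:
        print("!!! Digest verification failed: %s" % (e,))
    else:
        if not have_digests:
            print("No digests located; nothing was verified.")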
6783 - def getslot(self, mycatpkg):
6784 - "Get a slot for a catpkg; assume it exists."
6785 - myslot = ""
6786 - try:
6787 - myslot = self.dbapi._pkg_str(mycatpkg, None).slot
6788 - except KeyError:
6789 - pass
6790 - return myslot
6791 + "this tree scans for a list of all packages available in PKGDIR"
6792 +
6793 + def __init__(
6794 + self,
6795 + _unused=DeprecationWarning,
6796 + pkgdir=None,
6797 + virtual=DeprecationWarning,
6798 + settings=None,
6799 + ):
6800 +
6801 + if pkgdir is None:
6802 + raise TypeError("pkgdir parameter is required")
6803 +
6804 + if settings is None:
6805 + raise TypeError("settings parameter is required")
6806 +
6807 + if _unused is not DeprecationWarning:
6808 + warnings.warn(
6809 + "The first parameter of the "
6810 + "portage.dbapi.bintree.binarytree"
6811 + " constructor is now unused. Instead "
6812 + "settings['ROOT'] is used.",
6813 + DeprecationWarning,
6814 + stacklevel=2,
6815 + )
6816 +
6817 + if virtual is not DeprecationWarning:
6818 + warnings.warn(
6819 + "The 'virtual' parameter of the "
6820 + "portage.dbapi.bintree.binarytree"
6821 + " constructor is unused",
6822 + DeprecationWarning,
6823 + stacklevel=2,
6824 + )
6825 +
6826 + if True:
6827 + self.pkgdir = normalize_path(pkgdir)
6828 + # NOTE: Even if binpkg-multi-instance is disabled, it's
6829 + # still possible to access a PKGDIR which uses the
6830 + # binpkg-multi-instance layout (or mixed layout).
6831 + self._multi_instance = "binpkg-multi-instance" in settings.features
6832 + if self._multi_instance:
6833 + self._allocate_filename = self._allocate_filename_multi
6834 + self.dbapi = bindbapi(self, settings=settings)
6835 + self.update_ents = self.dbapi.update_ents
6836 + self.move_slot_ent = self.dbapi.move_slot_ent
6837 + self.populated = 0
6838 + self.tree = {}
6839 + self._binrepos_conf = None
6840 + self._remote_has_index = False
6841 + self._remotepkgs = None # remote metadata indexed by cpv
6842 + self._additional_pkgs = {}
6843 + self.invalids = []
6844 + self.settings = settings
6845 + self._pkg_paths = {}
6846 + self._populating = False
6847 + self._all_directory = os.path.isdir(os.path.join(self.pkgdir, "All"))
6848 + self._pkgindex_version = 0
6849 + self._pkgindex_hashes = ["MD5", "SHA1"]
6850 + self._pkgindex_file = os.path.join(self.pkgdir, "Packages")
6851 + self._pkgindex_keys = self.dbapi._aux_cache_keys.copy()
6852 + self._pkgindex_keys.update(["CPV", "SIZE"])
6853 + self._pkgindex_aux_keys = [
6854 + "BASE_URI",
6855 + "BDEPEND",
6856 + "BUILD_ID",
6857 + "BUILD_TIME",
6858 + "CHOST",
6859 + "DEFINED_PHASES",
6860 + "DEPEND",
6861 + "DESCRIPTION",
6862 + "EAPI",
6863 + "FETCHCOMMAND",
6864 + "IDEPEND",
6865 + "IUSE",
6866 + "KEYWORDS",
6867 + "LICENSE",
6868 + "PDEPEND",
6869 + "PKGINDEX_URI",
6870 + "PROPERTIES",
6871 + "PROVIDES",
6872 + "RDEPEND",
6873 + "repository",
6874 + "REQUIRES",
6875 + "RESTRICT",
6876 + "RESUMECOMMAND",
6877 + "SIZE",
6878 + "SLOT",
6879 + "USE",
6880 ++ # PREFIX LOCAL
6881 ++ "EPREFIX",
6882 + ]
6883 + self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
6884 + self._pkgindex_use_evaluated_keys = (
6885 + "BDEPEND",
6886 + "DEPEND",
6887 + "IDEPEND",
6888 + "LICENSE",
6889 + "RDEPEND",
6890 + "PDEPEND",
6891 + "PROPERTIES",
6892 + "RESTRICT",
6893 + )
6894 + self._pkgindex_header = None
6895 + self._pkgindex_header_keys = set(
6896 + [
6897 + "ACCEPT_KEYWORDS",
6898 + "ACCEPT_LICENSE",
6899 + "ACCEPT_PROPERTIES",
6900 + "ACCEPT_RESTRICT",
6901 + "CBUILD",
6902 + "CONFIG_PROTECT",
6903 + "CONFIG_PROTECT_MASK",
6904 + "FEATURES",
6905 + "GENTOO_MIRRORS",
6906 + "INSTALL_MASK",
6907 + "IUSE_IMPLICIT",
6908 + "USE",
6909 + "USE_EXPAND",
6910 + "USE_EXPAND_HIDDEN",
6911 + "USE_EXPAND_IMPLICIT",
6912 + "USE_EXPAND_UNPREFIXED",
6913 ++ # PREFIX LOCAL
6914 ++ "EPREFIX",
6915 + ]
6916 + )
6917 + self._pkgindex_default_pkg_data = {
6918 + "BDEPEND": "",
6919 + "BUILD_ID": "",
6920 + "BUILD_TIME": "",
6921 + "DEFINED_PHASES": "",
6922 + "DEPEND": "",
6923 + "EAPI": "0",
6924 + "IDEPEND": "",
6925 + "IUSE": "",
6926 + "KEYWORDS": "",
6927 + "LICENSE": "",
6928 + "PATH": "",
6929 + "PDEPEND": "",
6930 + "PROPERTIES": "",
6931 + "PROVIDES": "",
6932 + "RDEPEND": "",
6933 + "REQUIRES": "",
6934 + "RESTRICT": "",
6935 + "SLOT": "0",
6936 + "USE": "",
6937 + }
6938 - self._pkgindex_inherited_keys = ["CHOST", "repository"]
6939 ++ self._pkgindex_inherited_keys = ["CHOST", "repository",
6940 ++ # PREFIX LOCAL
6941 ++ "EPREFIX"]
6942 +
6943 + # Populate the header with appropriate defaults.
6944 + self._pkgindex_default_header_data = {
6945 + "CHOST": self.settings.get("CHOST", ""),
6946 + "repository": "",
6947 + }
6948 +
6949 + self._pkgindex_translated_keys = (
6950 + ("DESCRIPTION", "DESC"),
6951 + ("_mtime_", "MTIME"),
6952 + ("repository", "REPO"),
6953 + )
6954 +
6955 + self._pkgindex_allowed_pkg_keys = set(
6956 + chain(
6957 + self._pkgindex_keys,
6958 + self._pkgindex_aux_keys,
6959 + self._pkgindex_hashes,
6960 + self._pkgindex_default_pkg_data,
6961 + self._pkgindex_inherited_keys,
6962 + chain(*self._pkgindex_translated_keys),
6963 + )
6964 + )
6965 +
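
Construction now requires the pkgdir and settings keywords; a minimal sketch, assuming the global portage.settings config object is usable:

    # Minimal construction sketch (assumed environment).
    import portage
    from portage.dbapi.bintree import binarytree

    settings = portage.settings
    bt = binarytree(pkgdir=settings["PKGDIR"], settings=settings)
    bt.populate()   # index local packages only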
6966 + @property
6967 + def root(self):
6968 + warnings.warn(
6969 + "The root attribute of "
6970 + "portage.dbapi.bintree.binarytree"
6971 + " is deprecated. Use "
6972 + "settings['ROOT'] instead.",
6973 + DeprecationWarning,
6974 + stacklevel=3,
6975 + )
6976 + return self.settings["ROOT"]
6977 +
6978 + def move_ent(self, mylist, repo_match=None):
6979 + if not self.populated:
6980 + self.populate()
6981 + origcp = mylist[1]
6982 + newcp = mylist[2]
6983 + # sanity check
6984 + for atom in (origcp, newcp):
6985 + if not isjustname(atom):
6986 + raise InvalidPackageName(str(atom))
6987 + mynewcat = catsplit(newcp)[0]
6988 + origmatches = self.dbapi.cp_list(origcp)
6989 + moves = 0
6990 + if not origmatches:
6991 + return moves
6992 + for mycpv in origmatches:
6993 + mycpv_cp = mycpv.cp
6994 + if mycpv_cp != origcp:
6995 + # Ignore PROVIDE virtual match.
6996 + continue
6997 + if repo_match is not None and not repo_match(mycpv.repo):
6998 + continue
6999 +
7000 + # Use isvalidatom() to check if this move is valid for the
7001 + # EAPI (characters allowed in package names may vary).
7002 + if not isvalidatom(newcp, eapi=mycpv.eapi):
7003 + continue
7004 +
7005 + mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
7006 + myoldpkg = catsplit(mycpv)[1]
7007 + mynewpkg = catsplit(mynewcpv)[1]
7008 +
7009 + # If this update has already been applied to the same
7010 + # package build then silently continue.
7011 + applied = False
7012 + for maybe_applied in self.dbapi.match("={}".format(mynewcpv)):
7013 + if maybe_applied.build_time == mycpv.build_time:
7014 + applied = True
7015 + break
7016 +
7017 + if applied:
7018 + continue
7019 +
7020 + if (mynewpkg != myoldpkg) and self.dbapi.cpv_exists(mynewcpv):
7021 + writemsg(
7022 + _("!!! Cannot update binary: Destination exists.\n"), noiselevel=-1
7023 + )
7024 + writemsg("!!! " + mycpv + " -> " + mynewcpv + "\n", noiselevel=-1)
7025 + continue
7026 +
7027 + tbz2path = self.getname(mycpv)
7028 + if os.path.exists(tbz2path) and not os.access(tbz2path, os.W_OK):
7029 + writemsg(
7030 + _("!!! Cannot update readonly binary: %s\n") % mycpv, noiselevel=-1
7031 + )
7032 + continue
7033 +
7034 + moves += 1
7035 + mytbz2 = portage.xpak.tbz2(tbz2path)
7036 + mydata = mytbz2.get_data()
7037 + updated_items = update_dbentries([mylist], mydata, parent=mycpv)
7038 + mydata.update(updated_items)
7039 + mydata[b"PF"] = _unicode_encode(
7040 + mynewpkg + "\n", encoding=_encodings["repo.content"]
7041 + )
7042 + mydata[b"CATEGORY"] = _unicode_encode(
7043 + mynewcat + "\n", encoding=_encodings["repo.content"]
7044 + )
7045 + if mynewpkg != myoldpkg:
7046 + ebuild_data = mydata.pop(
7047 + _unicode_encode(
7048 + myoldpkg + ".ebuild", encoding=_encodings["repo.content"]
7049 + ),
7050 + None,
7051 + )
7052 + if ebuild_data is not None:
7053 + mydata[
7054 + _unicode_encode(
7055 + mynewpkg + ".ebuild", encoding=_encodings["repo.content"]
7056 + )
7057 + ] = ebuild_data
7058 +
7059 + metadata = self.dbapi._aux_cache_slot_dict()
7060 + for k in self.dbapi._aux_cache_keys:
7061 + v = mydata.get(_unicode_encode(k))
7062 + if v is not None:
7063 + v = _unicode_decode(v)
7064 + metadata[k] = " ".join(v.split())
7065 +
7066 + # Create a copy of the old version of the package and
7067 + # apply the update to it. Leave behind the old version,
7068 + # assuming that it will be deleted by eclean-pkg when its
7069 + # time comes.
7070 + mynewcpv = _pkg_str(mynewcpv, metadata=metadata, db=self.dbapi)
7071 + update_path = self.getname(mynewcpv, allocate_new=True) + ".partial"
7072 + self._ensure_dir(os.path.dirname(update_path))
7073 + update_path_lock = None
7074 + try:
7075 + update_path_lock = lockfile(update_path, wantnewlockfile=True)
7076 + copyfile(tbz2path, update_path)
7077 + mytbz2 = portage.xpak.tbz2(update_path)
7078 + mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
7079 + self.inject(mynewcpv, filename=update_path)
7080 + finally:
7081 + if update_path_lock is not None:
7082 + try:
7083 + os.unlink(update_path)
7084 + except OSError:
7085 + pass
7086 + unlockfile(update_path_lock)
7087 +
7088 + return moves
7089 +
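
From the indexing above, mylist[1] is the old category/package and mylist[2] the new one; a hypothetical call (the atoms are made up):

    # Hypothetical package move, e.g. from a profiles/updates entry.
    mylist = ["move", "app-misc/oldname", "app-misc/newname"]
    moves = bt.move_ent(mylist)   # bt: populated binarytree (assumed)
    print("%d binary package(s) updated" % moves)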
7090 + def prevent_collision(self, cpv):
7091 + warnings.warn(
7092 + "The "
7093 + "portage.dbapi.bintree.binarytree.prevent_collision "
7094 + "method is deprecated.",
7095 + DeprecationWarning,
7096 + stacklevel=2,
7097 + )
7098 +
7099 + def _ensure_dir(self, path):
7100 + """
7101 + Create the specified directory. Also, copy gid and group mode
7102 + bits from self.pkgdir if possible.
7103 + @param path: Absolute path of the directory to be created.
7104 + @type path: String
7105 + """
7106 + try:
7107 + pkgdir_st = os.stat(self.pkgdir)
7108 + except OSError:
7109 + ensure_dirs(path)
7110 + return
7111 + pkgdir_gid = pkgdir_st.st_gid
7112 + pkgdir_grp_mode = 0o2070 & pkgdir_st.st_mode
7113 + try:
7114 + ensure_dirs(path, gid=pkgdir_gid, mode=pkgdir_grp_mode, mask=0)
7115 + except PortageException:
7116 + if not os.path.isdir(path):
7117 + raise
7118 +
7119 + def _file_permissions(self, path):
7120 + try:
7121 + pkgdir_st = os.stat(self.pkgdir)
7122 + except OSError:
7123 + pass
7124 + else:
7125 + pkgdir_gid = pkgdir_st.st_gid
7126 + pkgdir_grp_mode = 0o0060 & pkgdir_st.st_mode
7127 + try:
7128 + portage.util.apply_permissions(
7129 + path, gid=pkgdir_gid, mode=pkgdir_grp_mode, mask=0
7130 + )
7131 + except PortageException:
7132 + pass
7133 +
7134 + def populate(self, getbinpkgs=False, getbinpkg_refresh=True, add_repos=()):
7135 + """
7136 + Populates the binarytree with package metadata.
7137 +
7138 + @param getbinpkgs: include remote packages
7139 + @type getbinpkgs: bool
7140 + @param getbinpkg_refresh: attempt to refresh the cache
7141 + of remote package metadata if getbinpkgs is also True
7142 + @type getbinpkg_refresh: bool
7143 + @param add_repos: additional binary package repositories
7144 + @type add_repos: sequence
7145 + """
7146 +
7147 + if self._populating:
7148 + return
7149 +
7150 + if not os.path.isdir(self.pkgdir) and not (getbinpkgs or add_repos):
7151 + self.populated = True
7152 + return
7153 +
7154 + # Clear all caches in case populate is called multiple times
7155 + # as may be the case when _global_updates calls populate()
7156 + # prior to performing package moves since it only wants to
7157 + # operate on local packages (getbinpkgs=0).
7158 + self._remotepkgs = None
7159 +
7160 + self._populating = True
7161 + try:
7162 + update_pkgindex = self._populate_local(
7163 + reindex="pkgdir-index-trusted" not in self.settings.features
7164 + )
7165 +
7166 + if update_pkgindex and self.dbapi.writable:
7167 + # If the Packages file needs to be updated, then _populate_local
7168 + # needs to be called once again while the file is locked, so
7169 + # that changes made by a concurrent process cannot be lost. This
7170 + # case is avoided when possible, in order to minimize lock
7171 + # contention.
7172 + pkgindex_lock = None
7173 + try:
7174 + pkgindex_lock = lockfile(self._pkgindex_file, wantnewlockfile=True)
7175 + update_pkgindex = self._populate_local()
7176 + if update_pkgindex:
7177 + self._pkgindex_write(update_pkgindex)
7178 + finally:
7179 + if pkgindex_lock:
7180 + unlockfile(pkgindex_lock)
7181 +
7182 + if add_repos:
7183 + self._populate_additional(add_repos)
7184 +
7185 + if getbinpkgs:
7186 + config_path = os.path.join(
7187 + self.settings["PORTAGE_CONFIGROOT"], BINREPOS_CONF_FILE
7188 + )
7189 + self._binrepos_conf = BinRepoConfigLoader((config_path,), self.settings)
7190 + if not self._binrepos_conf:
7191 + writemsg(
7192 + _(
7193 + "!!! %s is missing (or PORTAGE_BINHOST is unset), but use is requested.\n"
7194 + )
7195 + % (config_path,),
7196 + noiselevel=-1,
7197 + )
7198 + else:
7199 + self._populate_remote(getbinpkg_refresh=getbinpkg_refresh)
7200 +
7201 + finally:
7202 + self._populating = False
7203 +
7204 + self.populated = True
7205 +
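
Callers typically guard on the populated flag, as the methods below do; a sketch that also pulls in remote metadata from binrepos.conf:

    # Assumed instance bt; include remote packages and refresh the cache.
    if not bt.populated:
        bt.populate(getbinpkgs=True, getbinpkg_refresh=True)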
7206 + def _populate_local(self, reindex=True):
7207 + """
7208 + Populates the binarytree with local package metadata.
7209 +
7210 + @param reindex: detect added / modified / removed packages and
7211 + regenerate the index file if necessary
7212 + @type reindex: bool
7213 + """
7214 + self.dbapi.clear()
7215 + _instance_key = self.dbapi._instance_key
7216 + # In order to minimize disk I/O, we never compute digests here.
7217 + # Therefore we exclude hashes from the minimum_keys, so that
7218 + # the Packages file will not be needlessly re-written due to
7219 + # missing digests.
7220 + minimum_keys = self._pkgindex_keys.difference(self._pkgindex_hashes)
7221 + if True:
7222 + pkg_paths = {}
7223 + self._pkg_paths = pkg_paths
7224 + dir_files = {}
7225 + if reindex:
7226 + for parent, dir_names, file_names in os.walk(self.pkgdir):
7227 + relative_parent = parent[len(self.pkgdir) + 1 :]
7228 + dir_files[relative_parent] = file_names
7229 +
7230 + pkgindex = self._load_pkgindex()
7231 + if not self._pkgindex_version_supported(pkgindex):
7232 + pkgindex = self._new_pkgindex()
7233 + metadata = {}
7234 + basename_index = {}
7235 + for d in pkgindex.packages:
7236 + cpv = _pkg_str(
7237 + d["CPV"], metadata=d, settings=self.settings, db=self.dbapi
7238 + )
7239 + d["CPV"] = cpv
7240 + metadata[_instance_key(cpv)] = d
7241 + path = d.get("PATH")
7242 + if not path:
7243 + path = cpv + ".tbz2"
7244 +
7245 + if reindex:
7246 + basename = os.path.basename(path)
7247 + basename_index.setdefault(basename, []).append(d)
7248 + else:
7249 + instance_key = _instance_key(cpv)
7250 + pkg_paths[instance_key] = path
7251 + self.dbapi.cpv_inject(cpv)
7252 +
7253 + update_pkgindex = False
7254 + for mydir, file_names in dir_files.items():
7255 + try:
7256 + mydir = _unicode_decode(
7257 + mydir, encoding=_encodings["fs"], errors="strict"
7258 + )
7259 + except UnicodeDecodeError:
7260 + continue
7261 + for myfile in file_names:
7262 + try:
7263 + myfile = _unicode_decode(
7264 + myfile, encoding=_encodings["fs"], errors="strict"
7265 + )
7266 + except UnicodeDecodeError:
7267 + continue
7268 + if not myfile.endswith(SUPPORTED_XPAK_EXTENSIONS):
7269 + continue
7270 + mypath = os.path.join(mydir, myfile)
7271 + full_path = os.path.join(self.pkgdir, mypath)
7272 + s = os.lstat(full_path)
7273 +
7274 + if not stat.S_ISREG(s.st_mode):
7275 + continue
7276 +
7277 + # Validate data from the package index and try to avoid
7278 + # reading the xpak if possible.
7279 + possibilities = basename_index.get(myfile)
7280 + if possibilities:
7281 + match = None
7282 + for d in possibilities:
7283 + try:
7284 + if int(d["_mtime_"]) != s[stat.ST_MTIME]:
7285 + continue
7286 + except (KeyError, ValueError):
7287 + continue
7288 + try:
7289 + if int(d["SIZE"]) != int(s.st_size):
7290 + continue
7291 + except (KeyError, ValueError):
7292 + continue
7293 + if not minimum_keys.difference(d):
7294 + match = d
7295 + break
7296 + if match:
7297 + mycpv = match["CPV"]
7298 + instance_key = _instance_key(mycpv)
7299 + pkg_paths[instance_key] = mypath
7300 + # update the path if the package has been moved
7301 + oldpath = d.get("PATH")
7302 + if oldpath and oldpath != mypath:
7303 + update_pkgindex = True
7304 + # Omit PATH if it is the default path for
7305 + # the current Packages format version.
7306 + if mypath != mycpv + ".tbz2":
7307 + d["PATH"] = mypath
7308 + if not oldpath:
7309 + update_pkgindex = True
7310 + else:
7311 + d.pop("PATH", None)
7312 + if oldpath:
7313 + update_pkgindex = True
7314 + self.dbapi.cpv_inject(mycpv)
7315 + continue
7316 + if not os.access(full_path, os.R_OK):
7317 + writemsg(
7318 + _("!!! Permission denied to read " "binary package: '%s'\n")
7319 + % full_path,
7320 + noiselevel=-1,
7321 + )
7322 + self.invalids.append(myfile[:-5])
7323 + continue
7324 + pkg_metadata = self._read_metadata(
7325 + full_path,
7326 + s,
7327 + keys=chain(self.dbapi._aux_cache_keys, ("PF", "CATEGORY")),
7328 + )
7329 + mycat = pkg_metadata.get("CATEGORY", "")
7330 + mypf = pkg_metadata.get("PF", "")
7331 + slot = pkg_metadata.get("SLOT", "")
7332 + mypkg = myfile[:-5]
7333 + if not mycat or not mypf or not slot:
7334 + # old-style or corrupt package
7335 + writemsg(
7336 + _("\n!!! Invalid binary package: '%s'\n") % full_path,
7337 + noiselevel=-1,
7338 + )
7339 + missing_keys = []
7340 + if not mycat:
7341 + missing_keys.append("CATEGORY")
7342 + if not mypf:
7343 + missing_keys.append("PF")
7344 + if not slot:
7345 + missing_keys.append("SLOT")
7346 + msg = []
7347 + if missing_keys:
7348 + missing_keys.sort()
7349 + msg.append(
7350 + _("Missing metadata key(s): %s.")
7351 + % ", ".join(missing_keys)
7352 + )
7353 + msg.append(
7354 + _(
7355 + " This binary package is not "
7356 + "recoverable and should be deleted."
7357 + )
7358 + )
7359 + for line in textwrap.wrap("".join(msg), 72):
7360 + writemsg("!!! %s\n" % line, noiselevel=-1)
7361 + self.invalids.append(mypkg)
7362 + continue
7363 +
7364 + multi_instance = False
7365 + invalid_name = False
7366 + build_id = None
7367 + if myfile.endswith(".xpak"):
7368 + multi_instance = True
7369 + build_id = self._parse_build_id(myfile)
7370 + if build_id < 1:
7371 + invalid_name = True
7372 + elif myfile != "%s-%s.xpak" % (mypf, build_id):
7373 + invalid_name = True
7374 + else:
7375 + mypkg = mypkg[: -len(str(build_id)) - 1]
7376 + elif myfile != mypf + ".tbz2":
7377 + invalid_name = True
7378 +
7379 + if invalid_name:
7380 + writemsg(
7381 + _("\n!!! Binary package name is " "invalid: '%s'\n")
7382 + % full_path,
7383 + noiselevel=-1,
7384 + )
7385 + continue
7386 +
7387 + if pkg_metadata.get("BUILD_ID"):
7388 + try:
7389 + build_id = int(pkg_metadata["BUILD_ID"])
7390 + except ValueError:
7391 + writemsg(
7392 + _("!!! Binary package has " "invalid BUILD_ID: '%s'\n")
7393 + % full_path,
7394 + noiselevel=-1,
7395 + )
7396 + continue
7397 + else:
7398 + build_id = None
7399 +
7400 + if multi_instance:
7401 + name_split = catpkgsplit("%s/%s" % (mycat, mypf))
7402 + if (
7403 + name_split is None
7404 + or tuple(catsplit(mydir)) != name_split[:2]
7405 + ):
7406 + continue
7407 + elif mycat != mydir and mydir != "All":
7408 + continue
7409 + if mypkg != mypf.strip():
7410 + continue
7411 + mycpv = mycat + "/" + mypkg
7412 + if not self.dbapi._category_re.match(mycat):
7413 + writemsg(
7414 + _(
7415 + "!!! Binary package has an "
7416 + "unrecognized category: '%s'\n"
7417 + )
7418 + % full_path,
7419 + noiselevel=-1,
7420 + )
7421 + writemsg(
7422 + _(
7423 + "!!! '%s' has a category that is not"
7424 + " listed in %setc/portage/categories\n"
7425 + )
7426 + % (mycpv, self.settings["PORTAGE_CONFIGROOT"]),
7427 + noiselevel=-1,
7428 + )
7429 + continue
7430 + if build_id is not None:
7431 + pkg_metadata["BUILD_ID"] = str(build_id)
7432 + pkg_metadata["SIZE"] = str(s.st_size)
7433 + # Discard items used only for validation above.
7434 + pkg_metadata.pop("CATEGORY")
7435 + pkg_metadata.pop("PF")
7436 + mycpv = _pkg_str(
7437 + mycpv,
7438 + metadata=self.dbapi._aux_cache_slot_dict(pkg_metadata),
7439 + db=self.dbapi,
7440 + )
7441 + pkg_paths[_instance_key(mycpv)] = mypath
7442 + self.dbapi.cpv_inject(mycpv)
7443 + update_pkgindex = True
7444 + d = metadata.get(_instance_key(mycpv), pkgindex._pkg_slot_dict())
7445 + if d:
7446 + try:
7447 + if int(d["_mtime_"]) != s[stat.ST_MTIME]:
7448 + d.clear()
7449 + except (KeyError, ValueError):
7450 + d.clear()
7451 + if d:
7452 + try:
7453 + if int(d["SIZE"]) != int(s.st_size):
7454 + d.clear()
7455 + except (KeyError, ValueError):
7456 + d.clear()
7457 +
7458 + for k in self._pkgindex_allowed_pkg_keys:
7459 + v = pkg_metadata.get(k)
7460 + if v:
7461 + d[k] = v
7462 + d["CPV"] = mycpv
7463 +
7464 + try:
7465 + self._eval_use_flags(mycpv, d)
7466 + except portage.exception.InvalidDependString:
7467 + writemsg(
7468 + _("!!! Invalid binary package: '%s'\n")
7469 + % self.getname(mycpv),
7470 + noiselevel=-1,
7471 + )
7472 + self.dbapi.cpv_remove(mycpv)
7473 + del pkg_paths[_instance_key(mycpv)]
7474 +
7475 + # record location if it's non-default
7476 + if mypath != mycpv + ".tbz2":
7477 + d["PATH"] = mypath
7478 + else:
7479 + d.pop("PATH", None)
7480 + metadata[_instance_key(mycpv)] = d
7481 +
7482 + if reindex:
7483 + for instance_key in list(metadata):
7484 + if instance_key not in pkg_paths:
7485 + del metadata[instance_key]
7486 +
7487 + if update_pkgindex:
7488 + del pkgindex.packages[:]
7489 + pkgindex.packages.extend(iter(metadata.values()))
7490 + self._update_pkgindex_header(pkgindex.header)
7491 +
7492 + self._pkgindex_header = {}
7493 + self._merge_pkgindex_header(pkgindex.header, self._pkgindex_header)
7494 +
7495 + return pkgindex if update_pkgindex else None
7496 +
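
The index-trust fast path above compares mtime and size before falling back to reading the xpak; the same comparison as a standalone sketch, where d is one Packages entry and s an os.stat_result:

    import stat

    def index_entry_is_current(d, s):
        # Sketch of the validation above: True if the cached entry d
        # still matches the on-disk stat result s.
        try:
            return (int(d["_mtime_"]) == s[stat.ST_MTIME]
                    and int(d["SIZE"]) == int(s.st_size))
        except (KeyError, ValueError):
            return False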
7497 + def _populate_remote(self, getbinpkg_refresh=True):
7498 +
7499 + self._remote_has_index = False
7500 + self._remotepkgs = {}
7501 + # Order by descending priority.
7502 + for repo in reversed(list(self._binrepos_conf.values())):
7503 + base_url = repo.sync_uri
7504 + parsed_url = urlparse(base_url)
7505 + host = parsed_url.netloc
7506 + port = parsed_url.port
7507 + user = None
7508 + passwd = None
7509 + user_passwd = ""
7510 + if "@" in host:
7511 + user, host = host.split("@", 1)
7512 + user_passwd = user + "@"
7513 + if ":" in user:
7514 + user, passwd = user.split(":", 1)
7515 +
7516 + if port is not None:
7517 + port_str = ":%s" % (port,)
7518 + if host.endswith(port_str):
7519 + host = host[: -len(port_str)]
7520 + pkgindex_file = os.path.join(
7521 + self.settings["EROOT"],
7522 + CACHE_PATH,
7523 + "binhost",
7524 + host,
7525 + parsed_url.path.lstrip("/"),
7526 + "Packages",
7527 + )
7528 + pkgindex = self._new_pkgindex()
7529 + try:
7530 + f = io.open(
7531 + _unicode_encode(
7532 + pkgindex_file, encoding=_encodings["fs"], errors="strict"
7533 + ),
7534 + mode="r",
7535 + encoding=_encodings["repo.content"],
7536 + errors="replace",
7537 + )
7538 + try:
7539 + pkgindex.read(f)
7540 + finally:
7541 + f.close()
7542 + except EnvironmentError as e:
7543 + if e.errno != errno.ENOENT:
7544 + raise
7545 + local_timestamp = pkgindex.header.get("TIMESTAMP", None)
7546 + try:
7547 + download_timestamp = float(pkgindex.header.get("DOWNLOAD_TIMESTAMP", 0))
7548 + except ValueError:
7549 + download_timestamp = 0
7550 + remote_timestamp = None
7551 + rmt_idx = self._new_pkgindex()
7552 + proc = None
7553 + tmp_filename = None
7554 + try:
7555 + # urlparse.urljoin() only works correctly with recognized
7556 + # protocols and requires the base url to have a trailing
7557 + # slash, so join manually...
7558 + url = base_url.rstrip("/") + "/Packages"
7559 + f = None
7560 +
7561 + if not getbinpkg_refresh and local_timestamp:
7562 + raise UseCachedCopyOfRemoteIndex()
7563 +
7564 + try:
7565 + ttl = float(pkgindex.header.get("TTL", 0))
7566 + except ValueError:
7567 + pass
7568 + else:
7569 + if (
7570 + download_timestamp
7571 + and ttl
7572 + and download_timestamp + ttl > time.time()
7573 + ):
7574 + raise UseCachedCopyOfRemoteIndex()
7575 +
7576 + # Set proxy settings for _urlopen -> urllib_request
7577 + proxies = {}
7578 + for proto in ("http", "https"):
7579 + value = self.settings.get(proto + "_proxy")
7580 + if value is not None:
7581 + proxies[proto] = value
7582 +
7583 + # Don't use urlopen for https, unless
7584 + # PEP 476 is supported (bug #469888).
7585 + if repo.fetchcommand is None and (
7586 + parsed_url.scheme not in ("https",) or _have_pep_476()
7587 + ):
7588 + try:
7589 + f = _urlopen(
7590 + url, if_modified_since=local_timestamp, proxies=proxies
7591 + )
7592 + if hasattr(f, "headers") and f.headers.get("timestamp", ""):
7593 + remote_timestamp = f.headers.get("timestamp")
7594 + except IOError as err:
7595 + if (
7596 + hasattr(err, "code") and err.code == 304
7597 + ): # not modified (since local_timestamp)
7598 + raise UseCachedCopyOfRemoteIndex()
7599 +
7600 + if parsed_url.scheme in ("ftp", "http", "https"):
7601 + # This protocol is supposedly supported by urlopen,
7602 + # so apparently there's a problem with the url
7603 + # or a bug in urlopen.
7604 + if self.settings.get("PORTAGE_DEBUG", "0") != "0":
7605 + traceback.print_exc()
7606 +
7607 + raise
7608 + except ValueError:
7609 + raise ParseError(
7610 + "Invalid Portage BINHOST value '%s'" % url.lstrip()
7611 + )
7612 +
7613 + if f is None:
7614 +
7615 + path = parsed_url.path.rstrip("/") + "/Packages"
7616 +
7617 + if repo.fetchcommand is None and parsed_url.scheme == "ssh":
7618 + # Use a pipe so that we can terminate the download
7619 + # early if we detect that the TIMESTAMP header
7620 + # matches that of the cached Packages file.
7621 + ssh_args = ["ssh"]
7622 + if port is not None:
7623 + ssh_args.append("-p%s" % (port,))
7624 + # NOTE: shlex evaluates embedded quotes
7625 + ssh_args.extend(
7626 + portage.util.shlex_split(
7627 + self.settings.get("PORTAGE_SSH_OPTS", "")
7628 + )
7629 + )
7630 + ssh_args.append(user_passwd + host)
7631 + ssh_args.append("--")
7632 + ssh_args.append("cat")
7633 + ssh_args.append(path)
7634 +
7635 + proc = subprocess.Popen(ssh_args, stdout=subprocess.PIPE)
7636 + f = proc.stdout
7637 + else:
7638 + if repo.fetchcommand is None:
7639 + setting = "FETCHCOMMAND_" + parsed_url.scheme.upper()
7640 + fcmd = self.settings.get(setting)
7641 + if not fcmd:
7642 + fcmd = self.settings.get("FETCHCOMMAND")
7643 + if not fcmd:
7644 + raise EnvironmentError("FETCHCOMMAND is unset")
7645 + else:
7646 + fcmd = repo.fetchcommand
7647 +
7648 + fd, tmp_filename = tempfile.mkstemp()
7649 + tmp_dirname, tmp_basename = os.path.split(tmp_filename)
7650 + os.close(fd)
7651 +
7652 + fcmd_vars = {
7653 + "DISTDIR": tmp_dirname,
7654 + "FILE": tmp_basename,
7655 + "URI": url,
7656 + }
7657 +
7658 + for k in ("PORTAGE_SSH_OPTS",):
7659 + v = self.settings.get(k)
7660 + if v is not None:
7661 + fcmd_vars[k] = v
7662 +
7663 + success = portage.getbinpkg.file_get(
7664 + fcmd=fcmd, fcmd_vars=fcmd_vars
7665 + )
7666 + if not success:
7667 + raise EnvironmentError("%s failed" % (fcmd,))
7668 + f = open(tmp_filename, "rb")
7669 +
7670 + f_dec = codecs.iterdecode(
7671 + f, _encodings["repo.content"], errors="replace"
7672 + )
7673 + try:
7674 + rmt_idx.readHeader(f_dec)
7675 + if (
7676 + not remote_timestamp
7677 + ): # in case it had not been read from HTTP header
7678 + remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
7679 + if not remote_timestamp:
7680 + # no timestamp in the header, something's wrong
7681 + pkgindex = None
7682 + writemsg(
7683 + _(
7684 + "\n\n!!! Binhost package index "
7685 + " has no TIMESTAMP field.\n"
7686 + ),
7687 + noiselevel=-1,
7688 + )
7689 + else:
7690 + if not self._pkgindex_version_supported(rmt_idx):
7691 + writemsg(
7692 + _(
7693 + "\n\n!!! Binhost package index version"
7694 + " is not supported: '%s'\n"
7695 + )
7696 + % rmt_idx.header.get("VERSION"),
7697 + noiselevel=-1,
7698 + )
7699 + pkgindex = None
7700 + elif local_timestamp != remote_timestamp:
7701 + rmt_idx.readBody(f_dec)
7702 + pkgindex = rmt_idx
7703 + finally:
7704 + # Timeout after 5 seconds, in case close() blocks
7705 + # indefinitely (see bug #350139).
7706 + try:
7707 + try:
7708 + AlarmSignal.register(5)
7709 + f.close()
7710 + finally:
7711 + AlarmSignal.unregister()
7712 + except AlarmSignal:
7713 + writemsg(
7714 + "\n\n!!! %s\n"
7715 + % _("Timed out while closing connection to binhost"),
7716 + noiselevel=-1,
7717 + )
7718 + except UseCachedCopyOfRemoteIndex:
7719 + writemsg_stdout("\n")
7720 + writemsg_stdout(
7721 + colorize(
7722 + "GOOD",
7723 + _("Local copy of remote index is up-to-date and will be used."),
7724 + )
7725 + + "\n"
7726 + )
7727 + rmt_idx = pkgindex
7728 + except EnvironmentError as e:
7729 + # This includes URLError which is raised for SSL
7730 + # certificate errors when PEP 476 is supported.
7731 + writemsg(
7732 + _("\n\n!!! Error fetching binhost package" " info from '%s'\n")
7733 + % _hide_url_passwd(base_url)
7734 + )
7735 + # With Python 2, the EnvironmentError message may
7736 + # contain bytes or unicode, so use str to ensure
7737 + # safety with all locales (bug #532784).
7738 + try:
7739 + error_msg = str(e)
7740 + except UnicodeDecodeError as uerror:
7741 + error_msg = str(uerror.object, encoding="utf_8", errors="replace")
7742 + writemsg("!!! %s\n\n" % error_msg)
7743 + del e
7744 + pkgindex = None
7745 + if proc is not None:
7746 + if proc.poll() is None:
7747 + proc.kill()
7748 + proc.wait()
7749 + proc = None
7750 + if tmp_filename is not None:
7751 + try:
7752 + os.unlink(tmp_filename)
7753 + except OSError:
7754 + pass
7755 + if pkgindex is rmt_idx:
7756 + pkgindex.modified = False # don't update the header
7757 + pkgindex.header["DOWNLOAD_TIMESTAMP"] = "%d" % time.time()
7758 + try:
7759 + ensure_dirs(os.path.dirname(pkgindex_file))
7760 + f = atomic_ofstream(pkgindex_file)
7761 + pkgindex.write(f)
7762 + f.close()
7763 + except (IOError, PortageException):
7764 + if os.access(os.path.dirname(pkgindex_file), os.W_OK):
7765 + raise
7766 + # The current user doesn't have permission to cache the
7767 + # file, but that's alright.
7768 + if pkgindex:
7769 + remote_base_uri = pkgindex.header.get("URI", base_url)
7770 + for d in pkgindex.packages:
7771 + cpv = _pkg_str(
7772 + d["CPV"], metadata=d, settings=self.settings, db=self.dbapi
7773 + )
7774 + # Local package instances override remote instances
7775 + # with the same instance_key.
7776 + if self.dbapi.cpv_exists(cpv):
7777 + continue
7778 +
7779 + d["CPV"] = cpv
7780 + d["BASE_URI"] = remote_base_uri
7781 + d["PKGINDEX_URI"] = url
7782 + # FETCHCOMMAND and RESUMECOMMAND may be specified
7783 + # by binrepos.conf, and otherwise ensure that they
7784 + # do not propagate from the Packages index since
7785 + # it may be unsafe to execute remotely specified
7786 + # commands.
7787 + if repo.fetchcommand is None:
7788 + d.pop("FETCHCOMMAND", None)
7789 + else:
7790 + d["FETCHCOMMAND"] = repo.fetchcommand
7791 + if repo.resumecommand is None:
7792 + d.pop("RESUMECOMMAND", None)
7793 + else:
7794 + d["RESUMECOMMAND"] = repo.resumecommand
7795 + self._remotepkgs[self.dbapi._instance_key(cpv)] = d
7796 + self.dbapi.cpv_inject(cpv)
7797 +
7798 + self._remote_has_index = True
7799 + self._merge_pkgindex_header(pkgindex.header, self._pkgindex_header)
7800 +
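
The TTL handling above decides whether the cached Packages copy may be reused without refetching; condensed into a sketch:

    import time

    def cached_index_is_fresh(header, download_timestamp):
        # Sketch of the TTL check above; header is a parsed Packages header.
        try:
            ttl = float(header.get("TTL", 0))
        except ValueError:
            return False
        return bool(download_timestamp and ttl
                    and download_timestamp + ttl > time.time())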
7801 + def _populate_additional(self, repos):
7802 + for repo in repos:
7803 + aux_keys = list(set(chain(repo._aux_cache_keys, repo._pkg_str_aux_keys)))
7804 + for cpv in repo.cpv_all():
7805 + metadata = dict(zip(aux_keys, repo.aux_get(cpv, aux_keys)))
7806 + pkg = _pkg_str(cpv, metadata=metadata, settings=repo.settings, db=repo)
7807 + instance_key = self.dbapi._instance_key(pkg)
7808 + self._additional_pkgs[instance_key] = pkg
7809 + self.dbapi.cpv_inject(pkg)
7810 +
7811 + def inject(self, cpv, filename=None):
7812 + """Add a freshly built package to the database. This updates
7813 + $PKGDIR/Packages with the new package metadata (including MD5).
7814 + @param cpv: The cpv of the new package to inject
7815 + @type cpv: string
7816 + @param filename: File path of the package to inject, or None if it's
7817 + already in the location returned by getname()
7818 + @type filename: string
7819 + @rtype: _pkg_str or None
7820 + @return: A _pkg_str instance on success, or None on failure.
7821 + """
7822 + mycat, mypkg = catsplit(cpv)
7823 + if not self.populated:
7824 + self.populate()
7825 + if filename is None:
7826 + full_path = self.getname(cpv)
7827 + else:
7828 + full_path = filename
7829 + try:
7830 + s = os.stat(full_path)
7831 + except OSError as e:
7832 + if e.errno != errno.ENOENT:
7833 + raise
7834 + del e
7835 + writemsg(
7836 + _("!!! Binary package does not exist: '%s'\n") % full_path,
7837 + noiselevel=-1,
7838 + )
7839 + return
7840 + metadata = self._read_metadata(full_path, s)
7841 + invalid_depend = False
7842 + try:
7843 + self._eval_use_flags(cpv, metadata)
7844 + except portage.exception.InvalidDependString:
7845 + invalid_depend = True
7846 + if invalid_depend or not metadata.get("SLOT"):
7847 + writemsg(_("!!! Invalid binary package: '%s'\n") % full_path, noiselevel=-1)
7848 + return
7849 +
7850 + fetched = False
7851 + try:
7852 + build_id = cpv.build_id
7853 + except AttributeError:
7854 + build_id = None
7855 + else:
7856 + instance_key = self.dbapi._instance_key(cpv)
7857 + if instance_key in self.dbapi.cpvdict:
7858 + # This means we've been called by aux_update (or
7859 + # similar). The instance key typically changes (due to
7860 + # file modification), so we need to discard existing
7861 + # instance key references.
7862 + self.dbapi.cpv_remove(cpv)
7863 + self._pkg_paths.pop(instance_key, None)
7864 + if self._remotepkgs is not None:
7865 + fetched = self._remotepkgs.pop(instance_key, None)
7866 +
7867 + cpv = _pkg_str(cpv, metadata=metadata, settings=self.settings, db=self.dbapi)
7868 +
7869 + # Reread the Packages index (in case it's been changed by another
7870 + # process) and then update it, all while holding a lock.
7871 + pkgindex_lock = None
7872 + try:
7873 + os.makedirs(self.pkgdir, exist_ok=True)
7874 + pkgindex_lock = lockfile(self._pkgindex_file, wantnewlockfile=1)
7875 + if filename is not None:
7876 + new_filename = self.getname(cpv, allocate_new=True)
7877 + try:
7878 + samefile = os.path.samefile(filename, new_filename)
7879 + except OSError:
7880 + samefile = False
7881 + if not samefile:
7882 + self._ensure_dir(os.path.dirname(new_filename))
7883 + _movefile(filename, new_filename, mysettings=self.settings)
7884 + full_path = new_filename
7885 +
7886 + basename = os.path.basename(full_path)
7887 + pf = catsplit(cpv)[1]
7888 + if build_id is None and not fetched and basename.endswith(".xpak"):
7889 + # Apply the newly assigned BUILD_ID. This is intended
7890 + # to occur only for locally built packages. If the
7891 + # package was fetched, we want to preserve its
7892 + # attributes, so that we can later distinguish that it
7893 + # is identical to its remote counterpart.
7894 + build_id = self._parse_build_id(basename)
7895 + metadata["BUILD_ID"] = str(build_id)
7896 + cpv = _pkg_str(
7897 + cpv, metadata=metadata, settings=self.settings, db=self.dbapi
7898 + )
7899 + binpkg = portage.xpak.tbz2(full_path)
7900 + binary_data = binpkg.get_data()
7901 + binary_data[b"BUILD_ID"] = _unicode_encode(metadata["BUILD_ID"])
7902 + binpkg.recompose_mem(portage.xpak.xpak_mem(binary_data))
7903 +
7904 + self._file_permissions(full_path)
7905 + pkgindex = self._load_pkgindex()
7906 + if not self._pkgindex_version_supported(pkgindex):
7907 + pkgindex = self._new_pkgindex()
7908 +
7909 + d = self._inject_file(pkgindex, cpv, full_path)
7910 + self._update_pkgindex_header(pkgindex.header)
7911 + self._pkgindex_write(pkgindex)
7912 +
7913 + finally:
7914 + if pkgindex_lock:
7915 + unlockfile(pkgindex_lock)
7916 +
7917 + # This is used to record BINPKGMD5 in the installed package
7918 + # database, for a package that has just been built.
7919 + cpv._metadata["MD5"] = d["MD5"]
7920 +
7921 + return cpv
7922 +
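
A hedged usage sketch; inject() returns a _pkg_str on success and None on failure (the cpv and archive path are assumed):

    # Assumed values only.
    new_pkg = bt.inject("app-misc/hello-1.0",
                        filename="/tmp/hello-1.0.tbz2")
    if new_pkg is None:
        print("Injection failed: missing or invalid binary package.")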
7923 + def _read_metadata(self, filename, st, keys=None):
7924 + """
7925 + Read metadata from a binary package. The returned metadata
7926 + dictionary will contain empty strings for any values that
7927 + are undefined (this is important because the _pkg_str class
7928 + distinguishes between missing and undefined values).
7929 +
7930 + @param filename: File path of the binary package
7931 + @type filename: string
7932 + @param st: stat result for the binary package
7933 + @type st: os.stat_result
7934 + @param keys: optional list of specific metadata keys to retrieve
7935 + @type keys: iterable
7936 + @rtype: dict
7937 + @return: package metadata
7938 + """
7939 + if keys is None:
7940 + keys = self.dbapi._aux_cache_keys
7941 + metadata = self.dbapi._aux_cache_slot_dict()
7942 + else:
7943 + metadata = {}
7944 + binary_metadata = portage.xpak.tbz2(filename).get_data()
7945 + for k in keys:
7946 + if k == "_mtime_":
7947 + metadata[k] = str(st[stat.ST_MTIME])
7948 + elif k == "SIZE":
7949 + metadata[k] = str(st.st_size)
7950 + else:
7951 + v = binary_metadata.get(_unicode_encode(k))
7952 + if v is None:
7953 + if k == "EAPI":
7954 + metadata[k] = "0"
7955 + else:
7956 + metadata[k] = ""
7957 + else:
7958 + v = _unicode_decode(v)
7959 + metadata[k] = " ".join(v.split())
7960 + return metadata
7961 +
7962 + def _inject_file(self, pkgindex, cpv, filename):
7963 + """
7964 + Add a package to internal data structures, and add an
7965 + entry to the given pkgindex.
7966 + @param pkgindex: The PackageIndex instance to which an entry
7967 + will be added.
7968 + @type pkgindex: PackageIndex
7969 + @param cpv: A _pkg_str instance corresponding to the package
7970 + being injected.
7971 + @type cpv: _pkg_str
7972 + @param filename: Absolute file path of the package to inject.
7973 + @type filename: string
7974 + @rtype: dict
7975 + @return: A dict corresponding to the new entry which has been
7976 + added to pkgindex. This may be used to access the checksums
7977 + which have just been generated.
7978 + """
7979 + # Update state for future isremote calls.
7980 + instance_key = self.dbapi._instance_key(cpv)
7981 + if self._remotepkgs is not None:
7982 + self._remotepkgs.pop(instance_key, None)
7983 +
7984 + self.dbapi.cpv_inject(cpv)
7985 + self._pkg_paths[instance_key] = filename[len(self.pkgdir) + 1 :]
7986 + d = self._pkgindex_entry(cpv)
7987 +
7988 + # If found, remove package(s) with duplicate path.
7989 + path = d.get("PATH", "")
7990 + for i in range(len(pkgindex.packages) - 1, -1, -1):
7991 + d2 = pkgindex.packages[i]
7992 + if path and path == d2.get("PATH"):
7993 + # Handle path collisions in $PKGDIR/All
7994 + # when CPV is not identical.
7995 + del pkgindex.packages[i]
7996 + elif cpv == d2.get("CPV"):
7997 + if path == d2.get("PATH", ""):
7998 + del pkgindex.packages[i]
7999 +
8000 + pkgindex.packages.append(d)
8001 + return d
8002 +
8003 + def _pkgindex_write(self, pkgindex):
8004 + contents = codecs.getwriter(_encodings["repo.content"])(io.BytesIO())
8005 + pkgindex.write(contents)
8006 + contents = contents.getvalue()
8007 + atime = mtime = int(pkgindex.header["TIMESTAMP"])
8008 + output_files = [
8009 + (atomic_ofstream(self._pkgindex_file, mode="wb"), self._pkgindex_file, None)
8010 + ]
8011 +
8012 + if "compress-index" in self.settings.features:
8013 + gz_fname = self._pkgindex_file + ".gz"
8014 + fileobj = atomic_ofstream(gz_fname, mode="wb")
8015 + output_files.append(
8016 + (
8017 + GzipFile(filename="", mode="wb", fileobj=fileobj, mtime=mtime),
8018 + gz_fname,
8019 + fileobj,
8020 + )
8021 + )
8022 +
8023 + for f, fname, f_close in output_files:
8024 + f.write(contents)
8025 + f.close()
8026 + if f_close is not None:
8027 + f_close.close()
8028 + self._file_permissions(fname)
8029 + # a few seconds may have elapsed since TIMESTAMP
8030 + os.utime(fname, (atime, mtime))
8031 +
8032 + def _pkgindex_entry(self, cpv):
8033 + """
8034 + Performs checksums, and gets size and mtime via lstat.
8035 + Raises InvalidDependString if necessary.
8036 + @rtype: dict
8037 + @return: a dict containing the entry for the given cpv.
8038 + """
8039 +
8040 + pkg_path = self.getname(cpv)
8041 +
8042 + d = dict(cpv._metadata.items())
8043 + d.update(perform_multiple_checksums(pkg_path, hashes=self._pkgindex_hashes))
8044 +
8045 + d["CPV"] = cpv
8046 + st = os.lstat(pkg_path)
8047 + d["_mtime_"] = str(st[stat.ST_MTIME])
8048 + d["SIZE"] = str(st.st_size)
8049 +
8050 + rel_path = pkg_path[len(self.pkgdir) + 1 :]
8051 + # record location if it's non-default
8052 + if rel_path != cpv + ".tbz2":
8053 + d["PATH"] = rel_path
8054 +
8055 + return d
8056 +
8057 + def _new_pkgindex(self):
8058 + return portage.getbinpkg.PackageIndex(
8059 + allowed_pkg_keys=self._pkgindex_allowed_pkg_keys,
8060 + default_header_data=self._pkgindex_default_header_data,
8061 + default_pkg_data=self._pkgindex_default_pkg_data,
8062 + inherited_keys=self._pkgindex_inherited_keys,
8063 + translated_keys=self._pkgindex_translated_keys,
8064 + )
8065 +
8066 + @staticmethod
8067 + def _merge_pkgindex_header(src, dest):
8068 + """
8069 + Merge Packages header settings from src to dest, in order to
8070 + propagate implicit IUSE and USE_EXPAND settings for use with
8071 + binary and installed packages. Values are appended, so the
8072 + result is a union of elements from src and dest.
8073 +
8074 + Pull in ARCH if it's not defined, since it's used for validation
8075 + by emerge's profile_check function, and also for KEYWORDS logic
8076 + in the _getmaskingstatus function.
8077 +
8078 + @param src: source mapping (read only)
8079 + @type src: Mapping
8080 + @param dest: destination mapping
8081 + @type dest: MutableMapping
8082 + """
8083 + for k, v in iter_iuse_vars(src):
8084 + v_before = dest.get(k)
8085 + if v_before is not None:
8086 + merged_values = set(v_before.split())
8087 + merged_values.update(v.split())
8088 + v = " ".join(sorted(merged_values))
8089 + dest[k] = v
8090 +
8091 + if "ARCH" not in dest and "ARCH" in src:
8092 + dest["ARCH"] = src["ARCH"]
8093 +
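
The merge is a whitespace-token union per key; an illustrative before/after, assuming iter_iuse_vars() yields USE_EXPAND for these mappings:

    # Illustrative mappings (assumed).
    src = {"USE_EXPAND": "PYTHON_TARGETS CPU_FLAGS_X86", "ARCH": "amd64"}
    dest = {"USE_EXPAND": "CPU_FLAGS_X86 VIDEO_CARDS"}
    # After _merge_pkgindex_header(src, dest):
    #   dest["USE_EXPAND"] == "CPU_FLAGS_X86 PYTHON_TARGETS VIDEO_CARDS"
    #   dest["ARCH"] == "amd64"   # copied because dest had no ARCH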
8094 + def _propagate_config(self, config):
8095 + """
8096 + Propagate implicit IUSE and USE_EXPAND settings from the binary
8097 + package database to a config instance. If settings are not
8098 + available to propagate, then this will do nothing and return
8099 + False.
8100 +
8101 + @param config: config instance
8102 + @type config: portage.config
8103 + @rtype: bool
8104 + @return: True if settings successfully propagated, False if settings
8105 + were not available to propagate.
8106 + """
8107 + if self._pkgindex_header is None:
8108 + return False
8109 +
8110 + self._merge_pkgindex_header(
8111 + self._pkgindex_header, config.configdict["defaults"]
8112 + )
8113 + config.regenerate()
8114 + config._init_iuse()
8115 + return True
8116 +
8117 + def _update_pkgindex_header(self, header):
8118 + """
8119 + Add useful settings to the Packages file header, for use by
8120 + binhost clients.
8121 +
8122 + This will return silently if the current profile is invalid or
8123 + does not have an IUSE_IMPLICIT variable, since it's useful to
8124 + maintain a cache of implicit IUSE settings for use with binary
8125 + packages.
8126 + """
8127 + if not (self.settings.profile_path and "IUSE_IMPLICIT" in self.settings):
8128 + header.setdefault("VERSION", str(self._pkgindex_version))
8129 + return
8130 +
8131 + portdir = normalize_path(os.path.realpath(self.settings["PORTDIR"]))
8132 + profiles_base = os.path.join(portdir, "profiles") + os.path.sep
8133 + if self.settings.profile_path:
8134 + profile_path = normalize_path(os.path.realpath(self.settings.profile_path))
8135 + if profile_path.startswith(profiles_base):
8136 + profile_path = profile_path[len(profiles_base) :]
8137 + header["PROFILE"] = profile_path
8138 + header["VERSION"] = str(self._pkgindex_version)
8139 + base_uri = self.settings.get("PORTAGE_BINHOST_HEADER_URI")
8140 + if base_uri:
8141 + header["URI"] = base_uri
8142 + else:
8143 + header.pop("URI", None)
8144 + for k in (
8145 + list(self._pkgindex_header_keys)
8146 + + self.settings.get("USE_EXPAND_IMPLICIT", "").split()
8147 + + self.settings.get("USE_EXPAND_UNPREFIXED", "").split()
8148 + ):
8149 + v = self.settings.get(k, None)
8150 + if v:
8151 + header[k] = v
8152 + else:
8153 + header.pop(k, None)
8154 +
8155 + # These values may be useful for using a binhost without
8156 + # having a local copy of the profile (bug #470006).
8157 + for k in self.settings.get("USE_EXPAND_IMPLICIT", "").split():
8158 + k = "USE_EXPAND_VALUES_" + k
8159 + v = self.settings.get(k)
8160 + if v:
8161 + header[k] = v
8162 + else:
8163 + header.pop(k, None)
8164 +
8165 + def _pkgindex_version_supported(self, pkgindex):
8166 + version = pkgindex.header.get("VERSION")
8167 + if version:
8168 + try:
8169 + if int(version) <= self._pkgindex_version:
8170 + return True
8171 + except ValueError:
8172 + pass
8173 + return False
8174 +
8175 + def _eval_use_flags(self, cpv, metadata):
8176 + use = frozenset(metadata.get("USE", "").split())
8177 + for k in self._pkgindex_use_evaluated_keys:
8178 + if k.endswith("DEPEND"):
8179 + token_class = Atom
8180 + else:
8181 + token_class = None
8182 +
8183 + deps = metadata.get(k)
8184 + if deps is None:
8185 + continue
8186 + try:
8187 + deps = use_reduce(deps, uselist=use, token_class=token_class)
8188 + deps = paren_enclose(deps)
8189 + except portage.exception.InvalidDependString as e:
8190 + writemsg("%s: %s\n" % (k, e), noiselevel=-1)
8191 + raise
8192 + metadata[k] = deps
8193 +
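Conceptually, _eval_use_flags() collapses USE-conditional groups in the *DEPEND values for the package's enabled USE set. A toy evaluator for the single-level "flag? ( ... )" form only (portage's use_reduce additionally handles nesting, ||-groups, and atom validation):

    def eval_use_conditionals(dep, use):
        # handles only a flat "flag? ( atoms... )" conditional
        tokens = dep.split()
        out = []
        i = 0
        while i < len(tokens):
            tok = tokens[i]
            if tok.endswith("?") and i + 1 < len(tokens) and tokens[i + 1] == "(":
                j = tokens.index(")", i + 2)
                if tok[:-1] in use:
                    out.extend(tokens[i + 2 : j])
                i = j + 1
            else:
                out.append(tok)
                i += 1
        return " ".join(out)

    assert eval_use_conditionals(
        "ssl? ( dev-libs/openssl ) dev-libs/libfoo", {"ssl"}
    ) == "dev-libs/openssl dev-libs/libfoo"
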
8194 + def exists_specific(self, cpv):
8195 + if not self.populated:
8196 + self.populate()
8197 + return self.dbapi.match(
8198 + dep_expand("=" + cpv, mydb=self.dbapi, settings=self.settings)
8199 + )
8200 +
8201 + def dep_bestmatch(self, mydep):
8202 + "compatibility method -- all matches, not just visible ones"
8203 + if not self.populated:
8204 + self.populate()
8205 + writemsg("\n\n", 1)
8206 + writemsg("mydep: %s\n" % mydep, 1)
8207 + mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
8208 + writemsg("mydep: %s\n" % mydep, 1)
8209 + mykey = dep_getkey(mydep)
8210 + writemsg("mykey: %s\n" % mykey, 1)
8211 + mymatch = best(match_from_list(mydep, self.dbapi.cp_list(mykey)))
8212 + writemsg("mymatch: %s\n" % mymatch, 1)
8213 + if mymatch is None:
8214 + return ""
8215 + return mymatch
8216 +
8217 + def getname(self, cpv, allocate_new=None):
8218 + """Returns a file location for this package.
8219 + If cpv has both build_time and build_id attributes, then the
8220 + path to the specific corresponding instance is returned.
8221 + Otherwise, allocate a new path and return that. When allocating
8222 + a new path, behavior depends on the binpkg-multi-instance
8223 + FEATURES setting.
8224 + """
8225 + if not self.populated:
8226 + self.populate()
8227 +
8228 + try:
8229 + cpv.cp
8230 + except AttributeError:
8231 + cpv = _pkg_str(cpv)
8232 +
8233 + filename = None
8234 + if allocate_new:
8235 + filename = self._allocate_filename(cpv)
8236 + elif self._is_specific_instance(cpv):
8237 + instance_key = self.dbapi._instance_key(cpv)
8238 + path = self._pkg_paths.get(instance_key)
8239 + if path is not None:
8240 + filename = os.path.join(self.pkgdir, path)
8241 +
8242 + if filename is None and not allocate_new:
8243 + try:
8244 + instance_key = self.dbapi._instance_key(cpv, support_string=True)
8245 + except KeyError:
8246 + pass
8247 + else:
8248 + filename = self._pkg_paths.get(instance_key)
8249 + if filename is not None:
8250 + filename = os.path.join(self.pkgdir, filename)
8251 + elif instance_key in self._additional_pkgs:
8252 + return None
8253 +
8254 + if filename is None:
8255 + if self._multi_instance:
8256 + pf = catsplit(cpv)[1]
8257 + filename = "%s-%s.xpak" % (os.path.join(self.pkgdir, cpv.cp, pf), "1")
8258 + else:
8259 + filename = os.path.join(self.pkgdir, cpv + ".tbz2")
8260 +
8261 + return filename
8262 +
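Path shapes getname() can produce, assuming a hypothetical PKGDIR of /var/cache/binpkgs:

    import os

    pkgdir = "/var/cache/binpkgs"
    cpv = "app-misc/example-1.0"
    cp, pf = "app-misc/example", "example-1.0"

    multi_instance = "%s-%s.xpak" % (os.path.join(pkgdir, cp, pf), 1)
    # -> /var/cache/binpkgs/app-misc/example/example-1.0-1.xpak
    classic = os.path.join(pkgdir, cpv + ".tbz2")
    # -> /var/cache/binpkgs/app-misc/example-1.0.tbz2
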
8263 + def _is_specific_instance(self, cpv):
8264 + specific = True
8265 + try:
8266 + build_time = cpv.build_time
8267 + build_id = cpv.build_id
8268 + except AttributeError:
8269 + specific = False
8270 + else:
8271 + if build_time is None or build_id is None:
8272 + specific = False
8273 + return specific
8274 +
8275 + def _max_build_id(self, cpv):
8276 + max_build_id = 0
8277 + for x in self.dbapi.cp_list(cpv.cp):
8278 + if x == cpv and x.build_id is not None and x.build_id > max_build_id:
8279 + max_build_id = x.build_id
8280 + return max_build_id
8281 +
8282 + def _allocate_filename(self, cpv):
8283 + return os.path.join(self.pkgdir, cpv + ".tbz2")
8284 +
8285 + def _allocate_filename_multi(self, cpv):
8286 +
8287 + # First, get the max build_id found when _populate was
8288 + # called.
8289 + max_build_id = self._max_build_id(cpv)
8290 +
8291 + # A new package may have been added concurrently since the
8292 + # last _populate call, so increment build_id until
8293 + # we locate an unused id.
8294 + pf = catsplit(cpv)[1]
8295 + build_id = max_build_id + 1
8296 +
8297 + while True:
8298 + filename = "%s-%s.xpak" % (os.path.join(self.pkgdir, cpv.cp, pf), build_id)
8299 + if os.path.exists(filename):
8300 + build_id += 1
8301 + else:
8302 + return filename
8303 +
8304 + @staticmethod
8305 + def _parse_build_id(filename):
8306 + build_id = -1
8307 + suffixlen = len(".xpak")
8308 + hyphen = filename.rfind("-", 0, -(suffixlen + 1))
8309 + if hyphen != -1:
8310 + try:
8311 + build_id = int(filename[hyphen + 1 : -suffixlen])
8312 + except ValueError:
8313 + pass
8314 + return build_id
8315 +
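_parse_build_id() in isolation, with the behavior it guarantees (-1 means no parseable trailing id):

    def parse_build_id(filename):
        # stand-alone copy of the staticmethod above, for illustration
        build_id = -1
        suffixlen = len(".xpak")
        hyphen = filename.rfind("-", 0, -(suffixlen + 1))
        if hyphen != -1:
            try:
                build_id = int(filename[hyphen + 1 : -suffixlen])
            except ValueError:
                pass
        return build_id

    assert parse_build_id("example-1.0-3.xpak") == 3
    assert parse_build_id("example-1.0.xpak") == -1
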
8316 + def isremote(self, pkgname):
8317 + """Returns true if the package is kept remotely and it has not been
8318 + downloaded (or it is only partially downloaded)."""
8319 + if self._remotepkgs is None:
8320 + return False
8321 + instance_key = self.dbapi._instance_key(pkgname)
8322 + if instance_key not in self._remotepkgs:
8323 + return False
8324 + if instance_key in self._additional_pkgs:
8325 + return False
8326 + # Presence in self._remotepkgs implies that it's remote. When a
8327 + # package is downloaded, state is updated by self.inject().
8328 + return True
8329 +
8330 + def get_pkgindex_uri(self, cpv):
8331 + """Returns the URI to the Packages file for a given package."""
8332 + uri = None
8333 + if self._remotepkgs is not None:
8334 + metadata = self._remotepkgs.get(self.dbapi._instance_key(cpv))
8335 + if metadata is not None:
8336 + uri = metadata["PKGINDEX_URI"]
8337 + return uri
8338 +
8339 + def gettbz2(self, pkgname):
8340 + """Fetches the package from a remote site, if necessary. Attempts to
8341 + resume if the file appears to be partially downloaded."""
8342 + instance_key = self.dbapi._instance_key(pkgname)
8343 + tbz2_path = self.getname(pkgname)
8344 + tbz2name = os.path.basename(tbz2_path)
8345 + resume = False
8346 + if os.path.exists(tbz2_path):
8347 + if tbz2name[:-5] not in self.invalids:
8348 + return
8349 +
8350 + resume = True
8351 + writemsg(
8352 + _(
8353 + "Resuming download of this tbz2, but it is possible that it is corrupt.\n"
8354 + ),
8355 + noiselevel=-1,
8356 + )
8357 +
8358 + mydest = os.path.dirname(self.getname(pkgname))
8359 + self._ensure_dir(mydest)
8360 + # urljoin doesn't work correctly with unrecognized protocols like sftp
8361 + if self._remote_has_index:
8362 + rel_url = self._remotepkgs[instance_key].get("PATH")
8363 + if not rel_url:
8364 + rel_url = pkgname + ".tbz2"
8365 + remote_base_uri = self._remotepkgs[instance_key]["BASE_URI"]
8366 + url = remote_base_uri.rstrip("/") + "/" + rel_url.lstrip("/")
8367 + else:
8368 + url = self.settings["PORTAGE_BINHOST"].rstrip("/") + "/" + tbz2name
8369 + protocol = urlparse(url)[0]
8370 + fcmd_prefix = "FETCHCOMMAND"
8371 + if resume:
8372 + fcmd_prefix = "RESUMECOMMAND"
8373 + fcmd = self.settings.get(fcmd_prefix + "_" + protocol.upper())
8374 + if not fcmd:
8375 + fcmd = self.settings.get(fcmd_prefix)
8376 + success = portage.getbinpkg.file_get(url, mydest, fcmd=fcmd)
8377 + if not success:
8378 + try:
8379 + os.unlink(self.getname(pkgname))
8380 + except OSError:
8381 + pass
8382 + raise portage.exception.FileNotFound(mydest)
8383 + self.inject(pkgname)
8384 +
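The fetch-command lookup in gettbz2() reduced to a sketch: the protocol-specific {FETCH,RESUME}COMMAND_<PROTO> variable wins over the generic one. Here settings is a plain dict standing in for the portage config object:

    from urllib.parse import urlparse

    def pick_fcmd(settings, url, resume=False):
        # e.g. FETCHCOMMAND_HTTPS beats FETCHCOMMAND for https:// URLs
        prefix = "RESUMECOMMAND" if resume else "FETCHCOMMAND"
        protocol = urlparse(url)[0]
        return settings.get(prefix + "_" + protocol.upper()) or settings.get(prefix)
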
8385 + def _load_pkgindex(self):
8386 + pkgindex = self._new_pkgindex()
8387 + try:
8388 + f = io.open(
8389 + _unicode_encode(
8390 + self._pkgindex_file, encoding=_encodings["fs"], errors="strict"
8391 + ),
8392 + mode="r",
8393 + encoding=_encodings["repo.content"],
8394 + errors="replace",
8395 + )
8396 + except EnvironmentError:
8397 + pass
8398 + else:
8399 + try:
8400 + pkgindex.read(f)
8401 + finally:
8402 + f.close()
8403 + return pkgindex
8404 +
8405 + def _get_digests(self, pkg):
8406 +
8407 + try:
8408 + cpv = pkg.cpv
8409 + except AttributeError:
8410 + cpv = pkg
8411 +
8412 + _instance_key = self.dbapi._instance_key
8413 + instance_key = _instance_key(cpv)
8414 + digests = {}
8415 + metadata = (
8416 + None if self._remotepkgs is None else self._remotepkgs.get(instance_key)
8417 + )
8418 + if metadata is None:
8419 + for d in self._load_pkgindex().packages:
8420 + if d["CPV"] == cpv and instance_key == _instance_key(
8421 + _pkg_str(d["CPV"], metadata=d, settings=self.settings)
8422 + ):
8423 + metadata = d
8424 + break
8425 +
8426 + if metadata is None:
8427 + return digests
8428 +
8429 + for k in get_valid_checksum_keys():
8430 + v = metadata.get(k)
8431 + if not v:
8432 + continue
8433 + digests[k] = v
8434 +
8435 + if "SIZE" in metadata:
8436 + try:
8437 + digests["size"] = int(metadata["SIZE"])
8438 + except ValueError:
8439 + writemsg(
8440 + _("!!! Malformed SIZE attribute in remote " "metadata for '%s'\n")
8441 + % cpv
8442 + )
8443 +
8444 + return digests
8445 +
8446 + def digestCheck(self, pkg):
8447 + """
8448 + Verify digests for the given package and raise DigestException
8449 + if verification fails.
8450 + @rtype: bool
8451 + @return: True if digests could be located, False otherwise.
8452 + """
8453 +
8454 + digests = self._get_digests(pkg)
8455 +
8456 + if not digests:
8457 + return False
8458 +
8459 + try:
8460 + cpv = pkg.cpv
8461 + except AttributeError:
8462 + cpv = pkg
8463 +
8464 + pkg_path = self.getname(cpv)
8465 + hash_filter = _hash_filter(self.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
8466 + if not hash_filter.transparent:
8467 + digests = _apply_hash_filter(digests, hash_filter)
8468 + eout = EOutput()
8469 + eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
8470 + ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
8471 + if not ok:
8472 + ok, reason = verify_all(pkg_path, digests)
8473 + if not ok:
8474 + raise portage.exception.DigestException((pkg_path,) + tuple(reason))
8475 +
8476 + return True
8477 +
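digestCheck() ultimately delegates to portage.checksum.verify_all; a stdlib-only sketch of one such comparison, checking a file's BLAKE2B against an expected hex digest:

    import hashlib

    def blake2b_matches(path, expected_hex):
        # stream the file so large binpkgs are not loaded into memory at once
        h = hashlib.blake2b()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(65536), b""):
                h.update(chunk)
        return h.hexdigest() == expected_hex
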
8478 + def getslot(self, mycatpkg):
8479 + "Get a slot for a catpkg; assume it exists."
8480 + myslot = ""
8481 + try:
8482 + myslot = self.dbapi._pkg_str(mycatpkg, None).slot
8483 + except KeyError:
8484 + pass
8485 + return myslot
8486 diff --cc lib/portage/dbapi/vartree.py
8487 index 749963fa9,8ffb23b1c..73202f625
8488 --- a/lib/portage/dbapi/vartree.py
8489 +++ b/lib/portage/dbapi/vartree.py
8490 @@@ -1,62 -1,70 +1,72 @@@
8491 # Copyright 1998-2021 Gentoo Authors
8492 # Distributed under the terms of the GNU General Public License v2
8493
8494 - __all__ = [
8495 - "vardbapi", "vartree", "dblink"] + \
8496 - ["write_contents", "tar_contents"]
8497 + __all__ = ["vardbapi", "vartree", "dblink"] + ["write_contents", "tar_contents"]
8498
8499 import portage
8500 - portage.proxy.lazyimport.lazyimport(globals(),
8501 - 'hashlib:md5',
8502 - 'portage.checksum:_perform_md5_merge@perform_md5',
8503 - 'portage.data:portage_gid,portage_uid,secpass',
8504 - 'portage.dbapi.dep_expand:dep_expand',
8505 - 'portage.dbapi._MergeProcess:MergeProcess',
8506 - 'portage.dbapi._SyncfsProcess:SyncfsProcess',
8507 - 'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list,' + \
8508 - 'use_reduce,_slot_separator,_repo_separator',
8509 - 'portage.eapi:_get_eapi_attrs',
8510 - 'portage.elog:collect_ebuild_messages,collect_messages,' + \
8511 - 'elog_process,_merge_logentries',
8512 - 'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
8513 - 'portage.output:bold,colorize',
8514 - 'portage.package.ebuild.doebuild:doebuild_environment,' + \
8515 - '_merge_unicode_error',
8516 - 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
8517 - 'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
8518 - 'portage.process:find_binary',
8519 - 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
8520 - 'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
8521 - 'grabdict,normalize_path,new_protect_filename',
8522 - 'portage.util._compare_files:compare_files',
8523 - 'portage.util.digraph:digraph',
8524 - 'portage.util.env_update:env_update',
8525 - 'portage.util.install_mask:install_mask_dir,InstallMask,_raise_exc',
8526 - 'portage.util.listdir:dircache,listdir',
8527 - 'portage.util.movefile:movefile',
8528 - 'portage.util.path:first_existing,iter_parents',
8529 - 'portage.util.writeable_check:get_ro_checker',
8530 - 'portage.util._xattr:xattr',
8531 - 'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
8532 - 'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
8533 - 'portage.util._dyn_libs.LinkageMapMachO:LinkageMapMachO',
8534 - 'portage.util._dyn_libs.LinkageMapPeCoff:LinkageMapPeCoff',
8535 - 'portage.util._dyn_libs.LinkageMapXCoff:LinkageMapXCoff',
8536 - 'portage.util._dyn_libs.NeededEntry:NeededEntry',
8537 - 'portage.util._async.SchedulerInterface:SchedulerInterface',
8538 - 'portage.util._eventloop.global_event_loop:global_event_loop',
8539 - 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,vercmp,' + \
8540 - '_get_slot_re,_pkgsplit@pkgsplit,_pkg_str,_unknown_repo',
8541 - 'subprocess',
8542 - 'tarfile',
8543 +
8544 + portage.proxy.lazyimport.lazyimport(
8545 + globals(),
8546 + "hashlib:md5",
8547 + "portage.checksum:_perform_md5_merge@perform_md5",
8548 + "portage.data:portage_gid,portage_uid,secpass",
8549 + "portage.dbapi.dep_expand:dep_expand",
8550 + "portage.dbapi._MergeProcess:MergeProcess",
8551 + "portage.dbapi._SyncfsProcess:SyncfsProcess",
8552 + "portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list,"
8553 + + "use_reduce,_slot_separator,_repo_separator",
8554 + "portage.eapi:_get_eapi_attrs",
8555 + "portage.elog:collect_ebuild_messages,collect_messages,"
8556 + + "elog_process,_merge_logentries",
8557 + "portage.locks:lockdir,unlockdir,lockfile,unlockfile",
8558 + "portage.output:bold,colorize",
8559 + "portage.package.ebuild.doebuild:doebuild_environment," + "_merge_unicode_error",
8560 + "portage.package.ebuild.prepare_build_dirs:prepare_build_dirs",
8561 + "portage.package.ebuild._ipc.QueryCommand:QueryCommand",
8562 + "portage.process:find_binary",
8563 + "portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,"
8564 + + "writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,"
8565 + + "grabdict,normalize_path,new_protect_filename",
8566 + "portage.util._compare_files:compare_files",
8567 + "portage.util.digraph:digraph",
8568 + "portage.util.env_update:env_update",
8569 + "portage.util.install_mask:install_mask_dir,InstallMask,_raise_exc",
8570 + "portage.util.listdir:dircache,listdir",
8571 + "portage.util.movefile:movefile",
8572 + "portage.util.path:first_existing,iter_parents",
8573 + "portage.util.writeable_check:get_ro_checker",
8574 + "portage.util._xattr:xattr",
8575 + "portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry",
8576 + "portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap",
8577 + "portage.util._dyn_libs.NeededEntry:NeededEntry",
8578 + "portage.util._async.SchedulerInterface:SchedulerInterface",
8579 + "portage.util._eventloop.global_event_loop:global_event_loop",
8580 + "portage.versions:best,catpkgsplit,catsplit,cpv_getkey,vercmp,"
8581 + + "_get_slot_re,_pkgsplit@pkgsplit,_pkg_str,_unknown_repo",
8582 + "subprocess",
8583 + "tarfile",
8584 )
8585
8586 - from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
8587 - MERGING_IDENTIFIER, PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH, EPREFIX
8588 + from portage.const import (
8589 + CACHE_PATH,
8590 + CONFIG_MEMORY_FILE,
8591 + MERGING_IDENTIFIER,
8592 + PORTAGE_PACKAGE_ATOM,
8593 + PRIVATE_PATH,
8594 + VDB_PATH,
8595 ++ # PREFIX LOCAL
8596 ++ EPREFIX,
8597 + )
8598 from portage.dbapi import dbapi
8599 - from portage.exception import CommandNotFound, \
8600 - InvalidData, InvalidLocation, InvalidPackageName, \
8601 - FileNotFound, PermissionDenied, UnsupportedAPIException
8602 + from portage.exception import (
8603 + CommandNotFound,
8604 + InvalidData,
8605 + InvalidLocation,
8606 + InvalidPackageName,
8607 + FileNotFound,
8608 + PermissionDenied,
8609 + UnsupportedAPIException,
8610 + )
8611 from portage.localization import _
8612 from portage.util.futures import asyncio
8613
8614 @@@ -105,5697 -112,6397 +114,6415 @@@ import warning
8615
8616 class vardbapi(dbapi):
8617
8618 - _excluded_dirs = ["CVS", "lost+found"]
8619 - _excluded_dirs = [re.escape(x) for x in _excluded_dirs]
8620 - _excluded_dirs = re.compile(r'^(\..*|' + MERGING_IDENTIFIER + '.*|' + \
8621 - "|".join(_excluded_dirs) + r')$')
8622 -
8623 - _aux_cache_version = "1"
8624 - _owners_cache_version = "1"
8625 -
8626 - # Number of uncached packages to trigger cache update, since
8627 - # it's wasteful to update it for every vdb change.
8628 - _aux_cache_threshold = 5
8629 -
8630 - _aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
8631 - _aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
8632 - _pkg_str_aux_keys = dbapi._pkg_str_aux_keys + ("BUILD_ID", "BUILD_TIME", "_mtime_")
8633 -
8634 - def __init__(self, _unused_param=DeprecationWarning,
8635 - categories=None, settings=None, vartree=None):
8636 - """
8637 - The categories parameter is unused since the dbapi class
8638 - now has a categories property that is generated from the
8639 - available packages.
8640 - """
8641 -
8642 - # Used by emerge to check whether any packages
8643 - # have been added or removed.
8644 - self._pkgs_changed = False
8645 -
8646 - # The _aux_cache_threshold doesn't work as designed
8647 - # if the cache is flushed from a subprocess, so we
8648 - # use this to avoid wasteful vdb cache updates.
8649 - self._flush_cache_enabled = True
8650 -
8651 - #cache for category directory mtimes
8652 - self.mtdircache = {}
8653 -
8654 - #cache for dependency checks
8655 - self.matchcache = {}
8656 -
8657 - #cache for cp_list results
8658 - self.cpcache = {}
8659 -
8660 - self.blockers = None
8661 - if settings is None:
8662 - settings = portage.settings
8663 - self.settings = settings
8664 -
8665 - if _unused_param is not DeprecationWarning:
8666 - warnings.warn("The first parameter of the "
8667 - "portage.dbapi.vartree.vardbapi"
8668 - " constructor is now unused. Instead "
8669 - "settings['ROOT'] is used.",
8670 - DeprecationWarning, stacklevel=2)
8671 -
8672 - self._eroot = settings['EROOT']
8673 - self._dbroot = self._eroot + VDB_PATH
8674 - self._lock = None
8675 - self._lock_count = 0
8676 -
8677 - self._conf_mem_file = self._eroot + CONFIG_MEMORY_FILE
8678 - self._fs_lock_obj = None
8679 - self._fs_lock_count = 0
8680 - self._slot_locks = {}
8681 -
8682 - if vartree is None:
8683 - vartree = portage.db[settings['EROOT']]['vartree']
8684 - self.vartree = vartree
8685 - self._aux_cache_keys = set(
8686 - ["BDEPEND", "BUILD_TIME", "CHOST", "COUNTER", "DEPEND",
8687 - "DESCRIPTION", "EAPI", "HOMEPAGE",
8688 - "BUILD_ID", "IDEPEND", "IUSE", "KEYWORDS",
8689 - "LICENSE", "PDEPEND", "PROPERTIES", "RDEPEND",
8690 - "repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
8691 - "PROVIDES", "REQUIRES"
8692 - ])
8693 - self._aux_cache_obj = None
8694 - self._aux_cache_filename = os.path.join(self._eroot,
8695 - CACHE_PATH, "vdb_metadata.pickle")
8696 - self._cache_delta_filename = os.path.join(self._eroot,
8697 - CACHE_PATH, "vdb_metadata_delta.json")
8698 - self._cache_delta = VdbMetadataDelta(self)
8699 - self._counter_path = os.path.join(self._eroot,
8700 - CACHE_PATH, "counter")
8701 -
8702 - self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
8703 - os.path.join(self._eroot, PRIVATE_PATH, "preserved_libs_registry"))
8704 - self._linkmap = LinkageMap(self)
8705 - chost = self.settings.get('CHOST')
8706 - if not chost:
8707 - chost = 'lunix?' # this happens when profiles are not available
8708 - if chost.find('darwin') >= 0:
8709 - self._linkmap = LinkageMapMachO(self)
8710 - elif chost.find('interix') >= 0 or chost.find('winnt') >= 0:
8711 - self._linkmap = LinkageMapPeCoff(self)
8712 - elif chost.find('aix') >= 0:
8713 - self._linkmap = LinkageMapXCoff(self)
8714 - else:
8715 - self._linkmap = LinkageMap(self)
8716 - self._owners = self._owners_db(self)
8717 -
8718 - self._cached_counter = None
8719 -
8720 - @property
8721 - def writable(self):
8722 - """
8723 - Check if var/db/pkg is writable, or permissions are sufficient
8724 - to create it if it does not exist yet.
8725 - @rtype: bool
8726 - @return: True if var/db/pkg is writable or can be created,
8727 - False otherwise
8728 - """
8729 - return os.access(first_existing(self._dbroot), os.W_OK)
8730 -
8731 - @property
8732 - def root(self):
8733 - warnings.warn("The root attribute of "
8734 - "portage.dbapi.vartree.vardbapi"
8735 - " is deprecated. Use "
8736 - "settings['ROOT'] instead.",
8737 - DeprecationWarning, stacklevel=3)
8738 - return self.settings['ROOT']
8739 -
8740 - def getpath(self, mykey, filename=None):
8741 - # This is an optimized hotspot, so don't use unicode-wrapped
8742 - # os module and don't use os.path.join().
8743 - rValue = self._eroot + VDB_PATH + _os.sep + mykey
8744 - if filename is not None:
8745 - # If filename is always relative, we can do just
8746 - # rValue += _os.sep + filename
8747 - rValue = _os.path.join(rValue, filename)
8748 - return rValue
8749 -
8750 - def lock(self):
8751 - """
8752 - Acquire a reentrant lock, blocking, for cooperation with concurrent
8753 - processes. State is inherited by subprocesses, allowing subprocesses
8754 - to reenter a lock that was acquired by a parent process. However,
8755 - a lock can be released only by the same process that acquired it.
8756 - """
8757 - if self._lock_count:
8758 - self._lock_count += 1
8759 - else:
8760 - if self._lock is not None:
8761 - raise AssertionError("already locked")
8762 - # At least the parent needs to exist for the lock file.
8763 - ensure_dirs(self._dbroot)
8764 - self._lock = lockdir(self._dbroot)
8765 - self._lock_count += 1
8766 -
8767 - def unlock(self):
8768 - """
8769 - Release a lock, decrementing the recursion level. Each unlock() call
8770 - must be matched with a prior lock() call, or else an AssertionError
8771 - will be raised if unlock() is called while not locked.
8772 - """
8773 - if self._lock_count > 1:
8774 - self._lock_count -= 1
8775 - else:
8776 - if self._lock is None:
8777 - raise AssertionError("not locked")
8778 - self._lock_count = 0
8779 - unlockdir(self._lock)
8780 - self._lock = None
8781 -
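Usage pattern for the reentrant lock()/unlock() pair above, as a sketch; vardb stands for a vardbapi instance:

    def locked_update(vardb, fn):
        # every lock() must be matched by an unlock() in the same process
        vardb.lock()
        try:
            return fn()
        finally:
            vardb.unlock()
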
8782 - def _fs_lock(self):
8783 - """
8784 - Acquire a reentrant lock, blocking, for cooperation with concurrent
8785 - processes.
8786 - """
8787 - if self._fs_lock_count < 1:
8788 - if self._fs_lock_obj is not None:
8789 - raise AssertionError("already locked")
8790 - try:
8791 - self._fs_lock_obj = lockfile(self._conf_mem_file)
8792 - except InvalidLocation:
8793 - self.settings._init_dirs()
8794 - self._fs_lock_obj = lockfile(self._conf_mem_file)
8795 - self._fs_lock_count += 1
8796 -
8797 - def _fs_unlock(self):
8798 - """
8799 - Release a lock, decrementing the recursion level.
8800 - """
8801 - if self._fs_lock_count <= 1:
8802 - if self._fs_lock_obj is None:
8803 - raise AssertionError("not locked")
8804 - unlockfile(self._fs_lock_obj)
8805 - self._fs_lock_obj = None
8806 - self._fs_lock_count -= 1
8807 -
8808 - def _slot_lock(self, slot_atom):
8809 - """
8810 - Acquire a slot lock (reentrant).
8811 -
8812 - WARNING: The vardbapi._slot_lock method is not safe to call
8813 - in the main process when that process is scheduling
8814 - install/uninstall tasks in parallel, since the locks would
8815 - be inherited by child processes. In order to avoid this sort
8816 - of problem, this method should be called in a subprocess
8817 - (typically spawned by the MergeProcess class).
8818 - """
8819 - lock, counter = self._slot_locks.get(slot_atom, (None, 0))
8820 - if lock is None:
8821 - lock_path = self.getpath("%s:%s" % (slot_atom.cp, slot_atom.slot))
8822 - ensure_dirs(os.path.dirname(lock_path))
8823 - lock = lockfile(lock_path, wantnewlockfile=True)
8824 - self._slot_locks[slot_atom] = (lock, counter + 1)
8825 -
8826 - def _slot_unlock(self, slot_atom):
8827 - """
8828 - Release a slot lock (or decrement the recursion level).
8829 - """
8830 - lock, counter = self._slot_locks.get(slot_atom, (None, 0))
8831 - if lock is None:
8832 - raise AssertionError("not locked")
8833 - counter -= 1
8834 - if counter == 0:
8835 - unlockfile(lock)
8836 - del self._slot_locks[slot_atom]
8837 - else:
8838 - self._slot_locks[slot_atom] = (lock, counter)
8839 -
8840 - def _bump_mtime(self, cpv):
8841 - """
8842 - This is called before and after any modifications, so that consumers
8843 - can use directory mtimes to validate caches. See bug #290428.
8844 - """
8845 - base = self._eroot + VDB_PATH
8846 - cat = catsplit(cpv)[0]
8847 - catdir = base + _os.sep + cat
8848 - t = time.time()
8849 - t = (t, t)
8850 - try:
8851 - for x in (catdir, base):
8852 - os.utime(x, t)
8853 - except OSError:
8854 - ensure_dirs(catdir)
8855 -
8856 - def cpv_exists(self, mykey, myrepo=None):
8857 - "Tells us whether an actual ebuild exists on disk (no masking)"
8858 - return os.path.exists(self.getpath(mykey))
8859 -
8860 - def cpv_counter(self, mycpv):
8861 - "This method will grab the COUNTER. Returns a counter value."
8862 - try:
8863 - return int(self.aux_get(mycpv, ["COUNTER"])[0])
8864 - except (KeyError, ValueError):
8865 - pass
8866 - writemsg_level(_("portage: COUNTER for %s was corrupted; " \
8867 - "resetting to value of 0\n") % (mycpv,),
8868 - level=logging.ERROR, noiselevel=-1)
8869 - return 0
8870 -
8871 - def cpv_inject(self, mycpv):
8872 - "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
8873 - ensure_dirs(self.getpath(mycpv))
8874 - counter = self.counter_tick(mycpv=mycpv)
8875 - # write local package counter so that emerge clean does the right thing
8876 - write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
8877 -
8878 - def isInjected(self, mycpv):
8879 - if self.cpv_exists(mycpv):
8880 - if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
8881 - return True
8882 - if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
8883 - return True
8884 - return False
8885 -
8886 - def move_ent(self, mylist, repo_match=None):
8887 - origcp = mylist[1]
8888 - newcp = mylist[2]
8889 -
8890 - # sanity check
8891 - for atom in (origcp, newcp):
8892 - if not isjustname(atom):
8893 - raise InvalidPackageName(str(atom))
8894 - origmatches = self.match(origcp, use_cache=0)
8895 - moves = 0
8896 - if not origmatches:
8897 - return moves
8898 - for mycpv in origmatches:
8899 - mycpv_cp = mycpv.cp
8900 - if mycpv_cp != origcp:
8901 - # Ignore PROVIDE virtual match.
8902 - continue
8903 - if repo_match is not None \
8904 - and not repo_match(mycpv.repo):
8905 - continue
8906 -
8907 - # Use isvalidatom() to check if this move is valid for the
8908 - # EAPI (characters allowed in package names may vary).
8909 - if not isvalidatom(newcp, eapi=mycpv.eapi):
8910 - continue
8911 -
8912 - mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
8913 - mynewcat = catsplit(newcp)[0]
8914 - origpath = self.getpath(mycpv)
8915 - if not os.path.exists(origpath):
8916 - continue
8917 - moves += 1
8918 - if not os.path.exists(self.getpath(mynewcat)):
8919 - #create the directory
8920 - ensure_dirs(self.getpath(mynewcat))
8921 - newpath = self.getpath(mynewcpv)
8922 - if os.path.exists(newpath):
8923 - #dest already exists; keep this puppy where it is.
8924 - continue
8925 - _movefile(origpath, newpath, mysettings=self.settings)
8926 - self._clear_pkg_cache(self._dblink(mycpv))
8927 - self._clear_pkg_cache(self._dblink(mynewcpv))
8928 -
8929 - # We need to rename the ebuild now.
8930 - old_pf = catsplit(mycpv)[1]
8931 - new_pf = catsplit(mynewcpv)[1]
8932 - if new_pf != old_pf:
8933 - try:
8934 - os.rename(os.path.join(newpath, old_pf + ".ebuild"),
8935 - os.path.join(newpath, new_pf + ".ebuild"))
8936 - except EnvironmentError as e:
8937 - if e.errno != errno.ENOENT:
8938 - raise
8939 - del e
8940 - write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
8941 - write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
8942 -
8943 - return moves
8944 -
8945 - def cp_list(self, mycp, use_cache=1):
8946 - mysplit=catsplit(mycp)
8947 - if mysplit[0] == '*':
8948 - mysplit[0] = mysplit[0][1:]
8949 - try:
8950 - mystat = os.stat(self.getpath(mysplit[0])).st_mtime_ns
8951 - except OSError:
8952 - mystat = 0
8953 - if use_cache and mycp in self.cpcache:
8954 - cpc = self.cpcache[mycp]
8955 - if cpc[0] == mystat:
8956 - return cpc[1][:]
8957 - cat_dir = self.getpath(mysplit[0])
8958 - try:
8959 - dir_list = os.listdir(cat_dir)
8960 - except EnvironmentError as e:
8961 - if e.errno == PermissionDenied.errno:
8962 - raise PermissionDenied(cat_dir)
8963 - del e
8964 - dir_list = []
8965 -
8966 - returnme = []
8967 - for x in dir_list:
8968 - if self._excluded_dirs.match(x) is not None:
8969 - continue
8970 - ps = pkgsplit(x)
8971 - if not ps:
8972 - self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
8973 - continue
8974 - if len(mysplit) > 1:
8975 - if ps[0] == mysplit[1]:
8976 - cpv = "%s/%s" % (mysplit[0], x)
8977 - metadata = dict(zip(self._aux_cache_keys,
8978 - self.aux_get(cpv, self._aux_cache_keys)))
8979 - returnme.append(_pkg_str(cpv, metadata=metadata,
8980 - settings=self.settings, db=self))
8981 - self._cpv_sort_ascending(returnme)
8982 - if use_cache:
8983 - self.cpcache[mycp] = [mystat, returnme[:]]
8984 - elif mycp in self.cpcache:
8985 - del self.cpcache[mycp]
8986 - return returnme
8987 -
8988 - def cpv_all(self, use_cache=1):
8989 - """
8990 - Set use_cache=0 to bypass the portage.cachedir() cache in cases
8991 - when the accuracy of mtime staleness checks should not be trusted
8992 - (generally this is only necessary in critical sections that
8993 - involve merge or unmerge of packages).
8994 - """
8995 - return list(self._iter_cpv_all(use_cache=use_cache))
8996 -
8997 - def _iter_cpv_all(self, use_cache=True, sort=False):
8998 - returnme = []
8999 - basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep
9000 -
9001 - if use_cache:
9002 - from portage import listdir
9003 - else:
9004 - def listdir(p, **kwargs):
9005 - try:
9006 - return [x for x in os.listdir(p) \
9007 - if os.path.isdir(os.path.join(p, x))]
9008 - except EnvironmentError as e:
9009 - if e.errno == PermissionDenied.errno:
9010 - raise PermissionDenied(p)
9011 - del e
9012 - return []
9013 -
9014 - catdirs = listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1)
9015 - if sort:
9016 - catdirs.sort()
9017 -
9018 - for x in catdirs:
9019 - if self._excluded_dirs.match(x) is not None:
9020 - continue
9021 - if not self._category_re.match(x):
9022 - continue
9023 -
9024 - pkgdirs = listdir(basepath + x, EmptyOnError=1, dirsonly=1)
9025 - if sort:
9026 - pkgdirs.sort()
9027 -
9028 - for y in pkgdirs:
9029 - if self._excluded_dirs.match(y) is not None:
9030 - continue
9031 - subpath = x + "/" + y
9032 - # -MERGING- should never be a cpv, nor should files.
9033 - try:
9034 - subpath = _pkg_str(subpath, db=self)
9035 - except InvalidData:
9036 - self.invalidentry(self.getpath(subpath))
9037 - continue
9038 -
9039 - yield subpath
9040 -
9041 - def cp_all(self, use_cache=1, sort=False):
9042 - mylist = self.cpv_all(use_cache=use_cache)
9043 - d={}
9044 - for y in mylist:
9045 - if y[0] == '*':
9046 - y = y[1:]
9047 - try:
9048 - mysplit = catpkgsplit(y)
9049 - except InvalidData:
9050 - self.invalidentry(self.getpath(y))
9051 - continue
9052 - if not mysplit:
9053 - self.invalidentry(self.getpath(y))
9054 - continue
9055 - d[mysplit[0]+"/"+mysplit[1]] = None
9056 - return sorted(d) if sort else list(d)
9057 -
9058 - def checkblockers(self, origdep):
9059 - pass
9060 -
9061 - def _clear_cache(self):
9062 - self.mtdircache.clear()
9063 - self.matchcache.clear()
9064 - self.cpcache.clear()
9065 - self._aux_cache_obj = None
9066 -
9067 - def _add(self, pkg_dblink):
9068 - self._pkgs_changed = True
9069 - self._clear_pkg_cache(pkg_dblink)
9070 -
9071 - def _remove(self, pkg_dblink):
9072 - self._pkgs_changed = True
9073 - self._clear_pkg_cache(pkg_dblink)
9074 -
9075 - def _clear_pkg_cache(self, pkg_dblink):
9076 - # Due to 1 second mtime granularity in <python-2.5, mtime checks
9077 - # are not always sufficient to invalidate vardbapi caches. Therefore,
9078 - # the caches need to be actively invalidated here.
9079 - self.mtdircache.pop(pkg_dblink.cat, None)
9080 - self.matchcache.pop(pkg_dblink.cat, None)
9081 - self.cpcache.pop(pkg_dblink.mysplit[0], None)
9082 - dircache.pop(pkg_dblink.dbcatdir, None)
9083 -
9084 - def match(self, origdep, use_cache=1):
9085 - "caching match function"
9086 - mydep = dep_expand(
9087 - origdep, mydb=self, use_cache=use_cache, settings=self.settings)
9088 - cache_key = (mydep, mydep.unevaluated_atom)
9089 - mykey = dep_getkey(mydep)
9090 - mycat = catsplit(mykey)[0]
9091 - if not use_cache:
9092 - if mycat in self.matchcache:
9093 - del self.mtdircache[mycat]
9094 - del self.matchcache[mycat]
9095 - return list(self._iter_match(mydep,
9096 - self.cp_list(mydep.cp, use_cache=use_cache)))
9097 - try:
9098 - curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime_ns
9099 - except (IOError, OSError):
9100 - curmtime=0
9101 -
9102 - if mycat not in self.matchcache or \
9103 - self.mtdircache[mycat] != curmtime:
9104 - # clear cache entry
9105 - self.mtdircache[mycat] = curmtime
9106 - self.matchcache[mycat] = {}
9107 - if mydep not in self.matchcache[mycat]:
9108 - mymatch = list(self._iter_match(mydep,
9109 - self.cp_list(mydep.cp, use_cache=use_cache)))
9110 - self.matchcache[mycat][cache_key] = mymatch
9111 - return self.matchcache[mycat][cache_key][:]
9112 -
9113 - def findname(self, mycpv, myrepo=None):
9114 - return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
9115 -
9116 - def flush_cache(self):
9117 - """If the current user has permission and the internal aux_get cache has
9118 - been updated, save it to disk and mark it unmodified. This is called
9119 - by emerge after it has loaded the full vdb for use in dependency
9120 - calculations. Currently, the cache is only written if the user has
9121 - superuser privileges (since that's required to obtain a lock), but all
9122 - users have read access and benefit from faster metadata lookups (as
9123 - long as at least part of the cache is still valid)."""
9124 - if self._flush_cache_enabled and \
9125 - self._aux_cache is not None and \
9126 - secpass >= 2 and \
9127 - (len(self._aux_cache["modified"]) >= self._aux_cache_threshold or
9128 - not os.path.exists(self._cache_delta_filename)):
9129 -
9130 - ensure_dirs(os.path.dirname(self._aux_cache_filename))
9131 -
9132 - self._owners.populate() # index any unindexed contents
9133 - valid_nodes = set(self.cpv_all())
9134 - for cpv in list(self._aux_cache["packages"]):
9135 - if cpv not in valid_nodes:
9136 - del self._aux_cache["packages"][cpv]
9137 - del self._aux_cache["modified"]
9138 - timestamp = time.time()
9139 - self._aux_cache["timestamp"] = timestamp
9140 -
9141 - with atomic_ofstream(self._aux_cache_filename, 'wb') as f:
9142 - pickle.dump(self._aux_cache, f, protocol=2)
9143 -
9144 - apply_secpass_permissions(
9145 - self._aux_cache_filename, mode=0o644)
9146 -
9147 - self._cache_delta.initialize(timestamp)
9148 - apply_secpass_permissions(
9149 - self._cache_delta_filename, mode=0o644)
9150 -
9151 - self._aux_cache["modified"] = set()
9152 -
9153 - @property
9154 - def _aux_cache(self):
9155 - if self._aux_cache_obj is None:
9156 - self._aux_cache_init()
9157 - return self._aux_cache_obj
9158 -
9159 - def _aux_cache_init(self):
9160 - aux_cache = None
9161 - open_kwargs = {}
9162 - try:
9163 - with open(_unicode_encode(self._aux_cache_filename,
9164 - encoding=_encodings['fs'], errors='strict'),
9165 - mode='rb', **open_kwargs) as f:
9166 - mypickle = pickle.Unpickler(f)
9167 - try:
9168 - mypickle.find_global = None
9169 - except AttributeError:
9170 - # TODO: If py3k, override Unpickler.find_class().
9171 - pass
9172 - aux_cache = mypickle.load()
9173 - except (SystemExit, KeyboardInterrupt):
9174 - raise
9175 - except Exception as e:
9176 - if isinstance(e, EnvironmentError) and \
9177 - getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
9178 - pass
9179 - else:
9180 - writemsg(_("!!! Error loading '%s': %s\n") % \
9181 - (self._aux_cache_filename, e), noiselevel=-1)
9182 - del e
9183 -
9184 - if not aux_cache or \
9185 - not isinstance(aux_cache, dict) or \
9186 - aux_cache.get("version") != self._aux_cache_version or \
9187 - not aux_cache.get("packages"):
9188 - aux_cache = {"version": self._aux_cache_version}
9189 - aux_cache["packages"] = {}
9190 -
9191 - owners = aux_cache.get("owners")
9192 - if owners is not None:
9193 - if not isinstance(owners, dict):
9194 - owners = None
9195 - elif "version" not in owners:
9196 - owners = None
9197 - elif owners["version"] != self._owners_cache_version:
9198 - owners = None
9199 - elif "base_names" not in owners:
9200 - owners = None
9201 - elif not isinstance(owners["base_names"], dict):
9202 - owners = None
9203 -
9204 - if owners is None:
9205 - owners = {
9206 - "base_names" : {},
9207 - "version" : self._owners_cache_version
9208 - }
9209 - aux_cache["owners"] = owners
9210 -
9211 - aux_cache["modified"] = set()
9212 - self._aux_cache_obj = aux_cache
9213 -
9214 - def aux_get(self, mycpv, wants, myrepo = None):
9215 - """This automatically caches selected keys that are frequently needed
9216 - by emerge for dependency calculations. The cached metadata is
9217 - considered valid if the mtime of the package directory has not changed
9218 - since the data was cached. The cache is stored in a pickled dict
9219 - object with the following format:
9220 -
9221 - {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
9222 -
9223 - If an error occurs while loading the cache pickle or the version is
9224 - unrecognized, the cache will simply be recreated from scratch (it is
9225 - completely disposable).
9226 - """
9227 - cache_these_wants = self._aux_cache_keys.intersection(wants)
9228 - for x in wants:
9229 - if self._aux_cache_keys_re.match(x) is not None:
9230 - cache_these_wants.add(x)
9231 -
9232 - if not cache_these_wants:
9233 - mydata = self._aux_get(mycpv, wants)
9234 - return [mydata[x] for x in wants]
9235 -
9236 - cache_these = set(self._aux_cache_keys)
9237 - cache_these.update(cache_these_wants)
9238 -
9239 - mydir = self.getpath(mycpv)
9240 - mydir_stat = None
9241 - try:
9242 - mydir_stat = os.stat(mydir)
9243 - except OSError as e:
9244 - if e.errno != errno.ENOENT:
9245 - raise
9246 - raise KeyError(mycpv)
9247 - # Use float mtime when available.
9248 - mydir_mtime = mydir_stat.st_mtime
9249 - pkg_data = self._aux_cache["packages"].get(mycpv)
9250 - pull_me = cache_these.union(wants)
9251 - mydata = {"_mtime_" : mydir_mtime}
9252 - cache_valid = False
9253 - cache_incomplete = False
9254 - cache_mtime = None
9255 - metadata = None
9256 - if pkg_data is not None:
9257 - if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
9258 - pkg_data = None
9259 - else:
9260 - cache_mtime, metadata = pkg_data
9261 - if not isinstance(cache_mtime, (float, int)) or \
9262 - not isinstance(metadata, dict):
9263 - pkg_data = None
9264 -
9265 - if pkg_data:
9266 - cache_mtime, metadata = pkg_data
9267 - if isinstance(cache_mtime, float):
9268 - if cache_mtime == mydir_stat.st_mtime:
9269 - cache_valid = True
9270 -
9271 - # Handle truncated mtime in order to avoid cache
9272 - # invalidation for livecd squashfs (bug 564222).
9273 - elif int(cache_mtime) == mydir_stat.st_mtime:
9274 - cache_valid = True
9275 - else:
9276 - # Cache may contain integer mtime.
9277 - cache_valid = cache_mtime == mydir_stat[stat.ST_MTIME]
9278 -
9279 - if cache_valid:
9280 - # Migrate old metadata to unicode.
9281 - for k, v in metadata.items():
9282 - metadata[k] = _unicode_decode(v,
9283 - encoding=_encodings['repo.content'], errors='replace')
9284 -
9285 - mydata.update(metadata)
9286 - pull_me.difference_update(mydata)
9287 -
9288 - if pull_me:
9289 - # pull any needed data and cache it
9290 - aux_keys = list(pull_me)
9291 - mydata.update(self._aux_get(mycpv, aux_keys, st=mydir_stat))
9292 - if not cache_valid or cache_these.difference(metadata):
9293 - cache_data = {}
9294 - if cache_valid and metadata:
9295 - cache_data.update(metadata)
9296 - for aux_key in cache_these:
9297 - cache_data[aux_key] = mydata[aux_key]
9298 - self._aux_cache["packages"][str(mycpv)] = \
9299 - (mydir_mtime, cache_data)
9300 - self._aux_cache["modified"].add(mycpv)
9301 -
9302 - eapi_attrs = _get_eapi_attrs(mydata['EAPI'])
9303 - if _get_slot_re(eapi_attrs).match(mydata['SLOT']) is None:
9304 - # Empty or invalid slot triggers InvalidAtom exceptions when
9305 - # generating slot atoms for packages, so translate it to '0' here.
9306 - mydata['SLOT'] = '0'
9307 -
9308 - return [mydata[x] for x in wants]
9309 -
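A typical aux_get() call, sketched; vardb is a vardbapi instance and the cpv is hypothetical. Values come back in the order requested, served from the pickle cache while the package directory mtime still matches:

    def get_slot_and_eapi(vardb, cpv="app-misc/example-1.0"):
        slot, eapi = vardb.aux_get(cpv, ["SLOT", "EAPI"])
        return slot, eapi
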
9310 - def _aux_get(self, mycpv, wants, st=None):
9311 - mydir = self.getpath(mycpv)
9312 - if st is None:
9313 - try:
9314 - st = os.stat(mydir)
9315 - except OSError as e:
9316 - if e.errno == errno.ENOENT:
9317 - raise KeyError(mycpv)
9318 - elif e.errno == PermissionDenied.errno:
9319 - raise PermissionDenied(mydir)
9320 - else:
9321 - raise
9322 - if not stat.S_ISDIR(st.st_mode):
9323 - raise KeyError(mycpv)
9324 - results = {}
9325 - env_keys = []
9326 - for x in wants:
9327 - if x == "_mtime_":
9328 - results[x] = st[stat.ST_MTIME]
9329 - continue
9330 - try:
9331 - with io.open(
9332 - _unicode_encode(os.path.join(mydir, x),
9333 - encoding=_encodings['fs'], errors='strict'),
9334 - mode='r', encoding=_encodings['repo.content'],
9335 - errors='replace') as f:
9336 - myd = f.read()
9337 - except IOError:
9338 - if x not in self._aux_cache_keys and \
9339 - self._aux_cache_keys_re.match(x) is None:
9340 - env_keys.append(x)
9341 - continue
9342 - myd = ''
9343 -
9344 - # Preserve \n for metadata that is known to
9345 - # contain multiple lines.
9346 - if self._aux_multi_line_re.match(x) is None:
9347 - myd = " ".join(myd.split())
9348 -
9349 - results[x] = myd
9350 -
9351 - if env_keys:
9352 - env_results = self._aux_env_search(mycpv, env_keys)
9353 - for k in env_keys:
9354 - v = env_results.get(k)
9355 - if v is None:
9356 - v = ''
9357 - if self._aux_multi_line_re.match(k) is None:
9358 - v = " ".join(v.split())
9359 - results[k] = v
9360 -
9361 - if results.get("EAPI") == "":
9362 - results["EAPI"] = '0'
9363 -
9364 - return results
9365 -
9366 - def _aux_env_search(self, cpv, variables):
9367 - """
9368 - Search environment.bz2 for the specified variables. Returns
9369 - a dict mapping variables to values, and any variables not
9370 - found in the environment will not be included in the dict.
9371 - This is useful for querying variables like ${SRC_URI} and
9372 - ${A}, which are not saved in separate files but are available
9373 - in environment.bz2 (see bug #395463).
9374 - """
9375 - env_file = self.getpath(cpv, filename="environment.bz2")
9376 - if not os.path.isfile(env_file):
9377 - return {}
9378 - bunzip2_cmd = portage.util.shlex_split(
9379 - self.settings.get("PORTAGE_BUNZIP2_COMMAND", ""))
9380 - if not bunzip2_cmd:
9381 - bunzip2_cmd = portage.util.shlex_split(
9382 - self.settings["PORTAGE_BZIP2_COMMAND"])
9383 - bunzip2_cmd.append("-d")
9384 - args = bunzip2_cmd + ["-c", env_file]
9385 - try:
9386 - proc = subprocess.Popen(args, stdout=subprocess.PIPE)
9387 - except EnvironmentError as e:
9388 - if e.errno != errno.ENOENT:
9389 - raise
9390 - raise portage.exception.CommandNotFound(args[0])
9391 -
9392 - # Parts of the following code are borrowed from
9393 - # filter-bash-environment.py (keep them in sync).
9394 - var_assign_re = re.compile(r'(^|^declare\s+-\S+\s+|^declare\s+|^export\s+)([^=\s]+)=("|\')?(.*)$')
9395 - close_quote_re = re.compile(r'(\\"|"|\')\s*$')
9396 - def have_end_quote(quote, line):
9397 - close_quote_match = close_quote_re.search(line)
9398 - return close_quote_match is not None and \
9399 - close_quote_match.group(1) == quote
9400 -
9401 - variables = frozenset(variables)
9402 - results = {}
9403 - for line in proc.stdout:
9404 - line = _unicode_decode(line,
9405 - encoding=_encodings['content'], errors='replace')
9406 - var_assign_match = var_assign_re.match(line)
9407 - if var_assign_match is not None:
9408 - key = var_assign_match.group(2)
9409 - quote = var_assign_match.group(3)
9410 - if quote is not None:
9411 - if have_end_quote(quote,
9412 - line[var_assign_match.end(2)+2:]):
9413 - value = var_assign_match.group(4)
9414 - else:
9415 - value = [var_assign_match.group(4)]
9416 - for line in proc.stdout:
9417 - line = _unicode_decode(line,
9418 - encoding=_encodings['content'],
9419 - errors='replace')
9420 - value.append(line)
9421 - if have_end_quote(quote, line):
9422 - break
9423 - value = ''.join(value)
9424 - # remove trailing quote and whitespace
9425 - value = value.rstrip()[:-1]
9426 - else:
9427 - value = var_assign_match.group(4).rstrip()
9428 -
9429 - if key in variables:
9430 - results[key] = value
9431 -
9432 - proc.wait()
9433 - proc.stdout.close()
9434 - return results
9435 -
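A heavily simplified, stdlib-only sketch of what _aux_env_search() does: decompress environment.bz2 and scan for simple KEY=value assignments. The real method spawns PORTAGE_BUNZIP2_COMMAND and also handles declare/export prefixes and multi-line quoted values:

    import bz2
    import re

    def env_search(env_file, variables):
        wanted = frozenset(variables)
        assign_re = re.compile(r"^([^=\s]+)=(.*)$")
        results = {}
        with bz2.open(env_file, "rt", errors="replace") as f:
            for line in f:
                m = assign_re.match(line)
                if m and m.group(1) in wanted:
                    # strip trailing whitespace and surrounding quotes
                    results[m.group(1)] = m.group(2).rstrip().strip("\"'")
        return results
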
9436 - def aux_update(self, cpv, values):
9437 - mylink = self._dblink(cpv)
9438 - if not mylink.exists():
9439 - raise KeyError(cpv)
9440 - self._bump_mtime(cpv)
9441 - self._clear_pkg_cache(mylink)
9442 - for k, v in values.items():
9443 - if v:
9444 - mylink.setfile(k, v)
9445 - else:
9446 - try:
9447 - os.unlink(os.path.join(self.getpath(cpv), k))
9448 - except EnvironmentError:
9449 - pass
9450 - self._bump_mtime(cpv)
9451 -
9452 - @coroutine
9453 - def unpack_metadata(self, pkg, dest_dir, loop=None):
9454 - """
9455 - Unpack package metadata to a directory. This method is a coroutine.
9456 -
9457 - @param pkg: package to unpack
9458 - @type pkg: _pkg_str or portage.config
9459 - @param dest_dir: destination directory
9460 - @type dest_dir: str
9461 - """
9462 - loop = asyncio._wrap_loop(loop)
9463 - if not isinstance(pkg, portage.config):
9464 - cpv = pkg
9465 - else:
9466 - cpv = pkg.mycpv
9467 - dbdir = self.getpath(cpv)
9468 - def async_copy():
9469 - for parent, dirs, files in os.walk(dbdir, onerror=_raise_exc):
9470 - for key in files:
9471 - shutil.copy(os.path.join(parent, key),
9472 - os.path.join(dest_dir, key))
9473 - break
9474 - yield loop.run_in_executor(ForkExecutor(loop=loop), async_copy)
9475 -
9476 - @coroutine
9477 - def unpack_contents(self, pkg, dest_dir,
9478 - include_config=None, include_unmodified_config=None, loop=None):
9479 - """
9480 - Unpack package contents to a directory. This method is a coroutine.
9481 -
9482 - This copies files from the installed system, in the same way
9483 - as the quickpkg(1) command. Default behavior for handling
9484 - of protected configuration files is controlled by the
9485 - QUICKPKG_DEFAULT_OPTS variable. The relevant quickpkg options
9486 - are --include-config and --include-unmodified-config. When
9487 - a configuration file is not included because it is protected,
9488 - an ewarn message is logged.
9489 -
9490 - @param pkg: package to unpack
9491 - @type pkg: _pkg_str or portage.config
9492 - @param dest_dir: destination directory
9493 - @type dest_dir: str
9494 - @param include_config: Include all files protected by
9495 - CONFIG_PROTECT (as a security precaution, default is False
9496 - unless modified by QUICKPKG_DEFAULT_OPTS).
9497 - @type include_config: bool
9498 - @param include_unmodified_config: Include files protected by
9499 - CONFIG_PROTECT that have not been modified since installation
9500 - (as a security precaution, default is False unless modified
9501 - by QUICKPKG_DEFAULT_OPTS).
9502 - @type include_unmodified_config: bool
9503 - """
9504 - loop = asyncio._wrap_loop(loop)
9505 - if not isinstance(pkg, portage.config):
9506 - settings = self.settings
9507 - cpv = pkg
9508 - else:
9509 - settings = pkg
9510 - cpv = settings.mycpv
9511 -
9512 - scheduler = SchedulerInterface(loop)
9513 - parser = argparse.ArgumentParser()
9514 - parser.add_argument('--include-config',
9515 - choices=('y', 'n'),
9516 - default='n')
9517 - parser.add_argument('--include-unmodified-config',
9518 - choices=('y', 'n'),
9519 - default='n')
9520 -
9521 - # Method parameters may override QUICKPKG_DEFAULT_OPTS.
9522 - opts_list = portage.util.shlex_split(settings.get('QUICKPKG_DEFAULT_OPTS', ''))
9523 - if include_config is not None:
9524 - opts_list.append('--include-config={}'.format(
9525 - 'y' if include_config else 'n'))
9526 - if include_unmodified_config is not None:
9527 - opts_list.append('--include-unmodified-config={}'.format(
9528 - 'y' if include_unmodified_config else 'n'))
9529 -
9530 - opts, args = parser.parse_known_args(opts_list)
9531 -
9532 - tar_cmd = ('tar', '-x', '--xattrs', '--xattrs-include=*', '-C', dest_dir)
9533 - pr, pw = os.pipe()
9534 - proc = (yield asyncio.create_subprocess_exec(*tar_cmd, stdin=pr))
9535 - os.close(pr)
9536 - with os.fdopen(pw, 'wb', 0) as pw_file:
9537 - excluded_config_files = (yield loop.run_in_executor(ForkExecutor(loop=loop),
9538 - functools.partial(self._dblink(cpv).quickpkg,
9539 - pw_file,
9540 - include_config=opts.include_config == 'y',
9541 - include_unmodified_config=opts.include_unmodified_config == 'y')))
9542 - yield proc.wait()
9543 - if proc.returncode != os.EX_OK:
9544 - raise PortageException('command failed: {}'.format(tar_cmd))
9545 -
9546 - if excluded_config_files:
9547 - log_lines = ([_("Config files excluded by QUICKPKG_DEFAULT_OPTS (see quickpkg(1) man page):")] +
9548 - ['\t{}'.format(name) for name in excluded_config_files])
9549 - out = io.StringIO()
9550 - for line in log_lines:
9551 - portage.elog.messages.ewarn(line, phase='install', key=cpv, out=out)
9552 - scheduler.output(out.getvalue(),
9553 - background=self.settings.get("PORTAGE_BACKGROUND") == "1",
9554 - log_path=settings.get("PORTAGE_LOG_FILE"))
9555 -
9556 - def counter_tick(self, myroot=None, mycpv=None):
9557 - """
9558 - @param myroot: ignored, self._eroot is used instead
9559 - """
9560 - return self.counter_tick_core(incrementing=1, mycpv=mycpv)
9561 -
9562 - def get_counter_tick_core(self, myroot=None, mycpv=None):
9563 - """
9564 - Use this method to retrieve the counter instead
9565 - of having to trust the value of a global counter
9566 - file that can lead to invalid COUNTER
9567 - generation. When cache is valid, the package COUNTER
9568 - files are not read and we rely on the timestamp of
9569 - the package directory to validate cache. The stat
9570 - calls should only take a short time, so performance
9571 - is sufficient without having to rely on a potentially
9572 - corrupt global counter file.
9573 -
9574 - The global counter file located at
9575 - $CACHE_PATH/counter serves to record the
9576 - counter of the last installed package and
9577 - it also corresponds to the total number of
9578 - installation actions that have occurred in
9579 - the history of this package database.
9580 -
9581 - @param myroot: ignored, self._eroot is used instead
9582 - """
9583 - del myroot
9584 - counter = -1
9585 - try:
9586 - with io.open(
9587 - _unicode_encode(self._counter_path,
9588 - encoding=_encodings['fs'], errors='strict'),
9589 - mode='r', encoding=_encodings['repo.content'],
9590 - errors='replace') as f:
9591 - try:
9592 - counter = int(f.readline().strip())
9593 - except (OverflowError, ValueError) as e:
9594 - writemsg(_("!!! COUNTER file is corrupt: '%s'\n") %
9595 - self._counter_path, noiselevel=-1)
9596 - writemsg("!!! %s\n" % (e,), noiselevel=-1)
9597 - except EnvironmentError as e:
9598 - # Silently allow ENOENT since files under
9599 - # /var/cache/ are allowed to disappear.
9600 - if e.errno != errno.ENOENT:
9601 - writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
9602 - self._counter_path, noiselevel=-1)
9603 - writemsg("!!! %s\n" % str(e), noiselevel=-1)
9604 - del e
9605 -
9606 - if self._cached_counter == counter:
9607 - max_counter = counter
9608 - else:
9609 - # We must ensure that we return a counter
9610 - # value that is at least as large as the
9611 - # highest one from the installed packages,
9612 - # since having a corrupt value that is too low
9613 - # can trigger incorrect AUTOCLEAN behavior due
9614 - # to newly installed packages having lower
9615 - # COUNTERs than the previous version in the
9616 - # same slot.
9617 - max_counter = counter
9618 - for cpv in self.cpv_all():
9619 - try:
9620 - pkg_counter = int(self.aux_get(cpv, ["COUNTER"])[0])
9621 - except (KeyError, OverflowError, ValueError):
9622 - continue
9623 - if pkg_counter > max_counter:
9624 - max_counter = pkg_counter
9625 -
9626 - return max_counter + 1
9627 -
9628 - def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
9629 - """
9630 - This method will grab the next COUNTER value and record it back
9631 - to the global file. Note that every package install must have
9632 - a unique counter, since a slotmove update can move two packages
9633 - into the same SLOT and in that case it's important that both
9634 - packages have different COUNTER metadata.
9635 -
9636 - @param myroot: ignored, self._eroot is used instead
9637 - @param mycpv: ignored
9638 - @rtype: int
9639 - @return: new counter value
9640 - """
9641 - myroot = None
9642 - mycpv = None
9643 - self.lock()
9644 - try:
9645 - counter = self.get_counter_tick_core() - 1
9646 - if incrementing:
9647 - #increment counter
9648 - counter += 1
9649 - # update new global counter file
9650 - try:
9651 - write_atomic(self._counter_path, str(counter))
9652 - except InvalidLocation:
9653 - self.settings._init_dirs()
9654 - write_atomic(self._counter_path, str(counter))
9655 - self._cached_counter = counter
9656 -
9657 - # Since we hold a lock, this is a good opportunity
9658 - # to flush the cache. Note that this will only
9659 - # flush the cache periodically in the main process
9660 - # when _aux_cache_threshold is exceeded.
9661 - self.flush_cache()
9662 - finally:
9663 - self.unlock()
9664 -
9665 - return counter
9666 -
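The invariant get_counter_tick_core() enforces, reduced to its essence: the next COUNTER must exceed both the global file's value and every installed package's COUNTER, so a corrupt low value cannot trigger bogus AUTOCLEAN behavior:

    def next_counter(file_counter, pkg_counters):
        # never go backwards relative to any installed package
        return max([file_counter] + list(pkg_counters)) + 1

    assert next_counter(41, [7, 42]) == 43
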
9667 - def _dblink(self, cpv):
9668 - category, pf = catsplit(cpv)
9669 - return dblink(category, pf, settings=self.settings,
9670 - vartree=self.vartree, treetype="vartree")
9671 -
9672 - def removeFromContents(self, pkg, paths, relative_paths=True):
9673 - """
9674 - @param pkg: cpv for an installed package
9675 - @type pkg: string
9676 - @param paths: paths of files to remove from contents
9677 - @type paths: iterable
9678 - """
9679 - if not hasattr(pkg, "getcontents"):
9680 - pkg = self._dblink(pkg)
9681 - root = self.settings['ROOT']
9682 - root_len = len(root) - 1
9683 - new_contents = pkg.getcontents().copy()
9684 - removed = 0
9685 -
9686 - for filename in paths:
9687 - filename = _unicode_decode(filename,
9688 - encoding=_encodings['content'], errors='strict')
9689 - filename = normalize_path(filename)
9690 - if relative_paths:
9691 - relative_filename = filename
9692 - else:
9693 - relative_filename = filename[root_len:]
9694 - contents_key = pkg._match_contents(relative_filename)
9695 - if contents_key:
9696 - # It's possible for two different paths to refer to the same
9697 - # contents_key, due to directory symlinks. Therefore, pass a
9698 - # default value to pop, in order to avoid a KeyError which
9699 - # could otherwise be triggered (see bug #454400).
9700 - new_contents.pop(contents_key, None)
9701 - removed += 1
9702 -
9703 - if removed:
9704 - # Also remove corresponding NEEDED lines, so that they do
9705 - # not corrupt LinkageMap data for preserve-libs.
9706 - needed_filename = os.path.join(pkg.dbdir, LinkageMap._needed_aux_key)
9707 - new_needed = None
9708 - try:
9709 - with io.open(_unicode_encode(needed_filename,
9710 - encoding=_encodings['fs'], errors='strict'),
9711 - mode='r', encoding=_encodings['repo.content'],
9712 - errors='replace') as f:
9713 - needed_lines = f.readlines()
9714 - except IOError as e:
9715 - if e.errno not in (errno.ENOENT, errno.ESTALE):
9716 - raise
9717 - else:
9718 - new_needed = []
9719 - for l in needed_lines:
9720 - l = l.rstrip("\n")
9721 - if not l:
9722 - continue
9723 - try:
9724 - entry = NeededEntry.parse(needed_filename, l)
9725 - except InvalidData as e:
9726 - writemsg_level("\n%s\n\n" % (e,),
9727 - level=logging.ERROR, noiselevel=-1)
9728 - continue
9729 -
9730 - filename = os.path.join(root, entry.filename.lstrip(os.sep))
9731 - if filename in new_contents:
9732 - new_needed.append(entry)
9733 -
9734 - self.writeContentsToContentsFile(pkg, new_contents, new_needed=new_needed)
9735 -
9736 - def writeContentsToContentsFile(self, pkg, new_contents, new_needed=None):
9737 - """
9738 - @param pkg: package to write contents file for
9739 - @type pkg: dblink
9740 - @param new_contents: contents to write to CONTENTS file
9741 - @type new_contents: contents dictionary of the form
9742 - {u'/path/to/file' : (contents_attribute 1, ...), ...}
9743 - @param new_needed: new NEEDED entries
9744 - @type new_needed: list of NeededEntry
9745 - """
9746 - root = self.settings['ROOT']
9747 - self._bump_mtime(pkg.mycpv)
9748 - if new_needed is not None:
9749 - f = atomic_ofstream(os.path.join(pkg.dbdir, LinkageMap._needed_aux_key))
9750 - for entry in new_needed:
9751 - f.write(str(entry))
9752 - f.close()
9753 - f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
9754 - write_contents(new_contents, root, f)
9755 - f.close()
9756 - self._bump_mtime(pkg.mycpv)
9757 - pkg._clear_contents_cache()
9758 -
9759 - class _owners_cache:
9760 - """
9761 - This class maintains a hash table that serves to index package
9762 - contents by mapping the basename of a file to a list of possible
9763 - packages that own it. This is used to optimize owner lookups
9764 - by narrowing the search down to a smaller number of packages.
9765 - """
9766 - _new_hash = md5
9767 - _hash_bits = 16
9768 - _hex_chars = _hash_bits // 4
9769 -
9770 - def __init__(self, vardb):
9771 - self._vardb = vardb
9772 -
9773 - def add(self, cpv):
9774 - eroot_len = len(self._vardb._eroot)
9775 - pkg_hash = self._hash_pkg(cpv)
9776 - db = self._vardb._dblink(cpv)
9777 - if not db.getcontents():
9778 - # Empty path is a code used to represent empty contents.
9779 - self._add_path("", pkg_hash)
9780 -
9781 - for x in db._contents.keys():
9782 - self._add_path(x[eroot_len:], pkg_hash)
9783 -
9784 - self._vardb._aux_cache["modified"].add(cpv)
9785 -
9786 - def _add_path(self, path, pkg_hash):
9787 - """
9788 - Empty path is a code that represents empty contents.
9789 - """
9790 - if path:
9791 - name = os.path.basename(path.rstrip(os.path.sep))
9792 - if not name:
9793 - return
9794 - else:
9795 - name = path
9796 - name_hash = self._hash_str(name)
9797 - base_names = self._vardb._aux_cache["owners"]["base_names"]
9798 - pkgs = base_names.get(name_hash)
9799 - if pkgs is None:
9800 - pkgs = {}
9801 - base_names[name_hash] = pkgs
9802 - pkgs[pkg_hash] = None
9803 -
9804 - def _hash_str(self, s):
9805 - h = self._new_hash()
9806 - # Always use a constant utf_8 encoding here, since
9807 - # the "default" encoding can change.
9808 - h.update(_unicode_encode(s,
9809 - encoding=_encodings['repo.content'],
9810 - errors='backslashreplace'))
9811 - h = h.hexdigest()
9812 - h = h[-self._hex_chars:]
9813 - h = int(h, 16)
9814 - return h
9815 -
9816 - def _hash_pkg(self, cpv):
9817 - counter, mtime = self._vardb.aux_get(
9818 - cpv, ["COUNTER", "_mtime_"])
9819 - try:
9820 - counter = int(counter)
9821 - except ValueError:
9822 - counter = 0
9823 - return (str(cpv), counter, mtime)
9824 -
9825 - class _owners_db:
9826 -
9827 - def __init__(self, vardb):
9828 - self._vardb = vardb
9829 -
9830 - def populate(self):
9831 - self._populate()
9832 -
9833 - def _populate(self):
9834 - owners_cache = vardbapi._owners_cache(self._vardb)
9835 - cached_hashes = set()
9836 - base_names = self._vardb._aux_cache["owners"]["base_names"]
9837 -
9838 - # Take inventory of all cached package hashes.
9839 - for name, hash_values in list(base_names.items()):
9840 - if not isinstance(hash_values, dict):
9841 - del base_names[name]
9842 - continue
9843 - cached_hashes.update(hash_values)
9844 -
9845 - # Create sets of valid package hashes and uncached packages.
9846 - uncached_pkgs = set()
9847 - hash_pkg = owners_cache._hash_pkg
9848 - valid_pkg_hashes = set()
9849 - for cpv in self._vardb.cpv_all():
9850 - hash_value = hash_pkg(cpv)
9851 - valid_pkg_hashes.add(hash_value)
9852 - if hash_value not in cached_hashes:
9853 - uncached_pkgs.add(cpv)
9854 -
9855 - # Cache any missing packages.
9856 - for cpv in uncached_pkgs:
9857 - owners_cache.add(cpv)
9858 -
9859 - # Delete any stale cache.
9860 - stale_hashes = cached_hashes.difference(valid_pkg_hashes)
9861 - if stale_hashes:
9862 - for base_name_hash, bucket in list(base_names.items()):
9863 - for hash_value in stale_hashes.intersection(bucket):
9864 - del bucket[hash_value]
9865 - if not bucket:
9866 - del base_names[base_name_hash]
9867 -
9868 - return owners_cache
9869 -
9870 - def get_owners(self, path_iter):
9871 - """
9872 - @return the owners as a dblink -> set(files) mapping.
9873 - """
9874 - owners = {}
9875 - for owner, f in self.iter_owners(path_iter):
9876 - owned_files = owners.get(owner)
9877 - if owned_files is None:
9878 - owned_files = set()
9879 - owners[owner] = owned_files
9880 - owned_files.add(f)
9881 - return owners
9882 -
9883 - def getFileOwnerMap(self, path_iter):
9884 - owners = self.get_owners(path_iter)
9885 - file_owners = {}
9886 - for pkg_dblink, files in owners.items():
9887 - for f in files:
9888 - owner_set = file_owners.get(f)
9889 - if owner_set is None:
9890 - owner_set = set()
9891 - file_owners[f] = owner_set
9892 - owner_set.add(pkg_dblink)
9893 - return file_owners
9894 -
9895 - def iter_owners(self, path_iter):
9896 - """
9897 - Iterate over tuples of (dblink, path). In order to avoid
9898 - consuming too many resources for too much time, resources
9899 - are only allocated for the duration of a given iter_owners()
9900 - call. Therefore, to maximize reuse of resources when searching
9901 - for multiple files, it's best to search for them all in a single
9902 - call.
9903 - """
9904 -
9905 - if not isinstance(path_iter, list):
9906 - path_iter = list(path_iter)
9907 - owners_cache = self._populate()
9908 - vardb = self._vardb
9909 - root = vardb._eroot
9910 - hash_pkg = owners_cache._hash_pkg
9911 - hash_str = owners_cache._hash_str
9912 - base_names = self._vardb._aux_cache["owners"]["base_names"]
9913 - case_insensitive = "case-insensitive-fs" \
9914 - in vardb.settings.features
9915 -
9916 - dblink_cache = {}
9917 -
9918 - def dblink(cpv):
9919 - x = dblink_cache.get(cpv)
9920 - if x is None:
9921 - if len(dblink_cache) > 20:
9922 - # Ensure that we don't run out of memory.
9923 - raise StopIteration()
9924 - x = self._vardb._dblink(cpv)
9925 - dblink_cache[cpv] = x
9926 - return x
9927 -
9928 - while path_iter:
9929 -
9930 - path = path_iter.pop()
9931 - if case_insensitive:
9932 - path = path.lower()
9933 - is_basename = os.sep != path[:1]
9934 - if is_basename:
9935 - name = path
9936 - else:
9937 - name = os.path.basename(path.rstrip(os.path.sep))
9938 -
9939 - if not name:
9940 - continue
9941 -
9942 - name_hash = hash_str(name)
9943 - pkgs = base_names.get(name_hash)
9944 - owners = []
9945 - if pkgs is not None:
9946 - try:
9947 - for hash_value in pkgs:
9948 - if not isinstance(hash_value, tuple) or \
9949 - len(hash_value) != 3:
9950 - continue
9951 - cpv, counter, mtime = hash_value
9952 - if not isinstance(cpv, str):
9953 - continue
9954 - try:
9955 - current_hash = hash_pkg(cpv)
9956 - except KeyError:
9957 - continue
9958 -
9959 - if current_hash != hash_value:
9960 - continue
9961 -
9962 - if is_basename:
9963 - for p in dblink(cpv)._contents.keys():
9964 - if os.path.basename(p) == name:
9965 - owners.append((cpv, dblink(cpv).
9966 - _contents.unmap_key(
9967 - p)[len(root):]))
9968 - else:
9969 - key = dblink(cpv)._match_contents(path)
9970 - if key is not False:
9971 - owners.append(
9972 - (cpv, key[len(root):]))
9973 -
9974 - except StopIteration:
9975 - path_iter.append(path)
9976 - del owners[:]
9977 - dblink_cache.clear()
9978 - gc.collect()
9979 - for x in self._iter_owners_low_mem(path_iter):
9980 - yield x
9981 - return
9982 - else:
9983 - for cpv, p in owners:
9984 - yield (dblink(cpv), p)
9985 -
9986 - def _iter_owners_low_mem(self, path_list):
9987 - """
9988 - This implementation will make a short-lived dblink instance (and
9989 - parse CONTENTS) for every single installed package. This is
9990 - slower but uses less memory than the method which uses the
9991 - basename cache.
9992 - """
9993 -
9994 - if not path_list:
9995 - return
9996 -
9997 - case_insensitive = "case-insensitive-fs" \
9998 - in self._vardb.settings.features
9999 - path_info_list = []
10000 - for path in path_list:
10001 - if case_insensitive:
10002 - path = path.lower()
10003 - is_basename = os.sep != path[:1]
10004 - if is_basename:
10005 - name = path
10006 - else:
10007 - name = os.path.basename(path.rstrip(os.path.sep))
10008 - path_info_list.append((path, name, is_basename))
10009 -
10010 - # Do work via the global event loop, so that it can be used
10011 - # for indication of progress during the search (bug #461412).
10012 - event_loop = asyncio._safe_loop()
10013 - root = self._vardb._eroot
10014 -
10015 - def search_pkg(cpv, search_future):
10016 - dblnk = self._vardb._dblink(cpv)
10017 - results = []
10018 - for path, name, is_basename in path_info_list:
10019 - if is_basename:
10020 - for p in dblnk._contents.keys():
10021 - if os.path.basename(p) == name:
10022 - results.append((dblnk,
10023 - dblnk._contents.unmap_key(
10024 - p)[len(root):]))
10025 - else:
10026 - key = dblnk._match_contents(path)
10027 - if key is not False:
10028 - results.append(
10029 - (dblnk, key[len(root):]))
10030 - search_future.set_result(results)
10031 -
10032 - for cpv in self._vardb.cpv_all():
10033 - search_future = event_loop.create_future()
10034 - event_loop.call_soon(search_pkg, cpv, search_future)
10035 - event_loop.run_until_complete(search_future)
10036 - for result in search_future.result():
10037 - yield result
10038 + _excluded_dirs = ["CVS", "lost+found"]
10039 + _excluded_dirs = [re.escape(x) for x in _excluded_dirs]
10040 + _excluded_dirs = re.compile(
10041 + r"^(\..*|" + MERGING_IDENTIFIER + ".*|" + "|".join(_excluded_dirs) + r")$"
10042 + )
10043 +
10044 + _aux_cache_version = "1"
10045 + _owners_cache_version = "1"
10046 +
10047 + # Number of uncached packages to trigger cache update, since
10048 + # it's wasteful to update it for every vdb change.
10049 + _aux_cache_threshold = 5
10050 +
10051 + _aux_cache_keys_re = re.compile(r"^NEEDED\..*$")
10052 + _aux_multi_line_re = re.compile(r"^(CONTENTS|NEEDED\..*)$")
10053 + _pkg_str_aux_keys = dbapi._pkg_str_aux_keys + ("BUILD_ID", "BUILD_TIME", "_mtime_")
10054 +
10055 + def __init__(
10056 + self,
10057 + _unused_param=DeprecationWarning,
10058 + categories=None,
10059 + settings=None,
10060 + vartree=None,
10061 + ):
10062 + """
10063 + The categories parameter is unused since the dbapi class
10064 + now has a categories property that is generated from the
10065 + available packages.
10066 + """
10067 +
10068 + # Used by emerge to check whether any packages
10069 + # have been added or removed.
10070 + self._pkgs_changed = False
10071 +
10072 + # The _aux_cache_threshold doesn't work as designed
10073 + # if the cache is flushed from a subprocess, so we
10074 + # use this to avoid wasteful vdb cache updates.
10075 + self._flush_cache_enabled = True
10076 +
10077 + # cache for category directory mtimes
10078 + self.mtdircache = {}
10079 +
10080 + # cache for dependency checks
10081 + self.matchcache = {}
10082 +
10083 + # cache for cp_list results
10084 + self.cpcache = {}
10085 +
10086 + self.blockers = None
10087 + if settings is None:
10088 + settings = portage.settings
10089 + self.settings = settings
10090 +
10091 + if _unused_param is not DeprecationWarning:
10092 + warnings.warn(
10093 + "The first parameter of the "
10094 + "portage.dbapi.vartree.vardbapi"
10095 + " constructor is now unused. Instead "
10096 + "settings['ROOT'] is used.",
10097 + DeprecationWarning,
10098 + stacklevel=2,
10099 + )
10100 +
10101 + self._eroot = settings["EROOT"]
10102 + self._dbroot = self._eroot + VDB_PATH
10103 + self._lock = None
10104 + self._lock_count = 0
10105 +
10106 + self._conf_mem_file = self._eroot + CONFIG_MEMORY_FILE
10107 + self._fs_lock_obj = None
10108 + self._fs_lock_count = 0
10109 + self._slot_locks = {}
10110 +
10111 + if vartree is None:
10112 + vartree = portage.db[settings["EROOT"]]["vartree"]
10113 + self.vartree = vartree
10114 + self._aux_cache_keys = set(
10115 + [
10116 + "BDEPEND",
10117 + "BUILD_TIME",
10118 + "CHOST",
10119 + "COUNTER",
10120 + "DEPEND",
10121 + "DESCRIPTION",
10122 + "EAPI",
10123 + "HOMEPAGE",
10124 + "BUILD_ID",
10125 + "IDEPEND",
10126 + "IUSE",
10127 + "KEYWORDS",
10128 + "LICENSE",
10129 + "PDEPEND",
10130 + "PROPERTIES",
10131 + "RDEPEND",
10132 + "repository",
10133 + "RESTRICT",
10134 + "SLOT",
10135 + "USE",
10136 + "DEFINED_PHASES",
10137 + "PROVIDES",
10138 + "REQUIRES",
10139 + ]
10140 + )
10141 + self._aux_cache_obj = None
10142 + self._aux_cache_filename = os.path.join(
10143 + self._eroot, CACHE_PATH, "vdb_metadata.pickle"
10144 + )
10145 + self._cache_delta_filename = os.path.join(
10146 + self._eroot, CACHE_PATH, "vdb_metadata_delta.json"
10147 + )
10148 + self._cache_delta = VdbMetadataDelta(self)
10149 + self._counter_path = os.path.join(self._eroot, CACHE_PATH, "counter")
10150 +
10151 + self._plib_registry = PreservedLibsRegistry(
10152 + settings["ROOT"],
10153 + os.path.join(self._eroot, PRIVATE_PATH, "preserved_libs_registry"),
10154 + )
10155 - self._linkmap = LinkageMap(self)
10156 ++ chost = self.settings.get('CHOST')
10157 ++ if not chost:
10158 ++ chost = 'lunix?' # this happens when profiles are not available
10159 ++ if chost.find('darwin') >= 0:
10160 ++ self._linkmap = LinkageMapMachO(self)
10161 ++ elif chost.find('interix') >= 0 or chost.find('winnt') >= 0:
10162 ++ self._linkmap = LinkageMapPeCoff(self)
10163 ++ elif chost.find('aix') >= 0:
10164 ++ self._linkmap = LinkageMapXCoff(self)
10165 ++ else:
10166 ++ self._linkmap = LinkageMap(self)
10167 + self._owners = self._owners_db(self)
10168 +
10169 + self._cached_counter = None
10170 +
10171 + @property
10172 + def writable(self):
10173 + """
10174 + Check if var/db/pkg is writable, or permissions are sufficient
10175 + to create it if it does not exist yet.
10176 + @rtype: bool
10177 + @return: True if var/db/pkg is writable or can be created,
10178 + False otherwise
10179 + """
10180 + return os.access(first_existing(self._dbroot), os.W_OK)
10181 +
10182 + @property
10183 + def root(self):
10184 + warnings.warn(
10185 + "The root attribute of "
10186 + "portage.dbapi.vartree.vardbapi"
10187 + " is deprecated. Use "
10188 + "settings['ROOT'] instead.",
10189 + DeprecationWarning,
10190 + stacklevel=3,
10191 + )
10192 + return self.settings["ROOT"]
10193 +
10194 + def getpath(self, mykey, filename=None):
10195 + # This is an optimized hotspot, so don't use unicode-wrapped
10196 + # os module and don't use os.path.join().
10197 + rValue = self._eroot + VDB_PATH + _os.sep + mykey
10198 + if filename is not None:
10199 + # If filename is always relative, we can do just
10200 + # rValue += _os.sep + filename
10201 + rValue = _os.path.join(rValue, filename)
10202 + return rValue
10203 +
10204 + def lock(self):
10205 + """
10206 + Acquire a reentrant lock, blocking, for cooperation with concurrent
10207 + processes. State is inherited by subprocesses, allowing subprocesses
10208 + to reenter a lock that was acquired by a parent process. However,
10209 + a lock can be released only by the same process that acquired it.
10210 + """
10211 + if self._lock_count:
10212 + self._lock_count += 1
10213 + else:
10214 + if self._lock is not None:
10215 + raise AssertionError("already locked")
10216 + # At least the parent needs to exist for the lock file.
10217 + ensure_dirs(self._dbroot)
10218 + self._lock = lockdir(self._dbroot)
10219 + self._lock_count += 1
10220 +
10221 + def unlock(self):
10222 + """
10223 + Release a lock, decrementing the recursion level. Each unlock() call
10224 + must be matched with a prior lock() call, or else an AssertionError
10225 + will be raised if unlock() is called while not locked.
10226 + """
10227 + if self._lock_count > 1:
10228 + self._lock_count -= 1
10229 + else:
10230 + if self._lock is None:
10231 + raise AssertionError("not locked")
10232 + self._lock_count = 0
10233 + unlockdir(self._lock)
10234 + self._lock = None
10235 +
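The lock()/unlock() pair above is reentrant within a single process, so nested critical sections simply bump and drop a counter. A minimal usage sketch, assuming `vardb` is an existing vardbapi instance (hypothetical here):

    # `vardb` is a hypothetical vardbapi instance.
    vardb.lock()            # acquires the vdb lock (count: 1)
    try:
        vardb.lock()        # reentrant: only increments the count to 2
        try:
            pass            # critical section touching /var/db/pkg
        finally:
            vardb.unlock()  # count back to 1, lock still held
    finally:
        vardb.unlock()      # count 0, lock actually released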
10236 + def _fs_lock(self):
10237 + """
10238 + Acquire a reentrant lock, blocking, for cooperation with concurrent
10239 + processes.
10240 + """
10241 + if self._fs_lock_count < 1:
10242 + if self._fs_lock_obj is not None:
10243 + raise AssertionError("already locked")
10244 + try:
10245 + self._fs_lock_obj = lockfile(self._conf_mem_file)
10246 + except InvalidLocation:
10247 + self.settings._init_dirs()
10248 + self._fs_lock_obj = lockfile(self._conf_mem_file)
10249 + self._fs_lock_count += 1
10250 +
10251 + def _fs_unlock(self):
10252 + """
10253 + Release a lock, decrementing the recursion level.
10254 + """
10255 + if self._fs_lock_count <= 1:
10256 + if self._fs_lock_obj is None:
10257 + raise AssertionError("not locked")
10258 + unlockfile(self._fs_lock_obj)
10259 + self._fs_lock_obj = None
10260 + self._fs_lock_count -= 1
10261 +
10262 + def _slot_lock(self, slot_atom):
10263 + """
10264 + Acquire a slot lock (reentrant).
10265 +
10266 + WARNING: The vardbapi._slot_lock method is not safe to call
10267 + in the main process when that process is scheduling
10268 + install/uninstall tasks in parallel, since the locks would
10269 + be inherited by child processes. In order to avoid this sort
10270 + of problem, this method should be called in a subprocess
10271 + (typically spawned by the MergeProcess class).
10272 + """
10273 + lock, counter = self._slot_locks.get(slot_atom, (None, 0))
10274 + if lock is None:
10275 + lock_path = self.getpath("%s:%s" % (slot_atom.cp, slot_atom.slot))
10276 + ensure_dirs(os.path.dirname(lock_path))
10277 + lock = lockfile(lock_path, wantnewlockfile=True)
10278 + self._slot_locks[slot_atom] = (lock, counter + 1)
10279 +
10280 + def _slot_unlock(self, slot_atom):
10281 + """
10282 + Release a slot lock (or decrement the recursion level).
10283 + """
10284 + lock, counter = self._slot_locks.get(slot_atom, (None, 0))
10285 + if lock is None:
10286 + raise AssertionError("not locked")
10287 + counter -= 1
10288 + if counter == 0:
10289 + unlockfile(lock)
10290 + del self._slot_locks[slot_atom]
10291 + else:
10292 + self._slot_locks[slot_atom] = (lock, counter)
10293 +
10294 + def _bump_mtime(self, cpv):
10295 + """
10296 + This is called before and after any modifications, so that consumers
10297 + can use directory mtimes to validate caches. See bug #290428.
10298 + """
10299 + base = self._eroot + VDB_PATH
10300 + cat = catsplit(cpv)[0]
10301 + catdir = base + _os.sep + cat
10302 + t = time.time()
10303 + t = (t, t)
10304 + try:
10305 + for x in (catdir, base):
10306 + os.utime(x, t)
10307 + except OSError:
10308 + ensure_dirs(catdir)
10309 +
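_bump_mtime() above touches both the category directory and the vdb base directory precisely so that consumers can validate their caches with two cheap stat calls. A sketch of that validation pattern, again assuming a hypothetical `vardb` instance:

    import os

    cat_dir = vardb.getpath("sys-apps")    # hypothetical category
    cached = os.stat(cat_dir).st_mtime_ns  # record mtime when caching
    # ... later ...
    if os.stat(cat_dir).st_mtime_ns != cached:
        pass  # category was modified since caching; discard cached data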
10310 + def cpv_exists(self, mykey, myrepo=None):
10311 + "Tells us whether an actual ebuild exists on disk (no masking)"
10312 + return os.path.exists(self.getpath(mykey))
10313 +
10314 + def cpv_counter(self, mycpv):
10315 + "This method will grab the COUNTER. Returns a counter value."
10316 + try:
10317 + return int(self.aux_get(mycpv, ["COUNTER"])[0])
10318 + except (KeyError, ValueError):
10319 + pass
10320 + writemsg_level(
10321 + _("portage: COUNTER for %s was corrupted; " "resetting to value of 0\n")
10322 + % (mycpv,),
10323 + level=logging.ERROR,
10324 + noiselevel=-1,
10325 + )
10326 + return 0
10327 +
10328 + def cpv_inject(self, mycpv):
10329 + "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
10330 + ensure_dirs(self.getpath(mycpv))
10331 + counter = self.counter_tick(mycpv=mycpv)
10332 + # write local package counter so that emerge clean does the right thing
10333 + write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
10334 +
10335 + def isInjected(self, mycpv):
10336 + if self.cpv_exists(mycpv):
10337 + if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
10338 + return True
10339 + if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
10340 + return True
10341 + return False
10342 +
10343 + def move_ent(self, mylist, repo_match=None):
10344 + origcp = mylist[1]
10345 + newcp = mylist[2]
10346 +
10347 + # sanity check
10348 + for atom in (origcp, newcp):
10349 + if not isjustname(atom):
10350 + raise InvalidPackageName(str(atom))
10351 + origmatches = self.match(origcp, use_cache=0)
10352 + moves = 0
10353 + if not origmatches:
10354 + return moves
10355 + for mycpv in origmatches:
10356 + mycpv_cp = mycpv.cp
10357 + if mycpv_cp != origcp:
10358 + # Ignore PROVIDE virtual match.
10359 + continue
10360 + if repo_match is not None and not repo_match(mycpv.repo):
10361 + continue
10362 +
10363 + # Use isvalidatom() to check if this move is valid for the
10364 + # EAPI (characters allowed in package names may vary).
10365 + if not isvalidatom(newcp, eapi=mycpv.eapi):
10366 + continue
10367 +
10368 + mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
10369 + mynewcat = catsplit(newcp)[0]
10370 + origpath = self.getpath(mycpv)
10371 + if not os.path.exists(origpath):
10372 + continue
10373 + moves += 1
10374 + if not os.path.exists(self.getpath(mynewcat)):
10375 + # create the directory
10376 + ensure_dirs(self.getpath(mynewcat))
10377 + newpath = self.getpath(mynewcpv)
10378 + if os.path.exists(newpath):
10379 + # dest already exists; keep this puppy where it is.
10380 + continue
10381 + _movefile(origpath, newpath, mysettings=self.settings)
10382 + self._clear_pkg_cache(self._dblink(mycpv))
10383 + self._clear_pkg_cache(self._dblink(mynewcpv))
10384 +
10385 + # We need to rename the ebuild now.
10386 + old_pf = catsplit(mycpv)[1]
10387 + new_pf = catsplit(mynewcpv)[1]
10388 + if new_pf != old_pf:
10389 + try:
10390 + os.rename(
10391 + os.path.join(newpath, old_pf + ".ebuild"),
10392 + os.path.join(newpath, new_pf + ".ebuild"),
10393 + )
10394 + except EnvironmentError as e:
10395 + if e.errno != errno.ENOENT:
10396 + raise
10397 + del e
10398 + write_atomic(os.path.join(newpath, "PF"), new_pf + "\n")
10399 + write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat + "\n")
10400 +
10401 + return moves
10402 +
10403 + def cp_list(self, mycp, use_cache=1):
10404 + mysplit = catsplit(mycp)
10405 + if mysplit[0] == "*":
10406 + mysplit[0] = mysplit[0][1:]
10407 + try:
10408 + mystat = os.stat(self.getpath(mysplit[0])).st_mtime_ns
10409 + except OSError:
10410 + mystat = 0
10411 + if use_cache and mycp in self.cpcache:
10412 + cpc = self.cpcache[mycp]
10413 + if cpc[0] == mystat:
10414 + return cpc[1][:]
10415 + cat_dir = self.getpath(mysplit[0])
10416 + try:
10417 + dir_list = os.listdir(cat_dir)
10418 + except EnvironmentError as e:
10419 + if e.errno == PermissionDenied.errno:
10420 + raise PermissionDenied(cat_dir)
10421 + del e
10422 + dir_list = []
10423 +
10424 + returnme = []
10425 + for x in dir_list:
10426 + if self._excluded_dirs.match(x) is not None:
10427 + continue
10428 + ps = pkgsplit(x)
10429 + if not ps:
10430 + self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
10431 + continue
10432 + if len(mysplit) > 1:
10433 + if ps[0] == mysplit[1]:
10434 + cpv = "%s/%s" % (mysplit[0], x)
10435 + metadata = dict(
10436 + zip(
10437 + self._aux_cache_keys,
10438 + self.aux_get(cpv, self._aux_cache_keys),
10439 + )
10440 + )
10441 + returnme.append(
10442 + _pkg_str(
10443 + cpv, metadata=metadata, settings=self.settings, db=self
10444 + )
10445 + )
10446 + self._cpv_sort_ascending(returnme)
10447 + if use_cache:
10448 + self.cpcache[mycp] = [mystat, returnme[:]]
10449 + elif mycp in self.cpcache:
10450 + del self.cpcache[mycp]
10451 + return returnme
10452 +
10453 + def cpv_all(self, use_cache=1):
10454 + """
10455 + Set use_cache=0 to bypass the portage.cachedir() cache in cases
10456 + when the accuracy of mtime staleness checks should not be trusted
10457 + (generally this is only necessary in critical sections that
10458 + involve merge or unmerge of packages).
10459 + """
10460 + return list(self._iter_cpv_all(use_cache=use_cache))
10461 +
10462 + def _iter_cpv_all(self, use_cache=True, sort=False):
10463 + returnme = []
10464 + basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep
10465 +
10466 + if use_cache:
10467 + from portage import listdir
10468 + else:
10469 +
10470 + def listdir(p, **kwargs):
10471 + try:
10472 + return [
10473 + x for x in os.listdir(p) if os.path.isdir(os.path.join(p, x))
10474 + ]
10475 + except EnvironmentError as e:
10476 + if e.errno == PermissionDenied.errno:
10477 + raise PermissionDenied(p)
10478 + del e
10479 + return []
10480 +
10481 + catdirs = listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1)
10482 + if sort:
10483 + catdirs.sort()
10484 +
10485 + for x in catdirs:
10486 + if self._excluded_dirs.match(x) is not None:
10487 + continue
10488 + if not self._category_re.match(x):
10489 + continue
10490 +
10491 + pkgdirs = listdir(basepath + x, EmptyOnError=1, dirsonly=1)
10492 + if sort:
10493 + pkgdirs.sort()
10494 +
10495 + for y in pkgdirs:
10496 + if self._excluded_dirs.match(y) is not None:
10497 + continue
10498 + subpath = x + "/" + y
10499 + # -MERGING- should never be a cpv, nor should files.
10500 + try:
10501 + subpath = _pkg_str(subpath, db=self)
10502 + except InvalidData:
10503 + self.invalidentry(self.getpath(subpath))
10504 + continue
10505 +
10506 + yield subpath
10507 +
10508 + def cp_all(self, use_cache=1, sort=False):
10509 + mylist = self.cpv_all(use_cache=use_cache)
10510 + d = {}
10511 + for y in mylist:
10512 + if y[0] == "*":
10513 + y = y[1:]
10514 + try:
10515 + mysplit = catpkgsplit(y)
10516 + except InvalidData:
10517 + self.invalidentry(self.getpath(y))
10518 + continue
10519 + if not mysplit:
10520 + self.invalidentry(self.getpath(y))
10521 + continue
10522 + d[mysplit[0] + "/" + mysplit[1]] = None
10523 + return sorted(d) if sort else list(d)
10524 +
10525 + def checkblockers(self, origdep):
10526 + pass
10527 +
10528 + def _clear_cache(self):
10529 + self.mtdircache.clear()
10530 + self.matchcache.clear()
10531 + self.cpcache.clear()
10532 + self._aux_cache_obj = None
10533 +
10534 + def _add(self, pkg_dblink):
10535 + self._pkgs_changed = True
10536 + self._clear_pkg_cache(pkg_dblink)
10537 +
10538 + def _remove(self, pkg_dblink):
10539 + self._pkgs_changed = True
10540 + self._clear_pkg_cache(pkg_dblink)
10541 +
10542 + def _clear_pkg_cache(self, pkg_dblink):
10543 + # Due to 1 second mtime granularity in <python-2.5, mtime checks
10544 + # are not always sufficient to invalidate vardbapi caches. Therefore,
10545 + # the caches need to be actively invalidated here.
10546 + self.mtdircache.pop(pkg_dblink.cat, None)
10547 + self.matchcache.pop(pkg_dblink.cat, None)
10548 + self.cpcache.pop(pkg_dblink.mysplit[0], None)
10549 + dircache.pop(pkg_dblink.dbcatdir, None)
10550 +
10551 + def match(self, origdep, use_cache=1):
10552 + "caching match function"
10553 + mydep = dep_expand(
10554 + origdep, mydb=self, use_cache=use_cache, settings=self.settings
10555 + )
10556 + cache_key = (mydep, mydep.unevaluated_atom)
10557 + mykey = dep_getkey(mydep)
10558 + mycat = catsplit(mykey)[0]
10559 + if not use_cache:
10560 + if mycat in self.matchcache:
10561 + del self.mtdircache[mycat]
10562 + del self.matchcache[mycat]
10563 + return list(
10564 + self._iter_match(mydep, self.cp_list(mydep.cp, use_cache=use_cache))
10565 + )
10566 + try:
10567 + curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime_ns
10568 + except (IOError, OSError):
10569 + curmtime = 0
10570 +
10571 + if mycat not in self.matchcache or self.mtdircache[mycat] != curmtime:
10572 + # clear cache entry
10573 + self.mtdircache[mycat] = curmtime
10574 + self.matchcache[mycat] = {}
10575 + if mydep not in self.matchcache[mycat]:
10576 + mymatch = list(
10577 + self._iter_match(mydep, self.cp_list(mydep.cp, use_cache=use_cache))
10578 + )
10579 + self.matchcache[mycat][cache_key] = mymatch
10580 + return self.matchcache[mycat][cache_key][:]
10581 +
10582 + def findname(self, mycpv, myrepo=None):
10583 + return self.getpath(str(mycpv), filename=catsplit(mycpv)[1] + ".ebuild")
10584 +
10585 + def flush_cache(self):
10586 + """If the current user has permission and the internal aux_get cache has
10587 + been updated, save it to disk and mark it unmodified. This is called
10588 + by emerge after it has loaded the full vdb for use in dependency
10589 + calculations. Currently, the cache is only written if the user has
10590 + superuser privileges (since that's required to obtain a lock), but all
10591 + users have read access and benefit from faster metadata lookups (as
10592 + long as at least part of the cache is still valid)."""
10593 + if (
10594 + self._flush_cache_enabled
10595 + and self._aux_cache is not None
10596 + and secpass >= 2
10597 + and (
10598 + len(self._aux_cache["modified"]) >= self._aux_cache_threshold
10599 + or not os.path.exists(self._cache_delta_filename)
10600 + )
10601 + ):
10602 +
10603 + ensure_dirs(os.path.dirname(self._aux_cache_filename))
10604 +
10605 + self._owners.populate() # index any unindexed contents
10606 + valid_nodes = set(self.cpv_all())
10607 + for cpv in list(self._aux_cache["packages"]):
10608 + if cpv not in valid_nodes:
10609 + del self._aux_cache["packages"][cpv]
10610 + del self._aux_cache["modified"]
10611 + timestamp = time.time()
10612 + self._aux_cache["timestamp"] = timestamp
10613 +
10614 + with atomic_ofstream(self._aux_cache_filename, "wb") as f:
10615 + pickle.dump(self._aux_cache, f, protocol=2)
10616 +
10617 + apply_secpass_permissions(self._aux_cache_filename, mode=0o644)
10618 +
10619 + self._cache_delta.initialize(timestamp)
10620 + apply_secpass_permissions(self._cache_delta_filename, mode=0o644)
10621 +
10622 + self._aux_cache["modified"] = set()
10623 +
10624 + @property
10625 + def _aux_cache(self):
10626 + if self._aux_cache_obj is None:
10627 + self._aux_cache_init()
10628 + return self._aux_cache_obj
10629 +
10630 + def _aux_cache_init(self):
10631 + aux_cache = None
10632 + open_kwargs = {}
10633 + try:
10634 + with open(
10635 + _unicode_encode(
10636 + self._aux_cache_filename, encoding=_encodings["fs"], errors="strict"
10637 + ),
10638 + mode="rb",
10639 + **open_kwargs
10640 + ) as f:
10641 + mypickle = pickle.Unpickler(f)
10642 + try:
10643 + mypickle.find_global = None
10644 + except AttributeError:
10645 + # TODO: If py3k, override Unpickler.find_class().
10646 + pass
10647 + aux_cache = mypickle.load()
10648 + except (SystemExit, KeyboardInterrupt):
10649 + raise
10650 + except Exception as e:
10651 + if isinstance(e, EnvironmentError) and getattr(e, "errno", None) in (
10652 + errno.ENOENT,
10653 + errno.EACCES,
10654 + ):
10655 + pass
10656 + else:
10657 + writemsg(
10658 + _("!!! Error loading '%s': %s\n") % (self._aux_cache_filename, e),
10659 + noiselevel=-1,
10660 + )
10661 + del e
10662 +
10663 + if (
10664 + not aux_cache
10665 + or not isinstance(aux_cache, dict)
10666 + or aux_cache.get("version") != self._aux_cache_version
10667 + or not aux_cache.get("packages")
10668 + ):
10669 + aux_cache = {"version": self._aux_cache_version}
10670 + aux_cache["packages"] = {}
10671 +
10672 + owners = aux_cache.get("owners")
10673 + if owners is not None:
10674 + if not isinstance(owners, dict):
10675 + owners = None
10676 + elif "version" not in owners:
10677 + owners = None
10678 + elif owners["version"] != self._owners_cache_version:
10679 + owners = None
10680 + elif "base_names" not in owners:
10681 + owners = None
10682 + elif not isinstance(owners["base_names"], dict):
10683 + owners = None
10684 +
10685 + if owners is None:
10686 + owners = {"base_names": {}, "version": self._owners_cache_version}
10687 + aux_cache["owners"] = owners
10688 +
10689 + aux_cache["modified"] = set()
10690 + self._aux_cache_obj = aux_cache
10691 +
10692 + def aux_get(self, mycpv, wants, myrepo=None):
10693 + """This automatically caches selected keys that are frequently needed
10694 + by emerge for dependency calculations. The cached metadata is
10695 + considered valid if the mtime of the package directory has not changed
10696 + since the data was cached. The cache is stored in a pickled dict
10697 + object with the following format:
10698 +
10699 + {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
10700 +
10701 + If an error occurs while loading the cache pickle or the version is
10702 + unrecognized, the cache will simply be recreated from scratch (it is
10703 + completely disposable).
10704 + """
10705 + cache_these_wants = self._aux_cache_keys.intersection(wants)
10706 + for x in wants:
10707 + if self._aux_cache_keys_re.match(x) is not None:
10708 + cache_these_wants.add(x)
10709 +
10710 + if not cache_these_wants:
10711 + mydata = self._aux_get(mycpv, wants)
10712 + return [mydata[x] for x in wants]
10713 +
10714 + cache_these = set(self._aux_cache_keys)
10715 + cache_these.update(cache_these_wants)
10716 +
10717 + mydir = self.getpath(mycpv)
10718 + mydir_stat = None
10719 + try:
10720 + mydir_stat = os.stat(mydir)
10721 + except OSError as e:
10722 + if e.errno != errno.ENOENT:
10723 + raise
10724 + raise KeyError(mycpv)
10725 + # Use float mtime when available.
10726 + mydir_mtime = mydir_stat.st_mtime
10727 + pkg_data = self._aux_cache["packages"].get(mycpv)
10728 + pull_me = cache_these.union(wants)
10729 + mydata = {"_mtime_": mydir_mtime}
10730 + cache_valid = False
10731 + cache_incomplete = False
10732 + cache_mtime = None
10733 + metadata = None
10734 + if pkg_data is not None:
10735 + if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
10736 + pkg_data = None
10737 + else:
10738 + cache_mtime, metadata = pkg_data
10739 + if not isinstance(cache_mtime, (float, int)) or not isinstance(
10740 + metadata, dict
10741 + ):
10742 + pkg_data = None
10743 +
10744 + if pkg_data:
10745 + cache_mtime, metadata = pkg_data
10746 + if isinstance(cache_mtime, float):
10747 + if cache_mtime == mydir_stat.st_mtime:
10748 + cache_valid = True
10749 +
10750 + # Handle truncated mtime in order to avoid cache
10751 + # invalidation for livecd squashfs (bug 564222).
10752 + elif int(cache_mtime) == mydir_stat.st_mtime:
10753 + cache_valid = True
10754 + else:
10755 + # Cache may contain integer mtime.
10756 + cache_valid = cache_mtime == mydir_stat[stat.ST_MTIME]
10757 +
10758 + if cache_valid:
10759 + # Migrate old metadata to unicode.
10760 + for k, v in metadata.items():
10761 + metadata[k] = _unicode_decode(
10762 + v, encoding=_encodings["repo.content"], errors="replace"
10763 + )
10764 +
10765 + mydata.update(metadata)
10766 + pull_me.difference_update(mydata)
10767 +
10768 + if pull_me:
10769 + # pull any needed data and cache it
10770 + aux_keys = list(pull_me)
10771 + mydata.update(self._aux_get(mycpv, aux_keys, st=mydir_stat))
10772 + if not cache_valid or cache_these.difference(metadata):
10773 + cache_data = {}
10774 + if cache_valid and metadata:
10775 + cache_data.update(metadata)
10776 + for aux_key in cache_these:
10777 + cache_data[aux_key] = mydata[aux_key]
10778 + self._aux_cache["packages"][str(mycpv)] = (mydir_mtime, cache_data)
10779 + self._aux_cache["modified"].add(mycpv)
10780 +
10781 + eapi_attrs = _get_eapi_attrs(mydata["EAPI"])
10782 + if _get_slot_re(eapi_attrs).match(mydata["SLOT"]) is None:
10783 + # Empty or invalid slot triggers InvalidAtom exceptions when
10784 + # generating slot atoms for packages, so translate it to '0' here.
10785 + mydata["SLOT"] = "0"
10786 +
10787 + return [mydata[x] for x in wants]
10788 +
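In practice the cached keys make aux_get() cheap for the metadata emerge needs most; a short sketch under the assumption that `vardb` exists and the cpv names an installed package (both hypothetical):

    cpv = "sys-apps/portage-3.0.30"  # hypothetical installed package
    try:
        slot, repo = vardb.aux_get(cpv, ["SLOT", "repository"])
    except KeyError:
        slot = repo = None  # raised when the package is not installed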
10789 + def _aux_get(self, mycpv, wants, st=None):
10790 + mydir = self.getpath(mycpv)
10791 + if st is None:
10792 + try:
10793 + st = os.stat(mydir)
10794 + except OSError as e:
10795 + if e.errno == errno.ENOENT:
10796 + raise KeyError(mycpv)
10797 + elif e.errno == PermissionDenied.errno:
10798 + raise PermissionDenied(mydir)
10799 + else:
10800 + raise
10801 + if not stat.S_ISDIR(st.st_mode):
10802 + raise KeyError(mycpv)
10803 + results = {}
10804 + env_keys = []
10805 + for x in wants:
10806 + if x == "_mtime_":
10807 + results[x] = st[stat.ST_MTIME]
10808 + continue
10809 + try:
10810 + with io.open(
10811 + _unicode_encode(
10812 + os.path.join(mydir, x),
10813 + encoding=_encodings["fs"],
10814 + errors="strict",
10815 + ),
10816 + mode="r",
10817 + encoding=_encodings["repo.content"],
10818 + errors="replace",
10819 + ) as f:
10820 + myd = f.read()
10821 + except IOError:
10822 + if (
10823 + x not in self._aux_cache_keys
10824 + and self._aux_cache_keys_re.match(x) is None
10825 + ):
10826 + env_keys.append(x)
10827 + continue
10828 + myd = ""
10829 +
10830 + # Preserve \n for metadata that is known to
10831 + # contain multiple lines.
10832 + if self._aux_multi_line_re.match(x) is None:
10833 + myd = " ".join(myd.split())
10834 +
10835 + results[x] = myd
10836 +
10837 + if env_keys:
10838 + env_results = self._aux_env_search(mycpv, env_keys)
10839 + for k in env_keys:
10840 + v = env_results.get(k)
10841 + if v is None:
10842 + v = ""
10843 + if self._aux_multi_line_re.match(k) is None:
10844 + v = " ".join(v.split())
10845 + results[k] = v
10846 +
10847 + if results.get("EAPI") == "":
10848 + results["EAPI"] = "0"
10849 +
10850 + return results
10851 +
10852 + def _aux_env_search(self, cpv, variables):
10853 + """
10854 + Search environment.bz2 for the specified variables. Returns
10855 + a dict mapping variables to values, and any variables not
10856 + found in the environment will not be included in the dict.
10857 + This is useful for querying variables like ${SRC_URI} and
10858 + ${A}, which are not saved in separate files but are available
10859 + in environment.bz2 (see bug #395463).
10860 + """
10861 + env_file = self.getpath(cpv, filename="environment.bz2")
10862 + if not os.path.isfile(env_file):
10863 + return {}
10864 + bunzip2_cmd = portage.util.shlex_split(
10865 + self.settings.get("PORTAGE_BUNZIP2_COMMAND", "")
10866 + )
10867 + if not bunzip2_cmd:
10868 + bunzip2_cmd = portage.util.shlex_split(
10869 + self.settings["PORTAGE_BZIP2_COMMAND"]
10870 + )
10871 + bunzip2_cmd.append("-d")
10872 + args = bunzip2_cmd + ["-c", env_file]
10873 + try:
10874 + proc = subprocess.Popen(args, stdout=subprocess.PIPE)
10875 + except EnvironmentError as e:
10876 + if e.errno != errno.ENOENT:
10877 + raise
10878 + raise portage.exception.CommandNotFound(args[0])
10879 +
10880 + # Parts of the following code are borrowed from
10881 + # filter-bash-environment.py (keep them in sync).
10882 + var_assign_re = re.compile(
10883 + r'(^|^declare\s+-\S+\s+|^declare\s+|^export\s+)([^=\s]+)=("|\')?(.*)$'
10884 + )
10885 + close_quote_re = re.compile(r'(\\"|"|\')\s*$')
10886 +
10887 + def have_end_quote(quote, line):
10888 + close_quote_match = close_quote_re.search(line)
10889 + return close_quote_match is not None and close_quote_match.group(1) == quote
10890 +
10891 + variables = frozenset(variables)
10892 + results = {}
10893 + for line in proc.stdout:
10894 + line = _unicode_decode(
10895 + line, encoding=_encodings["content"], errors="replace"
10896 + )
10897 + var_assign_match = var_assign_re.match(line)
10898 + if var_assign_match is not None:
10899 + key = var_assign_match.group(2)
10900 + quote = var_assign_match.group(3)
10901 + if quote is not None:
10902 + if have_end_quote(quote, line[var_assign_match.end(2) + 2 :]):
10903 + value = var_assign_match.group(4)
10904 + else:
10905 + value = [var_assign_match.group(4)]
10906 + for line in proc.stdout:
10907 + line = _unicode_decode(
10908 + line, encoding=_encodings["content"], errors="replace"
10909 + )
10910 + value.append(line)
10911 + if have_end_quote(quote, line):
10912 + break
10913 + value = "".join(value)
10914 + # remove trailing quote and whitespace
10915 + value = value.rstrip()[:-1]
10916 + else:
10917 + value = var_assign_match.group(4).rstrip()
10918 +
10919 + if key in variables:
10920 + results[key] = value
10921 +
10922 + proc.wait()
10923 + proc.stdout.close()
10924 + return results
10925 +
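A sketch of how _aux_env_search() recovers values such as SRC_URI that only live in environment.bz2; `vardb` and the cpv are assumptions, and keys absent from the environment are simply missing from the result dict:

    env = vardb._aux_env_search(
        "sys-apps/portage-3.0.30",  # hypothetical installed package
        ["SRC_URI", "A"],
    )
    src_uri = env.get("SRC_URI", "")  # empty when not found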
10926 + def aux_update(self, cpv, values):
10927 + mylink = self._dblink(cpv)
10928 + if not mylink.exists():
10929 + raise KeyError(cpv)
10930 + self._bump_mtime(cpv)
10931 + self._clear_pkg_cache(mylink)
10932 + for k, v in values.items():
10933 + if v:
10934 + mylink.setfile(k, v)
10935 + else:
10936 + try:
10937 + os.unlink(os.path.join(self.getpath(cpv), k))
10938 + except EnvironmentError:
10939 + pass
10940 + self._bump_mtime(cpv)
10941 +
10942 + async def unpack_metadata(self, pkg, dest_dir, loop=None):
10943 + """
10944 + Unpack package metadata to a directory. This method is a coroutine.
10945 +
10946 + @param pkg: package to unpack
10947 + @type pkg: _pkg_str or portage.config
10948 + @param dest_dir: destination directory
10949 + @type dest_dir: str
10950 + """
10951 + loop = asyncio._wrap_loop(loop)
10952 + if not isinstance(pkg, portage.config):
10953 + cpv = pkg
10954 + else:
10955 + cpv = pkg.mycpv
10956 + dbdir = self.getpath(cpv)
10957 +
10958 + def async_copy():
10959 + for parent, dirs, files in os.walk(dbdir, onerror=_raise_exc):
10960 + for key in files:
10961 + shutil.copy(os.path.join(parent, key), os.path.join(dest_dir, key))
10962 + break
10963 +
10964 + await loop.run_in_executor(ForkExecutor(loop=loop), async_copy)
10965 +
10966 + async def unpack_contents(
10967 + self,
10968 + pkg,
10969 + dest_dir,
10970 + include_config=None,
10971 + include_unmodified_config=None,
10972 + loop=None,
10973 + ):
10974 + """
10975 + Unpack package contents to a directory. This method is a coroutine.
10976 +
10977 + This copies files from the installed system, in the same way
10978 + as the quickpkg(1) command. Default behavior for handling
10979 + of protected configuration files is controlled by the
10980 + QUICKPKG_DEFAULT_OPTS variable. The relevant quickpkg options
10981 + are --include-config and --include-unmodified-config. When
10982 + a configuration file is not included because it is protected,
10983 + an ewarn message is logged.
10984 +
10985 + @param pkg: package to unpack
10986 + @type pkg: _pkg_str or portage.config
10987 + @param dest_dir: destination directory
10988 + @type dest_dir: str
10989 + @param include_config: Include all files protected by
10990 + CONFIG_PROTECT (as a security precaution, default is False
10991 + unless modified by QUICKPKG_DEFAULT_OPTS).
10992 + @type include_config: bool
10993 + @param include_unmodified_config: Include files protected by
10994 + CONFIG_PROTECT that have not been modified since installation
10995 + (as a security precaution, default is False unless modified
10996 + by QUICKPKG_DEFAULT_OPTS).
10997 + @type include_unmodified_config: bool
10998 + """
10999 + loop = asyncio._wrap_loop(loop)
11000 + if not isinstance(pkg, portage.config):
11001 + settings = self.settings
11002 + cpv = pkg
11003 + else:
11004 + settings = pkg
11005 + cpv = settings.mycpv
11006 +
11007 + scheduler = SchedulerInterface(loop)
11008 + parser = argparse.ArgumentParser()
11009 + parser.add_argument("--include-config", choices=("y", "n"), default="n")
11010 + parser.add_argument(
11011 + "--include-unmodified-config", choices=("y", "n"), default="n"
11012 + )
11013 +
11014 + # Method parameters may override QUICKPKG_DEFAULT_OPTS.
11015 + opts_list = portage.util.shlex_split(settings.get("QUICKPKG_DEFAULT_OPTS", ""))
11016 + if include_config is not None:
11017 + opts_list.append(
11018 + "--include-config={}".format("y" if include_config else "n")
11019 + )
11020 + if include_unmodified_config is not None:
11021 + opts_list.append(
11022 + "--include-unmodified-config={}".format(
11023 + "y" if include_unmodified_config else "n"
11024 + )
11025 + )
11026 +
11027 + opts, args = parser.parse_known_args(opts_list)
11028 +
11029 + tar_cmd = ("tar", "-x", "--xattrs", "--xattrs-include=*", "-C", dest_dir)
11030 + pr, pw = os.pipe()
11031 + proc = await asyncio.create_subprocess_exec(*tar_cmd, stdin=pr)
11032 + os.close(pr)
11033 + with os.fdopen(pw, "wb", 0) as pw_file:
11034 + excluded_config_files = await loop.run_in_executor(
11035 + ForkExecutor(loop=loop),
11036 + functools.partial(
11037 + self._dblink(cpv).quickpkg,
11038 + pw_file,
11039 + include_config=opts.include_config == "y",
11040 + include_unmodified_config=opts.include_unmodified_config == "y",
11041 + ),
11042 + )
11043 + await proc.wait()
11044 + if proc.returncode != os.EX_OK:
11045 + raise PortageException("command failed: {}".format(tar_cmd))
11046 +
11047 + if excluded_config_files:
11048 + log_lines = [
11049 + _(
11050 + "Config files excluded by QUICKPKG_DEFAULT_OPTS (see quickpkg(1) man page):"
11051 + )
11052 + ] + ["\t{}".format(name) for name in excluded_config_files]
11053 + out = io.StringIO()
11054 + for line in log_lines:
11055 + portage.elog.messages.ewarn(line, phase="install", key=cpv, out=out)
11056 + scheduler.output(
11057 + out.getvalue(),
11058 + background=self.settings.get("PORTAGE_BACKGROUND") == "1",
11059 + log_path=settings.get("PORTAGE_LOG_FILE"),
11060 + )
11061 +
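Because unpack_contents() is a coroutine, callers drive it from an event loop; a minimal sketch, assuming `vardb`, an installed cpv, and an existing writable dest_dir (all hypothetical):

    async def unpack(vardb, cpv, dest_dir):
        # copy the vdb metadata files, then the installed files
        await vardb.unpack_metadata(cpv, dest_dir)
        await vardb.unpack_contents(cpv, dest_dir, include_config=False)

    # e.g.: asyncio.run(unpack(vardb, "sys-apps/portage-3.0.30", "/tmp/pkg"))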
11062 + def counter_tick(self, myroot=None, mycpv=None):
11063 + """
11064 + @param myroot: ignored, self._eroot is used instead
11065 + """
11066 + return self.counter_tick_core(incrementing=1, mycpv=mycpv)
11067 +
11068 + def get_counter_tick_core(self, myroot=None, mycpv=None):
11069 + """
11070 + Use this method to retrieve the counter instead
11071 + of having to trust the value of a global counter
11072 + file that can lead to invalid COUNTER
11073 + generation. When cache is valid, the package COUNTER
11074 + files are not read and we rely on the timestamp of
11075 + the package directory to validate cache. The stat
11076 + calls should only take a short time, so performance
11077 + is sufficient without having to rely on a potentially
11078 + corrupt global counter file.
11079 +
11080 + The global counter file located at
11081 + $CACHE_PATH/counter serves to record the
11082 + counter of the last installed package and
11083 + it also corresponds to the total number of
11084 + installation actions that have occurred in
11085 + the history of this package database.
11086 +
11087 + @param myroot: ignored, self._eroot is used instead
11088 + """
11089 + del myroot
11090 + counter = -1
11091 + try:
11092 + with io.open(
11093 + _unicode_encode(
11094 + self._counter_path, encoding=_encodings["fs"], errors="strict"
11095 + ),
11096 + mode="r",
11097 + encoding=_encodings["repo.content"],
11098 + errors="replace",
11099 + ) as f:
11100 + try:
11101 + counter = int(f.readline().strip())
11102 + except (OverflowError, ValueError) as e:
11103 + writemsg(
11104 + _("!!! COUNTER file is corrupt: '%s'\n") % self._counter_path,
11105 + noiselevel=-1,
11106 + )
11107 + writemsg("!!! %s\n" % (e,), noiselevel=-1)
11108 + except EnvironmentError as e:
11109 + # Silently allow ENOENT since files under
11110 + # /var/cache/ are allowed to disappear.
11111 + if e.errno != errno.ENOENT:
11112 + writemsg(
11113 + _("!!! Unable to read COUNTER file: '%s'\n") % self._counter_path,
11114 + noiselevel=-1,
11115 + )
11116 + writemsg("!!! %s\n" % str(e), noiselevel=-1)
11117 + del e
11118 +
11119 + if self._cached_counter == counter:
11120 + max_counter = counter
11121 + else:
11122 + # We must ensure that we return a counter
11123 + # value that is at least as large as the
11124 + # highest one from the installed packages,
11125 + # since having a corrupt value that is too low
11126 + # can trigger incorrect AUTOCLEAN behavior due
11127 + # to newly installed packages having lower
11128 + # COUNTERs than the previous version in the
11129 + # same slot.
11130 + max_counter = counter
11131 + for cpv in self.cpv_all():
11132 + try:
11133 + pkg_counter = int(self.aux_get(cpv, ["COUNTER"])[0])
11134 + except (KeyError, OverflowError, ValueError):
11135 + continue
11136 + if pkg_counter > max_counter:
11137 + max_counter = pkg_counter
11138 +
11139 + return max_counter + 1
11140 +
11141 + def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
11142 + """
11143 + This method will grab the next COUNTER value and record it back
11144 + to the global file. Note that every package install must have
11145 + a unique counter, since a slotmove update can move two packages
11146 + into the same SLOT and in that case it's important that both
11147 + packages have different COUNTER metadata.
11148 +
11149 + @param myroot: ignored, self._eroot is used instead
11150 + @param mycpv: ignored
11151 + @rtype: int
11152 + @return: new counter value
11153 + """
11154 + myroot = None
11155 + mycpv = None
11156 + self.lock()
11157 + try:
11158 + counter = self.get_counter_tick_core() - 1
11159 + if incrementing:
11160 + # increment counter
11161 + counter += 1
11162 + # update new global counter file
11163 + try:
11164 + write_atomic(self._counter_path, str(counter))
11165 + except InvalidLocation:
11166 + self.settings._init_dirs()
11167 + write_atomic(self._counter_path, str(counter))
11168 + self._cached_counter = counter
11169 +
11170 + # Since we hold a lock, this is a good opportunity
11171 + # to flush the cache. Note that this will only
11172 + # flush the cache periodically in the main process
11173 + # when _aux_cache_threshold is exceeded.
11174 + self.flush_cache()
11175 + finally:
11176 + self.unlock()
11177 +
11178 + return counter
11179 +
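counter_tick() is the public wrapper around counter_tick_core() above; a sketch of the intended read and increment patterns, assuming a hypothetical `vardb` with write access to the vdb:

    current = vardb.get_counter_tick_core() - 1  # peek, nothing written
    new_counter = vardb.counter_tick()           # writes and returns current + 1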
11180 + def _dblink(self, cpv):
11181 + category, pf = catsplit(cpv)
11182 + return dblink(
11183 + category,
11184 + pf,
11185 + settings=self.settings,
11186 + vartree=self.vartree,
11187 + treetype="vartree",
11188 + )
11189 +
11190 + def removeFromContents(self, pkg, paths, relative_paths=True):
11191 + """
11192 + @param pkg: cpv for an installed package
11193 + @type pkg: string
11194 + @param paths: paths of files to remove from contents
11195 + @type paths: iterable
11196 + """
11197 + if not hasattr(pkg, "getcontents"):
11198 + pkg = self._dblink(pkg)
11199 + root = self.settings["ROOT"]
11200 + root_len = len(root) - 1
11201 + new_contents = pkg.getcontents().copy()
11202 + removed = 0
11203 +
11204 + for filename in paths:
11205 + filename = _unicode_decode(
11206 + filename, encoding=_encodings["content"], errors="strict"
11207 + )
11208 + filename = normalize_path(filename)
11209 + if relative_paths:
11210 + relative_filename = filename
11211 + else:
11212 + relative_filename = filename[root_len:]
11213 + contents_key = pkg._match_contents(relative_filename)
11214 + if contents_key:
11215 + # It's possible for two different paths to refer to the same
11216 + # contents_key, due to directory symlinks. Therefore, pass a
11217 + # default value to pop, in order to avoid a KeyError which
11218 + # could otherwise be triggered (see bug #454400).
11219 + new_contents.pop(contents_key, None)
11220 + removed += 1
11221 +
11222 + if removed:
11223 + # Also remove corresponding NEEDED lines, so that they do
11224 + # not corrupt LinkageMap data for preserve-libs.
11225 + needed_filename = os.path.join(pkg.dbdir, LinkageMap._needed_aux_key)
11226 + new_needed = None
11227 + try:
11228 + with io.open(
11229 + _unicode_encode(
11230 + needed_filename, encoding=_encodings["fs"], errors="strict"
11231 + ),
11232 + mode="r",
11233 + encoding=_encodings["repo.content"],
11234 + errors="replace",
11235 + ) as f:
11236 + needed_lines = f.readlines()
11237 + except IOError as e:
11238 + if e.errno not in (errno.ENOENT, errno.ESTALE):
11239 + raise
11240 + else:
11241 + new_needed = []
11242 + for l in needed_lines:
11243 + l = l.rstrip("\n")
11244 + if not l:
11245 + continue
11246 + try:
11247 + entry = NeededEntry.parse(needed_filename, l)
11248 + except InvalidData as e:
11249 + writemsg_level(
11250 + "\n%s\n\n" % (e,), level=logging.ERROR, noiselevel=-1
11251 + )
11252 + continue
11253 +
11254 + filename = os.path.join(root, entry.filename.lstrip(os.sep))
11255 + if filename in new_contents:
11256 + new_needed.append(entry)
11257 +
11258 + self.writeContentsToContentsFile(pkg, new_contents, new_needed=new_needed)
11259 +
11260 + def writeContentsToContentsFile(self, pkg, new_contents, new_needed=None):
11261 + """
11262 + @param pkg: package to write contents file for
11263 + @type pkg: dblink
11264 + @param new_contents: contents to write to CONTENTS file
11265 + @type new_contents: contents dictionary of the form
11266 + {u'/path/to/file' : (contents_attribute 1, ...), ...}
11267 + @param new_needed: new NEEDED entries
11268 + @type new_needed: list of NeededEntry
11269 + """
11270 + root = self.settings["ROOT"]
11271 + self._bump_mtime(pkg.mycpv)
11272 + if new_needed is not None:
11273 + f = atomic_ofstream(os.path.join(pkg.dbdir, LinkageMap._needed_aux_key))
11274 + for entry in new_needed:
11275 + f.write(str(entry))
11276 + f.close()
11277 + f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
11278 + write_contents(new_contents, root, f)
11279 + f.close()
11280 + self._bump_mtime(pkg.mycpv)
11281 + pkg._clear_contents_cache()
11282 +
11283 + class _owners_cache:
11284 + """
11285 + This class maintains a hash table that serves to index package
11286 + contents by mapping the basename of a file to a list of possible
11287 + packages that own it. This is used to optimize owner lookups
11288 + by narrowing the search down to a smaller number of packages.
11289 + """
11290 +
11291 + _new_hash = md5
11292 + _hash_bits = 16
11293 + _hex_chars = _hash_bits // 4
11294 +
11295 + def __init__(self, vardb):
11296 + self._vardb = vardb
11297 +
11298 + def add(self, cpv):
11299 + eroot_len = len(self._vardb._eroot)
11300 + pkg_hash = self._hash_pkg(cpv)
11301 + db = self._vardb._dblink(cpv)
11302 + if not db.getcontents():
11303 + # Empty path is a code used to represent empty contents.
11304 + self._add_path("", pkg_hash)
11305 +
11306 + for x in db._contents.keys():
11307 + self._add_path(x[eroot_len:], pkg_hash)
11308 +
11309 + self._vardb._aux_cache["modified"].add(cpv)
11310 +
11311 + def _add_path(self, path, pkg_hash):
11312 + """
11313 + Empty path is a code that represents empty contents.
11314 + """
11315 + if path:
11316 + name = os.path.basename(path.rstrip(os.path.sep))
11317 + if not name:
11318 + return
11319 + else:
11320 + name = path
11321 + name_hash = self._hash_str(name)
11322 + base_names = self._vardb._aux_cache["owners"]["base_names"]
11323 + pkgs = base_names.get(name_hash)
11324 + if pkgs is None:
11325 + pkgs = {}
11326 + base_names[name_hash] = pkgs
11327 + pkgs[pkg_hash] = None
11328 +
11329 + def _hash_str(self, s):
11330 + h = self._new_hash()
11331 + # Always use a constant utf_8 encoding here, since
11332 + # the "default" encoding can change.
11333 + h.update(
11334 + _unicode_encode(
11335 + s, encoding=_encodings["repo.content"], errors="backslashreplace"
11336 + )
11337 + )
11338 + h = h.hexdigest()
11339 + h = h[-self._hex_chars :]
11340 + h = int(h, 16)
11341 + return h
11342 +
11343 + def _hash_pkg(self, cpv):
11344 + counter, mtime = self._vardb.aux_get(cpv, ["COUNTER", "_mtime_"])
11345 + try:
11346 + counter = int(counter)
11347 + except ValueError:
11348 + counter = 0
11349 + return (str(cpv), counter, mtime)
11350 +
11351 + class _owners_db:
11352 + def __init__(self, vardb):
11353 + self._vardb = vardb
11354 +
11355 + def populate(self):
11356 + self._populate()
11357 +
11358 + def _populate(self):
11359 + owners_cache = vardbapi._owners_cache(self._vardb)
11360 + cached_hashes = set()
11361 + base_names = self._vardb._aux_cache["owners"]["base_names"]
11362 +
11363 + # Take inventory of all cached package hashes.
11364 + for name, hash_values in list(base_names.items()):
11365 + if not isinstance(hash_values, dict):
11366 + del base_names[name]
11367 + continue
11368 + cached_hashes.update(hash_values)
11369 +
11370 + # Create sets of valid package hashes and uncached packages.
11371 + uncached_pkgs = set()
11372 + hash_pkg = owners_cache._hash_pkg
11373 + valid_pkg_hashes = set()
11374 + for cpv in self._vardb.cpv_all():
11375 + hash_value = hash_pkg(cpv)
11376 + valid_pkg_hashes.add(hash_value)
11377 + if hash_value not in cached_hashes:
11378 + uncached_pkgs.add(cpv)
11379 +
11380 + # Cache any missing packages.
11381 + for cpv in uncached_pkgs:
11382 + owners_cache.add(cpv)
11383 +
11384 + # Delete any stale cache.
11385 + stale_hashes = cached_hashes.difference(valid_pkg_hashes)
11386 + if stale_hashes:
11387 + for base_name_hash, bucket in list(base_names.items()):
11388 + for hash_value in stale_hashes.intersection(bucket):
11389 + del bucket[hash_value]
11390 + if not bucket:
11391 + del base_names[base_name_hash]
11392 +
11393 + return owners_cache
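+ # Editor's note: _populate() is effectively idempotent; repeated
+ # calls only cache packages that are missing and drop hashes for
+ # packages that were unmerged or re-merged (COUNTER/mtime changed).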
11394 +
11395 + def get_owners(self, path_iter):
11396 + """
11397 + @return: the owners as a dblink -> set(files) mapping.
11398 + """
11399 + owners = {}
11400 + for owner, f in self.iter_owners(path_iter):
11401 + owned_files = owners.get(owner)
11402 + if owned_files is None:
11403 + owned_files = set()
11404 + owners[owner] = owned_files
11405 + owned_files.add(f)
11406 + return owners
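+ # Editor's note: getFileOwnerMap() below inverts this mapping,
+ # yielding file -> set(dblink) instead of dblink -> set(files).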
11407 +
11408 + def getFileOwnerMap(self, path_iter):
11409 + owners = self.get_owners(path_iter)
11410 + file_owners = {}
11411 + for pkg_dblink, files in owners.items():
11412 + for f in files:
11413 + owner_set = file_owners.get(f)
11414 + if owner_set is None:
11415 + owner_set = set()
11416 + file_owners[f] = owner_set
11417 + owner_set.add(pkg_dblink)
11418 + return file_owners
11419 +
11420 + def iter_owners(self, path_iter):
11421 + """
11422 + Iterate over tuples of (dblink, path). To avoid tying up
11423 + too many resources for too long, resources are only
11424 + allocated for the duration of a given iter_owners()
11425 + call. Therefore, to maximize reuse of resources when searching
11426 + for multiple files, it's best to search for them all in a single
11427 + call.
11428 + """
11429 +
11430 + if not isinstance(path_iter, list):
11431 + path_iter = list(path_iter)
11432 + owners_cache = self._populate()
11433 + vardb = self._vardb
11434 + root = vardb._eroot
11435 + hash_pkg = owners_cache._hash_pkg
11436 + hash_str = owners_cache._hash_str
11437 + base_names = self._vardb._aux_cache["owners"]["base_names"]
11438 + case_insensitive = "case-insensitive-fs" in vardb.settings.features
11439 +
11440 + dblink_cache = {}
11441 +
11442 + def dblink(cpv):
11443 + x = dblink_cache.get(cpv)
11444 + if x is None:
11445 + if len(dblink_cache) > 20:
11446 + # Ensure that we don't run out of memory.
11447 + raise StopIteration()
11448 + x = self._vardb._dblink(cpv)
11449 + dblink_cache[cpv] = x
11450 + return x
11451 +
11452 + while path_iter:
11453 +
11454 + path = path_iter.pop()
11455 + if case_insensitive:
11456 + path = path.lower()
11457 + is_basename = os.sep != path[:1]
11458 + if is_basename:
11459 + name = path
11460 + else:
11461 + name = os.path.basename(path.rstrip(os.path.sep))
11462 +
11463 + if not name:
11464 + continue
11465 +
11466 + name_hash = hash_str(name)
11467 + pkgs = base_names.get(name_hash)
11468 + owners = []
11469 + if pkgs is not None:
11470 + try:
11471 + for hash_value in pkgs:
11472 + if (
11473 + not isinstance(hash_value, tuple)
11474 + or len(hash_value) != 3
11475 + ):
11476 + continue
11477 + cpv, counter, mtime = hash_value
11478 + if not isinstance(cpv, str):
11479 + continue
11480 + try:
11481 + current_hash = hash_pkg(cpv)
11482 + except KeyError:
11483 + continue
11484 +
11485 + if current_hash != hash_value:
11486 + continue
11487 +
11488 + if is_basename:
11489 + for p in dblink(cpv)._contents.keys():
11490 + if os.path.basename(p) == name:
11491 + owners.append(
11492 + (
11493 + cpv,
11494 + dblink(cpv)._contents.unmap_key(p)[
11495 + len(root) :
11496 + ],
11497 + )
11498 + )
11499 + else:
11500 + key = dblink(cpv)._match_contents(path)
11501 + if key is not False:
11502 + owners.append((cpv, key[len(root) :]))
11503 +
11504 + except StopIteration:
11505 + path_iter.append(path)
11506 + del owners[:]
11507 + dblink_cache.clear()
11508 + gc.collect()
11509 + for x in self._iter_owners_low_mem(path_iter):
11510 + yield x
11511 + return
11512 + else:
11513 + for cpv, p in owners:
11514 + yield (dblink(cpv), p)
11515 +
11516 + def _iter_owners_low_mem(self, path_list):
11517 + """
11518 + This implementation creates a short-lived dblink instance (and
11519 + parses CONTENTS) for every single installed package. This is
11520 + slower but uses less memory than the method which uses the
11521 + basename cache.
11522 + """
11523 +
11524 + if not path_list:
11525 + return
11526 +
11527 + case_insensitive = "case-insensitive-fs" in self._vardb.settings.features
11528 + path_info_list = []
11529 + for path in path_list:
11530 + if case_insensitive:
11531 + path = path.lower()
11532 + is_basename = os.sep != path[:1]
11533 + if is_basename:
11534 + name = path
11535 + else:
11536 + name = os.path.basename(path.rstrip(os.path.sep))
11537 + path_info_list.append((path, name, is_basename))
11538 +
11539 + # Do the work via the global event loop, so that it can be used
11540 + # to indicate progress during the search (bug #461412).
11541 + event_loop = asyncio._safe_loop()
11542 + root = self._vardb._eroot
11543 +
11544 + def search_pkg(cpv, search_future):
11545 + dblnk = self._vardb._dblink(cpv)
11546 + results = []
11547 + for path, name, is_basename in path_info_list:
11548 + if is_basename:
11549 + for p in dblnk._contents.keys():
11550 + if os.path.basename(p) == name:
11551 + results.append(
11552 + (dblnk, dblnk._contents.unmap_key(p)[len(root) :])
11553 + )
11554 + else:
11555 + key = dblnk._match_contents(path)
11556 + if key is not False:
11557 + results.append((dblnk, key[len(root) :]))
11558 + search_future.set_result(results)
11559 +
11560 + for cpv in self._vardb.cpv_all():
11561 + search_future = event_loop.create_future()
11562 + event_loop.call_soon(search_pkg, cpv, search_future)
11563 + event_loop.run_until_complete(search_future)
11564 + for result in search_future.result():
11565 + yield result
11566 +
11567
11568 class vartree:
11569 - "this tree will scan a var/db/pkg database located at root (passed to init)"
11570 - def __init__(self, root=None, virtual=DeprecationWarning, categories=None,
11571 - settings=None):
11572 -
11573 - if settings is None:
11574 - settings = portage.settings
11575 -
11576 - if root is not None and root != settings['ROOT']:
11577 - warnings.warn("The 'root' parameter of the "
11578 - "portage.dbapi.vartree.vartree"
11579 - " constructor is now unused. Use "
11580 - "settings['ROOT'] instead.",
11581 - DeprecationWarning, stacklevel=2)
11582 -
11583 - if virtual is not DeprecationWarning:
11584 - warnings.warn("The 'virtual' parameter of the "
11585 - "portage.dbapi.vartree.vartree"
11586 - " constructor is unused",
11587 - DeprecationWarning, stacklevel=2)
11588 -
11589 - self.settings = settings
11590 - self.dbapi = vardbapi(settings=settings, vartree=self)
11591 - self.populated = 1
11592 -
11593 - @property
11594 - def root(self):
11595 - warnings.warn("The root attribute of "
11596 - "portage.dbapi.vartree.vartree"
11597 - " is deprecated. Use "
11598 - "settings['ROOT'] instead.",
11599 - DeprecationWarning, stacklevel=3)
11600 - return self.settings['ROOT']
11601 -
11602 - def getpath(self, mykey, filename=None):
11603 - return self.dbapi.getpath(mykey, filename=filename)
11604 -
11605 - def zap(self, mycpv):
11606 - return
11607 -
11608 - def inject(self, mycpv):
11609 - return
11610 -
11611 - def get_provide(self, mycpv):
11612 - return []
11613 -
11614 - def get_all_provides(self):
11615 - return {}
11616 -
11617 - def dep_bestmatch(self, mydep, use_cache=1):
11618 - "compatibility method -- all matches, not just visible ones"
11619 - #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
11620 - mymatch = best(self.dbapi.match(
11621 - dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
11622 - use_cache=use_cache))
11623 - if mymatch is None:
11624 - return ""
11625 - return mymatch
11626 -
11627 - def dep_match(self, mydep, use_cache=1):
11628 - "compatibility method -- we want to see all matches, not just visible ones"
11629 - #mymatch = match(mydep,self.dbapi)
11630 - mymatch = self.dbapi.match(mydep, use_cache=use_cache)
11631 - if mymatch is None:
11632 - return []
11633 - return mymatch
11634 -
11635 - def exists_specific(self, cpv):
11636 - return self.dbapi.cpv_exists(cpv)
11637 -
11638 - def getallcpv(self):
11639 - """temporary function, probably to be renamed --- Gets a list of all
11640 - category/package-versions installed on the system."""
11641 - return self.dbapi.cpv_all()
11642 -
11643 - def getallnodes(self):
11644 - """new behavior: these are all *unmasked* nodes. There may or may not be available
11645 - masked package for nodes in this nodes list."""
11646 - return self.dbapi.cp_all()
11647 -
11648 - def getebuildpath(self, fullpackage):
11649 - cat, package = catsplit(fullpackage)
11650 - return self.getpath(fullpackage, filename=package+".ebuild")
11651 -
11652 - def getslot(self, mycatpkg):
11653 - "Get a slot for a catpkg; assume it exists."
11654 - try:
11655 - return self.dbapi._pkg_str(mycatpkg, None).slot
11656 - except KeyError:
11657 - return ""
11658 -
11659 - def populate(self):
11660 - self.populated=1
11661 + "this tree will scan a var/db/pkg database located at root (passed to init)"
11662 +
11663 + def __init__(
11664 + self, root=None, virtual=DeprecationWarning, categories=None, settings=None
11665 + ):
11666 +
11667 + if settings is None:
11668 + settings = portage.settings
11669 +
11670 + if root is not None and root != settings["ROOT"]:
11671 + warnings.warn(
11672 + "The 'root' parameter of the "
11673 + "portage.dbapi.vartree.vartree"
11674 + " constructor is now unused. Use "
11675 + "settings['ROOT'] instead.",
11676 + DeprecationWarning,
11677 + stacklevel=2,
11678 + )
11679 +
11680 + if virtual is not DeprecationWarning:
11681 + warnings.warn(
11682 + "The 'virtual' parameter of the "
11683 + "portage.dbapi.vartree.vartree"
11684 + " constructor is unused",
11685 + DeprecationWarning,
11686 + stacklevel=2,
11687 + )
11688 +
11689 + self.settings = settings
11690 + self.dbapi = vardbapi(settings=settings, vartree=self)
11691 + self.populated = 1
11692 +
11693 + @property
11694 + def root(self):
11695 + warnings.warn(
11696 + "The root attribute of "
11697 + "portage.dbapi.vartree.vartree"
11698 + " is deprecated. Use "
11699 + "settings['ROOT'] instead.",
11700 + DeprecationWarning,
11701 + stacklevel=3,
11702 + )
11703 + return self.settings["ROOT"]
11704 +
11705 + def getpath(self, mykey, filename=None):
11706 + return self.dbapi.getpath(mykey, filename=filename)
11707 +
11708 + def zap(self, mycpv):
11709 + return
11710 +
11711 + def inject(self, mycpv):
11712 + return
11713 +
11714 + def get_provide(self, mycpv):
11715 + return []
11716 +
11717 + def get_all_provides(self):
11718 + return {}
11719 +
11720 + def dep_bestmatch(self, mydep, use_cache=1):
11721 + "compatibility method -- all matches, not just visible ones"
11722 + # mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
11723 + mymatch = best(
11724 + self.dbapi.match(
11725 + dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
11726 + use_cache=use_cache,
11727 + )
11728 + )
11729 + if mymatch is None:
11730 + return ""
11731 + return mymatch
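+ # Editor's sketch: given a vartree instance and a (hypothetical) atom,
+ # this returns the highest installed match, or "" when nothing matches:
+ #
+ #     best_cpv = vartree_inst.dep_bestmatch(">=app-shells/bash-5")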
11732 +
11733 + def dep_match(self, mydep, use_cache=1):
11734 + "compatibility method -- we want to see all matches, not just visible ones"
11735 + # mymatch = match(mydep,self.dbapi)
11736 + mymatch = self.dbapi.match(mydep, use_cache=use_cache)
11737 + if mymatch is None:
11738 + return []
11739 + return mymatch
11740 +
11741 + def exists_specific(self, cpv):
11742 + return self.dbapi.cpv_exists(cpv)
11743 +
11744 + def getallcpv(self):
11745 + """temporary function, probably to be renamed --- Gets a list of all
11746 + category/package-versions installed on the system."""
11747 + return self.dbapi.cpv_all()
11748 +
11749 + def getallnodes(self):
11750 + """new behavior: these are all *unmasked* nodes. There may or may not be available
11751 + masked package for nodes in this nodes list."""
11752 + return self.dbapi.cp_all()
11753 +
11754 + def getebuildpath(self, fullpackage):
11755 + cat, package = catsplit(fullpackage)
11756 + return self.getpath(fullpackage, filename=package + ".ebuild")
11757 +
11758 + def getslot(self, mycatpkg):
11759 + "Get a slot for a catpkg; assume it exists."
11760 + try:
11761 + return self.dbapi._pkg_str(mycatpkg, None).slot
11762 + except KeyError:
11763 + return ""
11764 +
11765 + def populate(self):
11766 + self.populated = 1
11767 +
11768
11769 class dblink:
11770 - """
11771 - This class provides an interface to the installed package database
11772 - At present this is implemented as a text backend in /var/db/pkg.
11773 - """
11774 -
11775 - _normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)')
11776 -
11777 - _contents_re = re.compile(r'^(' + \
11778 - r'(?P<dir>(dev|dir|fif) (.+))|' + \
11779 - r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \
11780 - r'(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>(' + \
11781 - r'\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))' + \
11782 - r')$'
11783 - )
11784 -
11785 - # These files are generated by emerge, so we need to remove
11786 - # them when they are the only thing left in a directory.
11787 - _infodir_cleanup = frozenset(["dir", "dir.old"])
11788 -
11789 - _ignored_unlink_errnos = (
11790 - errno.EBUSY, errno.ENOENT,
11791 - errno.ENOTDIR, errno.EISDIR)
11792 -
11793 - _ignored_rmdir_errnos = (
11794 - errno.EEXIST, errno.ENOTEMPTY,
11795 - errno.EBUSY, errno.ENOENT,
11796 - errno.ENOTDIR, errno.EISDIR,
11797 - errno.EPERM)
11798 -
11799 - def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
11800 - vartree=None, blockers=None, scheduler=None, pipe=None):
11801 - """
11802 - Creates a DBlink object for a given CPV.
11803 - The given CPV may not be present in the database already.
11804 -
11805 - @param cat: Category
11806 - @type cat: String
11807 - @param pkg: Package (PV)
11808 - @type pkg: String
11809 - @param myroot: ignored, settings['ROOT'] is used instead
11810 - @type myroot: String (Path)
11811 - @param settings: Typically portage.settings
11812 - @type settings: portage.config
11813 - @param treetype: one of ['porttree','bintree','vartree']
11814 - @type treetype: String
11815 - @param vartree: an instance of vartree corresponding to myroot.
11816 - @type vartree: vartree
11817 - """
11818 -
11819 - if settings is None:
11820 - raise TypeError("settings argument is required")
11821 -
11822 - mysettings = settings
11823 - self._eroot = mysettings['EROOT']
11824 - self.cat = cat
11825 - self.pkg = pkg
11826 - self.mycpv = self.cat + "/" + self.pkg
11827 - if self.mycpv == settings.mycpv and \
11828 - isinstance(settings.mycpv, _pkg_str):
11829 - self.mycpv = settings.mycpv
11830 - else:
11831 - self.mycpv = _pkg_str(self.mycpv)
11832 - self.mysplit = list(self.mycpv.cpv_split[1:])
11833 - self.mysplit[0] = self.mycpv.cp
11834 - self.treetype = treetype
11835 - if vartree is None:
11836 - vartree = portage.db[self._eroot]["vartree"]
11837 - self.vartree = vartree
11838 - self._blockers = blockers
11839 - self._scheduler = scheduler
11840 - self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
11841 - self.dbcatdir = self.dbroot+"/"+cat
11842 - self.dbpkgdir = self.dbcatdir+"/"+pkg
11843 - self.dbtmpdir = self.dbcatdir+"/"+MERGING_IDENTIFIER+pkg
11844 - self.dbdir = self.dbpkgdir
11845 - self.settings = mysettings
11846 - self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
11847 -
11848 - self.myroot = self.settings['ROOT']
11849 - self._installed_instance = None
11850 - self.contentscache = None
11851 - self._contents_inodes = None
11852 - self._contents_basenames = None
11853 - self._linkmap_broken = False
11854 - self._device_path_map = {}
11855 - self._hardlink_merge_map = {}
11856 - self._hash_key = (self._eroot, self.mycpv)
11857 - self._protect_obj = None
11858 - self._pipe = pipe
11859 - self._postinst_failure = False
11860 -
11861 - # When necessary, this attribute is modified for
11862 - # compliance with RESTRICT=preserve-libs.
11863 - self._preserve_libs = "preserve-libs" in mysettings.features
11864 - self._contents = ContentsCaseSensitivityManager(self)
11865 - self._slot_locks = []
11866 -
11867 - def __hash__(self):
11868 - return hash(self._hash_key)
11869 -
11870 - def __eq__(self, other):
11871 - return isinstance(other, dblink) and \
11872 - self._hash_key == other._hash_key
11873 -
11874 - def _get_protect_obj(self):
11875 -
11876 - if self._protect_obj is None:
11877 - self._protect_obj = ConfigProtect(self._eroot,
11878 - portage.util.shlex_split(
11879 - self.settings.get("CONFIG_PROTECT", "")),
11880 - portage.util.shlex_split(
11881 - self.settings.get("CONFIG_PROTECT_MASK", "")),
11882 - case_insensitive=("case-insensitive-fs"
11883 - in self.settings.features))
11884 -
11885 - return self._protect_obj
11886 -
11887 - def isprotected(self, obj):
11888 - return self._get_protect_obj().isprotected(obj)
11889 -
11890 - def updateprotect(self):
11891 - self._get_protect_obj().updateprotect()
11892 -
11893 - def lockdb(self):
11894 - self.vartree.dbapi.lock()
11895 -
11896 - def unlockdb(self):
11897 - self.vartree.dbapi.unlock()
11898 -
11899 - def _slot_locked(f):
11900 - """
11901 - A decorator function which, when parallel-install is enabled,
11902 - acquires and releases slot locks for the current package and
11903 - blocked packages. This is required in order to account for
11904 - interactions with blocked packages (involving resolution of
11905 - file collisions).
11906 - """
11907 - def wrapper(self, *args, **kwargs):
11908 - if "parallel-install" in self.settings.features:
11909 - self._acquire_slot_locks(
11910 - kwargs.get("mydbapi", self.vartree.dbapi))
11911 - try:
11912 - return f(self, *args, **kwargs)
11913 - finally:
11914 - self._release_slot_locks()
11915 - return wrapper
11916 -
11917 - def _acquire_slot_locks(self, db):
11918 - """
11919 - Acquire slot locks for the current package and blocked packages.
11920 - """
11921 -
11922 - slot_atoms = []
11923 -
11924 - try:
11925 - slot = self.mycpv.slot
11926 - except AttributeError:
11927 - slot, = db.aux_get(self.mycpv, ["SLOT"])
11928 - slot = slot.partition("/")[0]
11929 -
11930 - slot_atoms.append(portage.dep.Atom(
11931 - "%s:%s" % (self.mycpv.cp, slot)))
11932 -
11933 - for blocker in self._blockers or []:
11934 - slot_atoms.append(blocker.slot_atom)
11935 -
11936 - # Sort atoms so that locks are acquired in a predictable
11937 - # order, preventing deadlocks with competitors that may
11938 - # be trying to acquire overlapping locks.
11939 - slot_atoms.sort()
11940 - for slot_atom in slot_atoms:
11941 - self.vartree.dbapi._slot_lock(slot_atom)
11942 - self._slot_locks.append(slot_atom)
11943 -
11944 - def _release_slot_locks(self):
11945 - """
11946 - Release all slot locks.
11947 - """
11948 - while self._slot_locks:
11949 - self.vartree.dbapi._slot_unlock(self._slot_locks.pop())
11950 -
11951 - def getpath(self):
11952 - "return path to location of db information (for >>> informational display)"
11953 - return self.dbdir
11954 -
11955 - def exists(self):
11956 - "does the db entry exist? boolean."
11957 - return os.path.exists(self.dbdir)
11958 -
11959 - def delete(self):
11960 - """
11961 - Remove this entry from the database
11962 - """
11963 - try:
11964 - os.lstat(self.dbdir)
11965 - except OSError as e:
11966 - if e.errno not in (errno.ENOENT, errno.ENOTDIR, errno.ESTALE):
11967 - raise
11968 - return
11969 -
11970 - # Check validity of self.dbdir before attempting to remove it.
11971 - if not self.dbdir.startswith(self.dbroot):
11972 - writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \
11973 - self.dbdir, noiselevel=-1)
11974 - return
11975 -
11976 - if self.dbdir is self.dbpkgdir:
11977 - counter, = self.vartree.dbapi.aux_get(
11978 - self.mycpv, ["COUNTER"])
11979 - self.vartree.dbapi._cache_delta.recordEvent(
11980 - "remove", self.mycpv,
11981 - self.settings["SLOT"].split("/")[0], counter)
11982 -
11983 - shutil.rmtree(self.dbdir)
11984 - # If empty, remove parent category directory.
11985 - try:
11986 - os.rmdir(os.path.dirname(self.dbdir))
11987 - except OSError:
11988 - pass
11989 - self.vartree.dbapi._remove(self)
11990 -
11991 - # Use self.dbroot since we need an existing path for syncfs.
11992 - try:
11993 - self._merged_path(self.dbroot, os.lstat(self.dbroot))
11994 - except OSError:
11995 - pass
11996 -
11997 - self._post_merge_sync()
11998 -
11999 - def clearcontents(self):
12000 - """
12001 - For a given db entry (self), erase the CONTENTS values.
12002 - """
12003 - self.lockdb()
12004 - try:
12005 - if os.path.exists(self.dbdir+"/CONTENTS"):
12006 - os.unlink(self.dbdir+"/CONTENTS")
12007 - finally:
12008 - self.unlockdb()
12009 -
12010 - def _clear_contents_cache(self):
12011 - self.contentscache = None
12012 - self._contents_inodes = None
12013 - self._contents_basenames = None
12014 - self._contents.clear_cache()
12015 -
12016 - def getcontents(self):
12017 - """
12018 - Get the installed files of a given package (aka what that package installed)
12019 - """
12020 - if self.contentscache is not None:
12021 - return self.contentscache
12022 - contents_file = os.path.join(self.dbdir, "CONTENTS")
12023 - pkgfiles = {}
12024 - try:
12025 - with io.open(_unicode_encode(contents_file,
12026 - encoding=_encodings['fs'], errors='strict'),
12027 - mode='r', encoding=_encodings['repo.content'],
12028 - errors='replace') as f:
12029 - mylines = f.readlines()
12030 - except EnvironmentError as e:
12031 - if e.errno != errno.ENOENT:
12032 - raise
12033 - del e
12034 - self.contentscache = pkgfiles
12035 - return pkgfiles
12036 -
12037 - null_byte = "\0"
12038 - normalize_needed = self._normalize_needed
12039 - contents_re = self._contents_re
12040 - obj_index = contents_re.groupindex['obj']
12041 - dir_index = contents_re.groupindex['dir']
12042 - sym_index = contents_re.groupindex['sym']
12043 - # The old symlink format may exist on systems that have packages
12044 - # which were installed many years ago (see bug #351814).
12045 - oldsym_index = contents_re.groupindex['oldsym']
12046 - # CONTENTS files already contain EPREFIX
12047 - myroot = self.settings['ROOT']
12048 - if myroot == os.path.sep:
12049 - myroot = None
12050 - # used to generate parent dir entries
12051 - dir_entry = ("dir",)
12052 - eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
12053 - pos = 0
12054 - errors = []
12055 - for pos, line in enumerate(mylines):
12056 - if null_byte in line:
12057 - # Null bytes are a common indication of corruption.
12058 - errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
12059 - continue
12060 - line = line.rstrip("\n")
12061 - m = contents_re.match(line)
12062 - if m is None:
12063 - errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
12064 - continue
12065 -
12066 - if m.group(obj_index) is not None:
12067 - base = obj_index
12068 - #format: type, mtime, md5sum
12069 - data = (m.group(base+1), m.group(base+4), m.group(base+3))
12070 - elif m.group(dir_index) is not None:
12071 - base = dir_index
12072 - #format: type
12073 - data = (m.group(base+1),)
12074 - elif m.group(sym_index) is not None:
12075 - base = sym_index
12076 - if m.group(oldsym_index) is None:
12077 - mtime = m.group(base+5)
12078 - else:
12079 - mtime = m.group(base+8)
12080 - #format: type, mtime, dest
12081 - data = (m.group(base+1), mtime, m.group(base+3))
12082 - else:
12083 - # This won't happen as long the regular expression
12084 - # is written to only match valid entries.
12085 - raise AssertionError(_("required group not found " + \
12086 - "in CONTENTS entry: '%s'") % line)
12087 -
12088 - path = m.group(base+2)
12089 - if normalize_needed.search(path) is not None:
12090 - path = normalize_path(path)
12091 - if not path.startswith(os.path.sep):
12092 - path = os.path.sep + path
12093 -
12094 - if myroot is not None:
12095 - path = os.path.join(myroot, path.lstrip(os.path.sep))
12096 -
12097 - # Implicitly add parent directories, since we can't necessarily
12098 - # assume that they are explicitly listed in CONTENTS, and it's
12099 - # useful for callers if they can rely on parent directory entries
12100 - # being generated here (crucial for things like dblink.isowner()).
12101 - path_split = path.split(os.sep)
12102 - path_split.pop()
12103 - while len(path_split) > eroot_split_len:
12104 - parent = os.sep.join(path_split)
12105 - if parent in pkgfiles:
12106 - break
12107 - pkgfiles[parent] = dir_entry
12108 - path_split.pop()
12109 -
12110 - pkgfiles[path] = data
12111 -
12112 - if errors:
12113 - writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
12114 - for pos, e in errors:
12115 - writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1)
12116 - self.contentscache = pkgfiles
12117 - return pkgfiles
12118 -
12119 - def quickpkg(self, output_file, include_config=False, include_unmodified_config=False):
12120 - """
12121 - Create a tar file appropriate for use by quickpkg.
12122 -
12123 - @param output_file: Write binary tar stream to file.
12124 - @type output_file: file
12125 - @param include_config: Include all files protected by CONFIG_PROTECT
12126 - (as a security precaution, default is False).
12127 - @type include_config: bool
12128 - @param include_unmodified_config: Include files protected by CONFIG_PROTECT
12129 - that have not been modified since installation (as a security precaution,
12130 - default is False).
12131 - @type include_unmodified_config: bool
12132 - @rtype: list
12133 - @return: Paths of protected configuration files which have been omitted.
12134 - """
12135 - settings = self.settings
12136 - cpv = self.mycpv
12137 - xattrs = 'xattr' in settings.features
12138 - contents = self.getcontents()
12139 - excluded_config_files = []
12140 - protect = None
12141 -
12142 - if not include_config:
12143 - confprot = ConfigProtect(settings['EROOT'],
12144 - portage.util.shlex_split(settings.get('CONFIG_PROTECT', '')),
12145 - portage.util.shlex_split(settings.get('CONFIG_PROTECT_MASK', '')),
12146 - case_insensitive=('case-insensitive-fs' in settings.features))
12147 -
12148 - def protect(filename):
12149 - if not confprot.isprotected(filename):
12150 - return False
12151 - if include_unmodified_config:
12152 - file_data = contents[filename]
12153 - if file_data[0] == 'obj':
12154 - orig_md5 = file_data[2].lower()
12155 - cur_md5 = perform_md5(filename, calc_prelink=1)
12156 - if orig_md5 == cur_md5:
12157 - return False
12158 - excluded_config_files.append(filename)
12159 - return True
12160 -
12161 - # The tarfile module will write pax headers holding the
12162 - # xattrs only if PAX_FORMAT is specified here.
12163 - with tarfile.open(fileobj=output_file, mode='w|',
12164 - format=tarfile.PAX_FORMAT if xattrs else tarfile.DEFAULT_FORMAT) as tar:
12165 - tar_contents(contents, settings['ROOT'], tar, protect=protect, xattrs=xattrs)
12166 -
12167 - return excluded_config_files
12168 -
12169 - def _prune_plib_registry(self, unmerge=False,
12170 - needed=None, preserve_paths=None):
12171 - # remove preserved libraries that don't have any consumers left
12172 - if not (self._linkmap_broken or
12173 - self.vartree.dbapi._linkmap is None or
12174 - self.vartree.dbapi._plib_registry is None):
12175 - self.vartree.dbapi._fs_lock()
12176 - plib_registry = self.vartree.dbapi._plib_registry
12177 - plib_registry.lock()
12178 - try:
12179 - plib_registry.load()
12180 -
12181 - unmerge_with_replacement = \
12182 - unmerge and preserve_paths is not None
12183 - if unmerge_with_replacement:
12184 - # If self.mycpv is about to be unmerged and we
12185 - # have a replacement package, we want to exclude
12186 - # the irrelevant NEEDED data that belongs to
12187 - # files which are being unmerged now.
12188 - exclude_pkgs = (self.mycpv,)
12189 - else:
12190 - exclude_pkgs = None
12191 -
12192 - self._linkmap_rebuild(exclude_pkgs=exclude_pkgs,
12193 - include_file=needed, preserve_paths=preserve_paths)
12194 -
12195 - if unmerge:
12196 - unmerge_preserve = None
12197 - if not unmerge_with_replacement:
12198 - unmerge_preserve = \
12199 - self._find_libs_to_preserve(unmerge=True)
12200 - counter = self.vartree.dbapi.cpv_counter(self.mycpv)
12201 - try:
12202 - slot = self.mycpv.slot
12203 - except AttributeError:
12204 - slot = _pkg_str(self.mycpv, slot=self.settings["SLOT"]).slot
12205 - plib_registry.unregister(self.mycpv, slot, counter)
12206 - if unmerge_preserve:
12207 - for path in sorted(unmerge_preserve):
12208 - contents_key = self._match_contents(path)
12209 - if not contents_key:
12210 - continue
12211 - obj_type = self.getcontents()[contents_key][0]
12212 - self._display_merge(_(">>> needed %s %s\n") % \
12213 - (obj_type, contents_key), noiselevel=-1)
12214 - plib_registry.register(self.mycpv,
12215 - slot, counter, unmerge_preserve)
12216 - # Remove the preserved files from our contents
12217 - # so that they won't be unmerged.
12218 - self.vartree.dbapi.removeFromContents(self,
12219 - unmerge_preserve)
12220 -
12221 - unmerge_no_replacement = \
12222 - unmerge and not unmerge_with_replacement
12223 - cpv_lib_map = self._find_unused_preserved_libs(
12224 - unmerge_no_replacement)
12225 - if cpv_lib_map:
12226 - self._remove_preserved_libs(cpv_lib_map)
12227 - self.vartree.dbapi.lock()
12228 - try:
12229 - for cpv, removed in cpv_lib_map.items():
12230 - if not self.vartree.dbapi.cpv_exists(cpv):
12231 - continue
12232 - self.vartree.dbapi.removeFromContents(cpv, removed)
12233 - finally:
12234 - self.vartree.dbapi.unlock()
12235 -
12236 - plib_registry.store()
12237 - finally:
12238 - plib_registry.unlock()
12239 - self.vartree.dbapi._fs_unlock()
12240 -
12241 - @_slot_locked
12242 - def unmerge(self, pkgfiles=None, trimworld=None, cleanup=True,
12243 - ldpath_mtimes=None, others_in_slot=None, needed=None,
12244 - preserve_paths=None):
12245 - """
12246 - Calls prerm
12247 - Unmerges a given package (CPV)
12248 - calls postrm
12249 - calls cleanrm
12250 - calls env_update
12251 -
12252 - @param pkgfiles: files to unmerge (generally self.getcontents() )
12253 - @type pkgfiles: Dictionary
12254 - @param trimworld: Unused
12255 - @type trimworld: Boolean
12256 - @param cleanup: cleanup to pass to doebuild (see doebuild)
12257 - @type cleanup: Boolean
12258 - @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
12259 - @type ldpath_mtimes: Dictionary
12260 - @param others_in_slot: all dblink instances in this slot, excluding self
12261 - @type others_in_slot: list
12262 - @param needed: Filename containing libraries needed after unmerge.
12263 - @type needed: String
12264 - @param preserve_paths: Libraries preserved by a package instance that
12265 - is currently being merged. They need to be explicitly passed to the
12266 - LinkageMap, since they are not registered in the
12267 - PreservedLibsRegistry yet.
12268 - @type preserve_paths: set
12269 - @rtype: Integer
12270 - @return:
12271 - 1. os.EX_OK if everything went well.
12272 - 2. return code of the failed phase (for prerm, postrm, cleanrm)
12273 - """
12274 -
12275 - if trimworld is not None:
12276 - warnings.warn("The trimworld parameter of the " + \
12277 - "portage.dbapi.vartree.dblink.unmerge()" + \
12278 - " method is now unused.",
12279 - DeprecationWarning, stacklevel=2)
12280 -
12281 - background = False
12282 - log_path = self.settings.get("PORTAGE_LOG_FILE")
12283 - if self._scheduler is None:
12284 - # We create a scheduler instance and use it to
12285 - # log unmerge output separately from merge output.
12286 - self._scheduler = SchedulerInterface(asyncio._safe_loop())
12287 - if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
12288 - if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
12289 - self.settings["PORTAGE_BACKGROUND"] = "1"
12290 - self.settings.backup_changes("PORTAGE_BACKGROUND")
12291 - background = True
12292 - elif self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "0":
12293 - self.settings["PORTAGE_BACKGROUND"] = "0"
12294 - self.settings.backup_changes("PORTAGE_BACKGROUND")
12295 - elif self.settings.get("PORTAGE_BACKGROUND") == "1":
12296 - background = True
12297 -
12298 - self.vartree.dbapi._bump_mtime(self.mycpv)
12299 - showMessage = self._display_merge
12300 - if self.vartree.dbapi._categories is not None:
12301 - self.vartree.dbapi._categories = None
12302 -
12303 - # When others_in_slot is not None, the backup has already been
12304 - # handled by the caller.
12305 - caller_handles_backup = others_in_slot is not None
12306 -
12307 - # When others_in_slot is supplied, the security check has already been
12308 - # done for this slot, so it shouldn't be repeated until the next
12309 - # replacement or unmerge operation.
12310 - if others_in_slot is None:
12311 - slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
12312 - slot_matches = self.vartree.dbapi.match(
12313 - "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
12314 - others_in_slot = []
12315 - for cur_cpv in slot_matches:
12316 - if cur_cpv == self.mycpv:
12317 - continue
12318 - others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
12319 - settings=self.settings, vartree=self.vartree,
12320 - treetype="vartree", pipe=self._pipe))
12321 -
12322 - retval = self._security_check([self] + others_in_slot)
12323 - if retval:
12324 - return retval
12325 -
12326 - contents = self.getcontents()
12327 - # Now, don't assume that the name of the ebuild is the same as the
12328 - # name of the dir; the package may have been moved.
12329 - myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
12330 - failures = 0
12331 - ebuild_phase = "prerm"
12332 - mystuff = os.listdir(self.dbdir)
12333 - for x in mystuff:
12334 - if x.endswith(".ebuild"):
12335 - if x[:-7] != self.pkg:
12336 - # Clean up after vardbapi.move_ent() breakage in
12337 - # portage versions before 2.1.2
12338 - os.rename(os.path.join(self.dbdir, x), myebuildpath)
12339 - write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
12340 - break
12341 -
12342 - if self.mycpv != self.settings.mycpv or \
12343 - "EAPI" not in self.settings.configdict["pkg"]:
12344 - # We avoid a redundant setcpv call here when
12345 - # the caller has already taken care of it.
12346 - self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
12347 -
12348 - eapi_unsupported = False
12349 - try:
12350 - doebuild_environment(myebuildpath, "prerm",
12351 - settings=self.settings, db=self.vartree.dbapi)
12352 - except UnsupportedAPIException as e:
12353 - eapi_unsupported = e
12354 -
12355 - if self._preserve_libs and "preserve-libs" in \
12356 - self.settings["PORTAGE_RESTRICT"].split():
12357 - self._preserve_libs = False
12358 -
12359 - builddir_lock = None
12360 - scheduler = self._scheduler
12361 - retval = os.EX_OK
12362 - try:
12363 - # Only create builddir_lock if the caller
12364 - # has not already acquired the lock.
12365 - if "PORTAGE_BUILDDIR_LOCKED" not in self.settings:
12366 - builddir_lock = EbuildBuildDir(
12367 - scheduler=scheduler,
12368 - settings=self.settings)
12369 - scheduler.run_until_complete(builddir_lock.async_lock())
12370 - prepare_build_dirs(settings=self.settings, cleanup=True)
12371 - log_path = self.settings.get("PORTAGE_LOG_FILE")
12372 -
12373 - # Do this before the following _prune_plib_registry call, since
12374 - # that removes preserved libraries from our CONTENTS, and we
12375 - # may want to backup those libraries first.
12376 - if not caller_handles_backup:
12377 - retval = self._pre_unmerge_backup(background)
12378 - if retval != os.EX_OK:
12379 - showMessage(_("!!! FAILED prerm: quickpkg: %s\n") % retval,
12380 - level=logging.ERROR, noiselevel=-1)
12381 - return retval
12382 -
12383 - self._prune_plib_registry(unmerge=True, needed=needed,
12384 - preserve_paths=preserve_paths)
12385 -
12386 - # Log the error after PORTAGE_LOG_FILE is initialized
12387 - # by prepare_build_dirs above.
12388 - if eapi_unsupported:
12389 - # Sometimes this happens due to corruption of the EAPI file.
12390 - failures += 1
12391 - showMessage(_("!!! FAILED prerm: %s\n") % \
12392 - os.path.join(self.dbdir, "EAPI"),
12393 - level=logging.ERROR, noiselevel=-1)
12394 - showMessage("%s\n" % (eapi_unsupported,),
12395 - level=logging.ERROR, noiselevel=-1)
12396 - elif os.path.isfile(myebuildpath):
12397 - phase = EbuildPhase(background=background,
12398 - phase=ebuild_phase, scheduler=scheduler,
12399 - settings=self.settings)
12400 - phase.start()
12401 - retval = phase.wait()
12402 -
12403 - # XXX: Decide how to handle failures here.
12404 - if retval != os.EX_OK:
12405 - failures += 1
12406 - showMessage(_("!!! FAILED prerm: %s\n") % retval,
12407 - level=logging.ERROR, noiselevel=-1)
12408 -
12409 - self.vartree.dbapi._fs_lock()
12410 - try:
12411 - self._unmerge_pkgfiles(pkgfiles, others_in_slot)
12412 - finally:
12413 - self.vartree.dbapi._fs_unlock()
12414 - self._clear_contents_cache()
12415 -
12416 - if not eapi_unsupported and os.path.isfile(myebuildpath):
12417 - ebuild_phase = "postrm"
12418 - phase = EbuildPhase(background=background,
12419 - phase=ebuild_phase, scheduler=scheduler,
12420 - settings=self.settings)
12421 - phase.start()
12422 - retval = phase.wait()
12423 -
12424 - # XXX: Decide how to handle failures here.
12425 - if retval != os.EX_OK:
12426 - failures += 1
12427 - showMessage(_("!!! FAILED postrm: %s\n") % retval,
12428 - level=logging.ERROR, noiselevel=-1)
12429 -
12430 - finally:
12431 - self.vartree.dbapi._bump_mtime(self.mycpv)
12432 - try:
12433 - if not eapi_unsupported and os.path.isfile(myebuildpath):
12434 - if retval != os.EX_OK:
12435 - msg_lines = []
12436 - msg = _("The '%(ebuild_phase)s' "
12437 - "phase of the '%(cpv)s' package "
12438 - "has failed with exit value %(retval)s.") % \
12439 - {"ebuild_phase":ebuild_phase, "cpv":self.mycpv,
12440 - "retval":retval}
12441 - from textwrap import wrap
12442 - msg_lines.extend(wrap(msg, 72))
12443 - msg_lines.append("")
12444 -
12445 - ebuild_name = os.path.basename(myebuildpath)
12446 - ebuild_dir = os.path.dirname(myebuildpath)
12447 - msg = _("The problem occurred while executing "
12448 - "the ebuild file named '%(ebuild_name)s' "
12449 - "located in the '%(ebuild_dir)s' directory. "
12450 - "If necessary, manually remove "
12451 - "the environment.bz2 file and/or the "
12452 - "ebuild file located in that directory.") % \
12453 - {"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir}
12454 - msg_lines.extend(wrap(msg, 72))
12455 - msg_lines.append("")
12456 -
12457 - msg = _("Removal "
12458 - "of the environment.bz2 file is "
12459 - "preferred since it may allow the "
12460 - "removal phases to execute successfully. "
12461 - "The ebuild will be "
12462 - "sourced and the eclasses "
12463 - "from the current ebuild repository will be used "
12464 - "when necessary. Removal of "
12465 - "the ebuild file will cause the "
12466 - "pkg_prerm() and pkg_postrm() removal "
12467 - "phases to be skipped entirely.")
12468 - msg_lines.extend(wrap(msg, 72))
12469 -
12470 - self._eerror(ebuild_phase, msg_lines)
12471 -
12472 - self._elog_process(phasefilter=("prerm", "postrm"))
12473 -
12474 - if retval == os.EX_OK:
12475 - try:
12476 - doebuild_environment(myebuildpath, "cleanrm",
12477 - settings=self.settings, db=self.vartree.dbapi)
12478 - except UnsupportedAPIException:
12479 - pass
12480 - phase = EbuildPhase(background=background,
12481 - phase="cleanrm", scheduler=scheduler,
12482 - settings=self.settings)
12483 - phase.start()
12484 - retval = phase.wait()
12485 - finally:
12486 - if builddir_lock is not None:
12487 - scheduler.run_until_complete(
12488 - builddir_lock.async_unlock())
12489 -
12490 - if log_path is not None:
12491 -
12492 - if not failures and 'unmerge-logs' not in self.settings.features:
12493 - try:
12494 - os.unlink(log_path)
12495 - except OSError:
12496 - pass
12497 -
12498 - try:
12499 - st = os.stat(log_path)
12500 - except OSError:
12501 - pass
12502 - else:
12503 - if st.st_size == 0:
12504 - try:
12505 - os.unlink(log_path)
12506 - except OSError:
12507 - pass
12508 -
12509 - if log_path is not None and os.path.exists(log_path):
12510 - # Restore this since it gets lost somewhere above and it
12511 - # needs to be set for _display_merge() to be able to log.
12512 - # Note that the log isn't necessarily supposed to exist
12513 - # since if PORTAGE_LOGDIR is unset then it's a temp file
12514 - # so it gets cleaned above.
12515 - self.settings["PORTAGE_LOG_FILE"] = log_path
12516 - else:
12517 - self.settings.pop("PORTAGE_LOG_FILE", None)
12518 -
12519 - env_update(target_root=self.settings['ROOT'],
12520 - prev_mtimes=ldpath_mtimes,
12521 - contents=contents, env=self.settings,
12522 - writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
12523 -
12524 - unmerge_with_replacement = preserve_paths is not None
12525 - if not unmerge_with_replacement:
12526 - # When there's a replacement package which calls us via treewalk,
12527 - # treewalk will automatically call _prune_plib_registry for us.
12528 - # Otherwise, we need to call _prune_plib_registry ourselves.
12529 - # Don't pass in the "unmerge=True" flag here, since that flag
12530 - # is intended to be used _prior_ to unmerge, not after.
12531 - self._prune_plib_registry()
12532 -
12533 - return os.EX_OK
12534 -
12535 - def _display_merge(self, msg, level=0, noiselevel=0):
12536 - if not self._verbose and noiselevel >= 0 and level < logging.WARN:
12537 - return
12538 - if self._scheduler is None:
12539 - writemsg_level(msg, level=level, noiselevel=noiselevel)
12540 - else:
12541 - log_path = None
12542 - if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
12543 - log_path = self.settings.get("PORTAGE_LOG_FILE")
12544 - background = self.settings.get("PORTAGE_BACKGROUND") == "1"
12545 -
12546 - if background and log_path is None:
12547 - if level >= logging.WARN:
12548 - writemsg_level(msg, level=level, noiselevel=noiselevel)
12549 - else:
12550 - self._scheduler.output(msg,
12551 - log_path=log_path, background=background,
12552 - level=level, noiselevel=noiselevel)
12553 -
12554 - def _show_unmerge(self, zing, desc, file_type, file_name):
12555 - self._display_merge("%s %s %s %s\n" % \
12556 - (zing, desc.ljust(8), file_type, file_name))
12557 -
12558 - def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
12559 - """
12560 -
12561 - Unmerges the contents of a package from the liveFS
12562 - Removes the VDB entry for self
12563 -
12564 - @param pkgfiles: typically self.getcontents()
12565 - @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
12566 - @param others_in_slot: all dblink instances in this slot, excluding self
12567 - @type others_in_slot: list
12568 - @rtype: None
12569 - """
12570 -
12571 - os = _os_merge
12572 - perf_md5 = perform_md5
12573 - showMessage = self._display_merge
12574 - show_unmerge = self._show_unmerge
12575 - ignored_unlink_errnos = self._ignored_unlink_errnos
12576 - ignored_rmdir_errnos = self._ignored_rmdir_errnos
12577 -
12578 - if not pkgfiles:
12579 - showMessage(_("No package files given... Grabbing a set.\n"))
12580 - pkgfiles = self.getcontents()
12581 -
12582 - if others_in_slot is None:
12583 - others_in_slot = []
12584 - slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
12585 - slot_matches = self.vartree.dbapi.match(
12586 - "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
12587 - for cur_cpv in slot_matches:
12588 - if cur_cpv == self.mycpv:
12589 - continue
12590 - others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
12591 - settings=self.settings,
12592 - vartree=self.vartree, treetype="vartree", pipe=self._pipe))
12593 -
12594 - cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
12595 - stale_confmem = []
12596 - protected_symlinks = {}
12597 -
12598 - unmerge_orphans = "unmerge-orphans" in self.settings.features
12599 - calc_prelink = "prelink-checksums" in self.settings.features
12600 -
12601 - if pkgfiles:
12602 - self.updateprotect()
12603 - mykeys = list(pkgfiles)
12604 - mykeys.sort()
12605 - mykeys.reverse()
12606 -
12607 - #process symlinks second-to-last, directories last.
12608 - mydirs = set()
12609 -
12610 - uninstall_ignore = portage.util.shlex_split(
12611 - self.settings.get("UNINSTALL_IGNORE", ""))
12612 -
12613 - def unlink(file_name, lstatobj):
12614 - if bsd_chflags:
12615 - if lstatobj.st_flags != 0:
12616 - bsd_chflags.lchflags(file_name, 0)
12617 - parent_name = os.path.dirname(file_name)
12618 - # Use normal stat/chflags for the parent since we want to
12619 - # follow any symlinks to the real parent directory.
12620 - pflags = os.stat(parent_name).st_flags
12621 - if pflags != 0:
12622 - bsd_chflags.chflags(parent_name, 0)
12623 - try:
12624 - if not stat.S_ISLNK(lstatobj.st_mode):
12625 - # Remove permissions to ensure that any hardlinks to
12626 - # suid/sgid files are rendered harmless.
12627 - os.chmod(file_name, 0)
12628 - os.unlink(file_name)
12629 - except OSError as ose:
12630 - # If the chmod or unlink fails, you are in trouble.
12631 - # With Prefix this can be because the file is owned
12632 - # by someone else (a screwup by root?), on a normal
12633 - # system maybe filesystem corruption. In any case,
12634 - # if we backtrace and die here, we leave the system
12635 - # in a totally undefined state, hence we just bleed
12636 - # like hell and continue to hopefully finish all our
12637 - # administrative and pkg_postinst stuff.
12638 - self._eerror("postrm",
12639 - ["Could not chmod or unlink '%s': %s" % \
12640 - (file_name, ose)])
12641 - else:
12642 -
12643 - # Even though the file no longer exists, we log it
12644 - # here so that _unmerge_dirs can see that we've
12645 - # removed a file from this device, and will record
12646 - # the parent directory for a syncfs call.
12647 - self._merged_path(file_name, lstatobj, exists=False)
12648 -
12649 - finally:
12650 - if bsd_chflags and pflags != 0:
12651 - # Restore the parent flags we saved before unlinking
12652 - bsd_chflags.chflags(parent_name, pflags)
12653 -
12654 - unmerge_desc = {}
12655 - unmerge_desc["cfgpro"] = _("cfgpro")
12656 - unmerge_desc["replaced"] = _("replaced")
12657 - unmerge_desc["!dir"] = _("!dir")
12658 - unmerge_desc["!empty"] = _("!empty")
12659 - unmerge_desc["!fif"] = _("!fif")
12660 - unmerge_desc["!found"] = _("!found")
12661 - unmerge_desc["!md5"] = _("!md5")
12662 - unmerge_desc["!mtime"] = _("!mtime")
12663 - unmerge_desc["!obj"] = _("!obj")
12664 - unmerge_desc["!sym"] = _("!sym")
12665 - unmerge_desc["!prefix"] = _("!prefix")
12666 -
12667 - real_root = self.settings['ROOT']
12668 - real_root_len = len(real_root) - 1
12669 - eroot = self.settings["EROOT"]
12670 -
12671 - infodirs = frozenset(infodir for infodir in chain(
12672 - self.settings.get("INFOPATH", "").split(":"),
12673 - self.settings.get("INFODIR", "").split(":")) if infodir)
12674 - infodirs_inodes = set()
12675 - for infodir in infodirs:
12676 - infodir = os.path.join(real_root, infodir.lstrip(os.sep))
12677 - try:
12678 - statobj = os.stat(infodir)
12679 - except OSError:
12680 - pass
12681 - else:
12682 - infodirs_inodes.add((statobj.st_dev, statobj.st_ino))
12683 -
12684 - for i, objkey in enumerate(mykeys):
12685 -
12686 - obj = normalize_path(objkey)
12687 - if os is _os_merge:
12688 - try:
12689 - _unicode_encode(obj,
12690 - encoding=_encodings['merge'], errors='strict')
12691 - except UnicodeEncodeError:
12692 - # The package appears to have been merged with a
12693 - # different value of sys.getfilesystemencoding(),
12694 - # so fall back to utf_8 if appropriate.
12695 - try:
12696 - _unicode_encode(obj,
12697 - encoding=_encodings['fs'], errors='strict')
12698 - except UnicodeEncodeError:
12699 - pass
12700 - else:
12701 - os = portage.os
12702 - perf_md5 = portage.checksum.perform_md5
12703 -
12704 - file_data = pkgfiles[objkey]
12705 - file_type = file_data[0]
12706 -
12707 - # don't try to unmerge the prefix offset itself
12708 - if len(obj) <= len(eroot) or not obj.startswith(eroot):
12709 - show_unmerge("---", unmerge_desc["!prefix"], file_type, obj)
12710 - continue
12711 -
12712 - statobj = None
12713 - try:
12714 - statobj = os.stat(obj)
12715 - except OSError:
12716 - pass
12717 - lstatobj = None
12718 - try:
12719 - lstatobj = os.lstat(obj)
12720 - except (OSError, AttributeError):
12721 - pass
12722 - islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
12723 - if lstatobj is None:
12724 - show_unmerge("---", unmerge_desc["!found"], file_type, obj)
12725 - continue
12726 -
12727 - f_match = obj[len(eroot)-1:]
12728 - ignore = False
12729 - for pattern in uninstall_ignore:
12730 - if fnmatch.fnmatch(f_match, pattern):
12731 - ignore = True
12732 - break
12733 -
12734 - if not ignore:
12735 - if islink and f_match in \
12736 - ("/lib", "/usr/lib", "/usr/local/lib"):
12737 - # Ignore libdir symlinks for bug #423127.
12738 - ignore = True
12739 -
12740 - if ignore:
12741 - show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
12742 - continue
12743 -
12744 - # don't use EROOT, CONTENTS entries already contain EPREFIX
12745 - if obj.startswith(real_root):
12746 - relative_path = obj[real_root_len:]
12747 - is_owned = False
12748 - for dblnk in others_in_slot:
12749 - if dblnk.isowner(relative_path):
12750 - is_owned = True
12751 - break
12752 -
12753 - if is_owned and islink and \
12754 - file_type in ("sym", "dir") and \
12755 - statobj and stat.S_ISDIR(statobj.st_mode):
12756 - # A new instance of this package claims the file, so
12757 - # don't unmerge it. If the file is symlink to a
12758 - # directory and the unmerging package installed it as
12759 - # a symlink, but the new owner has it listed as a
12760 - # directory, then we'll produce a warning since the
12761 - # symlink is a sort of orphan in this case (see
12762 - # bug #326685).
12763 - symlink_orphan = False
12764 - for dblnk in others_in_slot:
12765 - parent_contents_key = \
12766 - dblnk._match_contents(relative_path)
12767 - if not parent_contents_key:
12768 - continue
12769 - if not parent_contents_key.startswith(
12770 - real_root):
12771 - continue
12772 - if dblnk.getcontents()[
12773 - parent_contents_key][0] == "dir":
12774 - symlink_orphan = True
12775 - break
12776 -
12777 - if symlink_orphan:
12778 - protected_symlinks.setdefault(
12779 - (statobj.st_dev, statobj.st_ino),
12780 - []).append(relative_path)
12781 -
12782 - if is_owned:
12783 - show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
12784 - continue
12785 - elif relative_path in cfgfiledict:
12786 - stale_confmem.append(relative_path)
12787 -
12788 - # Don't unlink symlinks to directories here since that can
12789 - # remove /lib and /usr/lib symlinks.
12790 - if unmerge_orphans and \
12791 - lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
12792 - not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
12793 - not self.isprotected(obj):
12794 - try:
12795 - unlink(obj, lstatobj)
12796 - except EnvironmentError as e:
12797 - if e.errno not in ignored_unlink_errnos:
12798 - raise
12799 - del e
12800 - show_unmerge("<<<", "", file_type, obj)
12801 - continue
12802 -
12803 - lmtime = str(lstatobj[stat.ST_MTIME])
12804 - if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
12805 - show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
12806 - continue
12807 -
12808 - if file_type == "dir" and not islink:
12809 - if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
12810 - show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
12811 - continue
12812 - mydirs.add((obj, (lstatobj.st_dev, lstatobj.st_ino)))
12813 - elif file_type == "sym" or (file_type == "dir" and islink):
12814 - if not islink:
12815 - show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
12816 - continue
12817 -
12818 - # If this symlink points to a directory then we don't want
12819 - # to unmerge it if there are any other packages that
12820 - # installed files into the directory via this symlink
12821 - # (see bug #326685).
12822 - # TODO: Resolving a symlink to a directory will require
12823 - # simulation if $ROOT != / and the link is not relative.
12824 - if islink and statobj and stat.S_ISDIR(statobj.st_mode) \
12825 - and obj.startswith(real_root):
12826 -
12827 - relative_path = obj[real_root_len:]
12828 - try:
12829 - target_dir_contents = os.listdir(obj)
12830 - except OSError:
12831 - pass
12832 - else:
12833 - if target_dir_contents:
12834 - # If all the children are regular files owned
12835 - # by this package, then the symlink should be
12836 - # safe to unmerge.
12837 - all_owned = True
12838 - for child in target_dir_contents:
12839 - child = os.path.join(relative_path, child)
12840 - if not self.isowner(child):
12841 - all_owned = False
12842 - break
12843 - try:
12844 - child_lstat = os.lstat(os.path.join(
12845 - real_root, child.lstrip(os.sep)))
12846 - except OSError:
12847 - continue
12848 -
12849 - if not stat.S_ISREG(child_lstat.st_mode):
12850 - # Nested symlinks or directories make
12851 - # the issue very complex, so just
12852 - # preserve the symlink in order to be
12853 - # on the safe side.
12854 - all_owned = False
12855 - break
12856 -
12857 - if not all_owned:
12858 - protected_symlinks.setdefault(
12859 - (statobj.st_dev, statobj.st_ino),
12860 - []).append(relative_path)
12861 - show_unmerge("---", unmerge_desc["!empty"],
12862 - file_type, obj)
12863 - continue
12864 -
12865 - # Go ahead and unlink symlinks to directories here when
12866 - # they're actually recorded as symlinks in the contents.
12867 - # Normally, symlinks such as /lib -> lib64 are not recorded
12868 - # as symlinks in the contents of a package. If a package
12869 - # installs something into ${D}/lib/, it is recorded in the
12870 - # contents as a directory even if it happens to correspond
12871 - # to a symlink when it's merged to the live filesystem.
12872 - try:
12873 - unlink(obj, lstatobj)
12874 - show_unmerge("<<<", "", file_type, obj)
12875 - except (OSError, IOError) as e:
12876 - if e.errno not in ignored_unlink_errnos:
12877 - raise
12878 - del e
12879 - show_unmerge("!!!", "", file_type, obj)
12880 - elif pkgfiles[objkey][0] == "obj":
12881 - if statobj is None or not stat.S_ISREG(statobj.st_mode):
12882 - show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
12883 - continue
12884 - mymd5 = None
12885 - try:
12886 - mymd5 = perf_md5(obj, calc_prelink=calc_prelink)
12887 - except FileNotFound as e:
12888 - # the file has disappeared between now and our stat call
12889 - show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
12890 - continue
12891 -
12892 - # string.lower is needed because db entries used to be in upper-case. The
12893 - # string.lower allows for backwards compatibility.
12894 - if mymd5 != pkgfiles[objkey][2].lower():
12895 - show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
12896 - continue
12897 - try:
12898 - unlink(obj, lstatobj)
12899 - except (OSError, IOError) as e:
12900 - if e.errno not in ignored_unlink_errnos:
12901 - raise
12902 - del e
12903 - show_unmerge("<<<", "", file_type, obj)
12904 - elif pkgfiles[objkey][0] == "fif":
12905 - if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
12906 - show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
12907 - continue
12908 - show_unmerge("---", "", file_type, obj)
12909 - elif pkgfiles[objkey][0] == "dev":
12910 - show_unmerge("---", "", file_type, obj)
12911 -
12912 - self._unmerge_dirs(mydirs, infodirs_inodes,
12913 - protected_symlinks, unmerge_desc, unlink, os)
12914 - mydirs.clear()
12915 -
12916 - if protected_symlinks:
12917 - self._unmerge_protected_symlinks(others_in_slot, infodirs_inodes,
12918 - protected_symlinks, unmerge_desc, unlink, os)
12919 -
12920 - if protected_symlinks:
12921 - msg = "One or more symlinks to directories have been " + \
12922 - "preserved in order to ensure that files installed " + \
12923 - "via these symlinks remain accessible. " + \
12924 - "This indicates that the mentioned symlink(s) may " + \
12925 - "be obsolete remnants of an old install, and it " + \
12926 - "may be appropriate to replace a given symlink " + \
12927 - "with the directory that it points to."
12928 - lines = textwrap.wrap(msg, 72)
12929 - lines.append("")
12930 - flat_list = set()
12931 - flat_list.update(*protected_symlinks.values())
12932 - flat_list = sorted(flat_list)
12933 - for f in flat_list:
12934 - lines.append("\t%s" % (os.path.join(real_root,
12935 - f.lstrip(os.sep))))
12936 - lines.append("")
12937 - self._elog("elog", "postrm", lines)
12938 -
12939 - # Remove stale entries from config memory.
12940 - if stale_confmem:
12941 - for filename in stale_confmem:
12942 - del cfgfiledict[filename]
12943 - writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
12944 -
12945 - # Remove self from the vartree database so that our own virtual gets zapped if we're the last node.
12946 - self.vartree.zap(self.mycpv)
12947 -
12948 - def _unmerge_protected_symlinks(self, others_in_slot, infodirs_inodes,
12949 - protected_symlinks, unmerge_desc, unlink, os):
12950 -
12951 - real_root = self.settings['ROOT']
12952 - show_unmerge = self._show_unmerge
12953 - ignored_unlink_errnos = self._ignored_unlink_errnos
12954 -
12955 - flat_list = set()
12956 - flat_list.update(*protected_symlinks.values())
12957 - flat_list = sorted(flat_list)
12958 -
12959 - for f in flat_list:
12960 - for dblnk in others_in_slot:
12961 - if dblnk.isowner(f):
12962 - # If another package in the same slot installed
12963 - # a file via a protected symlink, return early
12964 - # and don't bother searching for any other owners.
12965 - return
12966 -
12967 - msg = []
12968 - msg.append("")
12969 - msg.append(_("Directory symlink(s) may need protection:"))
12970 - msg.append("")
12971 -
12972 - for f in flat_list:
12973 - msg.append("\t%s" % \
12974 - os.path.join(real_root, f.lstrip(os.path.sep)))
12975 -
12976 - msg.append("")
12977 - msg.append("Use the UNINSTALL_IGNORE variable to exempt specific symlinks")
12978 - msg.append("from the following search (see the make.conf man page).")
12979 - msg.append("")
12980 - msg.append(_("Searching all installed"
12981 - " packages for files installed via above symlink(s)..."))
12982 - msg.append("")
12983 - self._elog("elog", "postrm", msg)
12984 -
12985 - self.lockdb()
12986 - try:
12987 - owners = self.vartree.dbapi._owners.get_owners(flat_list)
12988 - self.vartree.dbapi.flush_cache()
12989 - finally:
12990 - self.unlockdb()
12991 -
12992 - for owner in list(owners):
12993 - if owner.mycpv == self.mycpv:
12994 - owners.pop(owner, None)
12995 -
12996 - if not owners:
12997 - msg = []
12998 - msg.append(_("The above directory symlink(s) are all "
12999 - "safe to remove. Removing them now..."))
13000 - msg.append("")
13001 - self._elog("elog", "postrm", msg)
13002 - dirs = set()
13003 - for unmerge_syms in protected_symlinks.values():
13004 - for relative_path in unmerge_syms:
13005 - obj = os.path.join(real_root,
13006 - relative_path.lstrip(os.sep))
13007 - parent = os.path.dirname(obj)
13008 - while len(parent) > len(self._eroot):
13009 - try:
13010 - lstatobj = os.lstat(parent)
13011 - except OSError:
13012 - break
13013 - else:
13014 - dirs.add((parent,
13015 - (lstatobj.st_dev, lstatobj.st_ino)))
13016 - parent = os.path.dirname(parent)
13017 - try:
13018 - unlink(obj, os.lstat(obj))
13019 - show_unmerge("<<<", "", "sym", obj)
13020 - except (OSError, IOError) as e:
13021 - if e.errno not in ignored_unlink_errnos:
13022 - raise
13023 - del e
13024 - show_unmerge("!!!", "", "sym", obj)
13025 -
13026 - protected_symlinks.clear()
13027 - self._unmerge_dirs(dirs, infodirs_inodes,
13028 - protected_symlinks, unmerge_desc, unlink, os)
13029 - dirs.clear()
13030 -
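For illustration, a minimal standalone sketch (hypothetical helper, not part of
this change) of the bookkeeping behind protected_symlinks above: directory
symlinks are grouped by the (st_dev, st_ino) of the directory they resolve to,
so every symlink pointing at the same physical directory is protected or
released together:

import os

def group_symlinks_by_target(symlink_paths):
    """Map (st_dev, st_ino) of each symlink's target directory to the
    symlinks resolving to it; targets that vanished are skipped."""
    protected = {}
    for path in symlink_paths:
        try:
            st = os.stat(path)  # os.stat() follows the symlink
        except OSError:
            continue
        protected.setdefault((st.st_dev, st.st_ino), []).append(path)
    return protected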
13031 - def _unmerge_dirs(self, dirs, infodirs_inodes,
13032 - protected_symlinks, unmerge_desc, unlink, os):
13033 -
13034 - show_unmerge = self._show_unmerge
13035 - infodir_cleanup = self._infodir_cleanup
13036 - ignored_unlink_errnos = self._ignored_unlink_errnos
13037 - ignored_rmdir_errnos = self._ignored_rmdir_errnos
13038 - real_root = self.settings['ROOT']
13039 -
13040 - dirs = sorted(dirs)
13041 - revisit = {}
13042 -
13043 - while True:
13044 - try:
13045 - obj, inode_key = dirs.pop()
13046 - except IndexError:
13047 - break
13048 - # Treat any directory named "info" as a candidate here,
13049 - # since it might have been in INFOPATH previously even
13050 - # though it may not be there now.
13051 - if inode_key in infodirs_inodes or \
13052 - os.path.basename(obj) == "info":
13053 - try:
13054 - remaining = os.listdir(obj)
13055 - except OSError:
13056 - pass
13057 - else:
13058 - cleanup_info_dir = ()
13059 - if remaining and \
13060 - len(remaining) <= len(infodir_cleanup):
13061 - if not set(remaining).difference(infodir_cleanup):
13062 - cleanup_info_dir = remaining
13063 -
13064 - for child in cleanup_info_dir:
13065 - child = os.path.join(obj, child)
13066 - try:
13067 - lstatobj = os.lstat(child)
13068 - if stat.S_ISREG(lstatobj.st_mode):
13069 - unlink(child, lstatobj)
13070 - show_unmerge("<<<", "", "obj", child)
13071 - except EnvironmentError as e:
13072 - if e.errno not in ignored_unlink_errnos:
13073 - raise
13074 - del e
13075 - show_unmerge("!!!", "", "obj", child)
13076 -
13077 - try:
13078 - parent_name = os.path.dirname(obj)
13079 - parent_stat = os.stat(parent_name)
13080 -
13081 - if bsd_chflags:
13082 - lstatobj = os.lstat(obj)
13083 - if lstatobj.st_flags != 0:
13084 - bsd_chflags.lchflags(obj, 0)
13085 -
13086 - # Use normal stat/chflags for the parent since we want to
13087 - # follow any symlinks to the real parent directory.
13088 - pflags = parent_stat.st_flags
13089 - if pflags != 0:
13090 - bsd_chflags.chflags(parent_name, 0)
13091 - try:
13092 - os.rmdir(obj)
13093 - finally:
13094 - if bsd_chflags and pflags != 0:
13095 - # Restore the parent flags we saved before unlinking
13096 - bsd_chflags.chflags(parent_name, pflags)
13097 -
13098 - # Record the parent directory for use in syncfs calls.
13099 - # Note that we use a realpath and a regular stat here, since
13100 - # we want to follow any symlinks back to the real device where
13101 - # the real parent directory resides.
13102 - self._merged_path(os.path.realpath(parent_name), parent_stat)
13103 -
13104 - show_unmerge("<<<", "", "dir", obj)
13105 - except EnvironmentError as e:
13106 - if e.errno not in ignored_rmdir_errnos:
13107 - raise
13108 - if e.errno != errno.ENOENT:
13109 - show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
13110 - revisit[obj] = inode_key
13111 -
13112 - # Since we didn't remove this directory, record the directory
13113 - # itself for use in syncfs calls, if we have removed another
13114 - # file from the same device.
13115 - # Note that we use a realpath and a regular stat here, since
13116 - # we want to follow any symlinks back to the real device where
13117 - # the real directory resides.
13118 - try:
13119 - dir_stat = os.stat(obj)
13120 - except OSError:
13121 - pass
13122 - else:
13123 - if dir_stat.st_dev in self._device_path_map:
13124 - self._merged_path(os.path.realpath(obj), dir_stat)
13125 -
13126 - else:
13127 - # When a directory is successfully removed, there's
13128 - # no need to protect symlinks that point to it.
13129 - unmerge_syms = protected_symlinks.pop(inode_key, None)
13130 - if unmerge_syms is not None:
13131 - parents = []
13132 - for relative_path in unmerge_syms:
13133 - obj = os.path.join(real_root,
13134 - relative_path.lstrip(os.sep))
13135 - try:
13136 - unlink(obj, os.lstat(obj))
13137 - show_unmerge("<<<", "", "sym", obj)
13138 - except (OSError, IOError) as e:
13139 - if e.errno not in ignored_unlink_errnos:
13140 - raise
13141 - del e
13142 - show_unmerge("!!!", "", "sym", obj)
13143 - else:
13144 - parents.append(os.path.dirname(obj))
13145 -
13146 - if parents:
13147 - # Revisit parents recursively (bug 640058).
13148 - recursive_parents = []
13149 - for parent in set(parents):
13150 - while parent in revisit:
13151 - recursive_parents.append(parent)
13152 - parent = os.path.dirname(parent)
13153 - if parent == '/':
13154 - break
13155 -
13156 - for parent in sorted(set(recursive_parents)):
13157 - dirs.append((parent, revisit.pop(parent)))
13158 -
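The revisit handling above (bug 640058) can be modeled in isolation:
directories are removed deepest-first, and whenever a removal succeeds, a
parent that earlier failed rmdir() because it was not empty is pushed back for
another attempt. A minimal sketch with a hypothetical helper name:

import os

def rmdir_deepest_first(paths):
    """Remove directories deepest-first; once a child is gone, retry any
    parent that previously refused rmdir() for being non-empty."""
    revisit = set()
    stack = sorted(paths)  # children sort after their parents, so pop()
                           # yields deeper directories first
    while stack:
        d = stack.pop()
        try:
            os.rmdir(d)
        except OSError:
            revisit.add(d)
            continue
        parent = os.path.dirname(d)
        if parent in revisit:
            revisit.discard(parent)
            stack.append(parent)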
13159 - def isowner(self, filename, destroot=None):
13160 - """
13161 - Check if a file belongs to this package. This may
13162 - result in a stat call for the parent directory of
13163 - every installed file, since the inode numbers are
13164 - used to work around the problem of ambiguous paths
13165 - caused by symlinked directories. The results of
13166 - stat calls are cached to optimize multiple calls
13167 - to this method.
13168 -
13169 - @param filename: path of the file to check for ownership
13170 - @type filename: String (Path)
13171 - @param destroot: deprecated and unused; self.settings['EROOT'] is used instead
13172 - @type destroot: String (Path)
13173 - @rtype: Boolean
13174 - @return:
13175 - 1. True if this package owns the file.
13176 - 2. False if this package does not own the file.
13177 - """
13178 -
13179 - if destroot is not None and destroot != self._eroot:
13180 - warnings.warn("The second parameter of the " + \
13181 - "portage.dbapi.vartree.dblink.isowner()" + \
13182 - " is now unused. Instead " + \
13183 - "self.settings['EROOT'] will be used.",
13184 - DeprecationWarning, stacklevel=2)
13185 -
13186 - return bool(self._match_contents(filename))
13187 -
13188 - def _match_contents(self, filename, destroot=None):
13189 - """
13190 - The matching contents entry is returned, which is useful
13191 - since the path may differ from the one given by the caller,
13192 - due to symlinks.
13193 -
13194 - @rtype: String
13195 - @return: the contents entry corresponding to the given path, or False
13196 - if the file is not owned by this package.
13197 - """
13198 -
13199 - filename = _unicode_decode(filename,
13200 - encoding=_encodings['content'], errors='strict')
13201 -
13202 - if destroot is not None and destroot != self._eroot:
13203 - warnings.warn("The second parameter of the " + \
13204 - "portage.dbapi.vartree.dblink._match_contents()" + \
13205 - " is now unused. Instead " + \
13206 - "self.settings['ROOT'] will be used.",
13207 - DeprecationWarning, stacklevel=2)
13208 -
13209 - # don't use EROOT here, image already contains EPREFIX
13210 - destroot = self.settings['ROOT']
13211 -
13212 - # The given filename argument might have a different encoding than
13213 - # the filenames contained in the contents, so use separate wrapped os
13214 - # modules for each. The basename is more likely to contain non-ascii
13215 - # characters than the directory path, so use os_filename_arg for all
13216 - # operations involving the basename of the filename arg.
13217 - os_filename_arg = _os_merge
13218 - os = _os_merge
13219 -
13220 - try:
13221 - _unicode_encode(filename,
13222 - encoding=_encodings['merge'], errors='strict')
13223 - except UnicodeEncodeError:
13224 - # The package appears to have been merged with a
13225 - # different value of sys.getfilesystemencoding(),
13226 - # so fall back to utf_8 if appropriate.
13227 - try:
13228 - _unicode_encode(filename,
13229 - encoding=_encodings['fs'], errors='strict')
13230 - except UnicodeEncodeError:
13231 - pass
13232 - else:
13233 - os_filename_arg = portage.os
13234 -
13235 - destfile = normalize_path(
13236 - os_filename_arg.path.join(destroot,
13237 - filename.lstrip(os_filename_arg.path.sep)))
13238 -
13239 - if "case-insensitive-fs" in self.settings.features:
13240 - destfile = destfile.lower()
13241 -
13242 - if self._contents.contains(destfile):
13243 - return self._contents.unmap_key(destfile)
13244 -
13245 - if self.getcontents():
13246 - basename = os_filename_arg.path.basename(destfile)
13247 - if self._contents_basenames is None:
13248 -
13249 - try:
13250 - for x in self._contents.keys():
13251 - _unicode_encode(x,
13252 - encoding=_encodings['merge'],
13253 - errors='strict')
13254 - except UnicodeEncodeError:
13255 - # The package appears to have been merged with a
13256 - # different value of sys.getfilesystemencoding(),
13257 - # so fall back to utf_8 if appropriate.
13258 - try:
13259 - for x in self._contents.keys():
13260 - _unicode_encode(x,
13261 - encoding=_encodings['fs'],
13262 - errors='strict')
13263 - except UnicodeEncodeError:
13264 - pass
13265 - else:
13266 - os = portage.os
13267 -
13268 - self._contents_basenames = set(
13269 - os.path.basename(x) for x in self._contents.keys())
13270 - if basename not in self._contents_basenames:
13271 - # This is a shortcut that, in most cases, allows us to
13272 - # eliminate this package as an owner without the need
13273 - # to examine inode numbers of parent directories.
13274 - return False
13275 -
13276 - # Use stat rather than lstat since we want to follow
13277 - # any symlinks to the real parent directory.
13278 - parent_path = os_filename_arg.path.dirname(destfile)
13279 - try:
13280 - parent_stat = os_filename_arg.stat(parent_path)
13281 - except EnvironmentError as e:
13282 - if e.errno != errno.ENOENT:
13283 - raise
13284 - del e
13285 - return False
13286 - if self._contents_inodes is None:
13287 -
13288 - if os is _os_merge:
13289 - try:
13290 - for x in self._contents.keys():
13291 - _unicode_encode(x,
13292 - encoding=_encodings['merge'],
13293 - errors='strict')
13294 - except UnicodeEncodeError:
13295 - # The package appears to have been merged with a
13296 - # different value of sys.getfilesystemencoding(),
13297 - # so fall back to utf_8 if appropriate.
13298 - try:
13299 - for x in self._contents.keys():
13300 - _unicode_encode(x,
13301 - encoding=_encodings['fs'],
13302 - errors='strict')
13303 - except UnicodeEncodeError:
13304 - pass
13305 - else:
13306 - os = portage.os
13307 -
13308 - self._contents_inodes = {}
13309 - parent_paths = set()
13310 - for x in self._contents.keys():
13311 - p_path = os.path.dirname(x)
13312 - if p_path in parent_paths:
13313 - continue
13314 - parent_paths.add(p_path)
13315 - try:
13316 - s = os.stat(p_path)
13317 - except OSError:
13318 - pass
13319 - else:
13320 - inode_key = (s.st_dev, s.st_ino)
13321 - # Use lists of paths in case multiple
13322 - # paths reference the same inode.
13323 - p_path_list = self._contents_inodes.get(inode_key)
13324 - if p_path_list is None:
13325 - p_path_list = []
13326 - self._contents_inodes[inode_key] = p_path_list
13327 - if p_path not in p_path_list:
13328 - p_path_list.append(p_path)
13329 -
13330 - p_path_list = self._contents_inodes.get(
13331 - (parent_stat.st_dev, parent_stat.st_ino))
13332 - if p_path_list:
13333 - for p_path in p_path_list:
13334 - x = os_filename_arg.path.join(p_path, basename)
13335 - if self._contents.contains(x):
13336 - return self._contents.unmap_key(x)
13337 -
13338 - return False
13339 -
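The inode comparison that _match_contents relies on can be shown standalone:
two candidate paths name the same directory entry when their parent
directories share a device/inode pair, which makes symlinked directory
prefixes (e.g. /lib vs. /lib64) compare equal. A sketch with hypothetical
inputs:

import os

def same_directory_entry(path_a, path_b):
    """True if both paths resolve to the same entry in the same physical
    directory, regardless of symlinked path prefixes."""
    if os.path.basename(path_a) != os.path.basename(path_b):
        return False
    st_a = os.stat(os.path.dirname(path_a))  # stat() follows symlinks
    st_b = os.stat(os.path.dirname(path_b))
    return (st_a.st_dev, st_a.st_ino) == (st_b.st_dev, st_b.st_ino)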
13340 - def _linkmap_rebuild(self, **kwargs):
13341 - """
13342 - Rebuild the self._linkmap if it's not broken due to missing
13343 - scanelf binary. Also, return early if preserve-libs is disabled
13344 - and the preserve-libs registry is empty.
13345 - """
13346 - if self._linkmap_broken or \
13347 - self.vartree.dbapi._linkmap is None or \
13348 - self.vartree.dbapi._plib_registry is None or \
13349 - ("preserve-libs" not in self.settings.features and \
13350 - not self.vartree.dbapi._plib_registry.hasEntries()):
13351 - return
13352 - try:
13353 - self.vartree.dbapi._linkmap.rebuild(**kwargs)
13354 - except CommandNotFound as e:
13355 - self._linkmap_broken = True
13356 - self._display_merge(_("!!! Disabling preserve-libs " \
13357 - "due to error: Command Not Found: %s\n") % (e,),
13358 - level=logging.ERROR, noiselevel=-1)
13359 -
13360 - def _find_libs_to_preserve(self, unmerge=False):
13361 - """
13362 - Get set of relative paths for libraries to be preserved. When
13363 - unmerge is False, file paths to preserve are selected from
13364 - self._installed_instance. Otherwise, paths are selected from
13365 - self.
13366 - """
13367 - if self._linkmap_broken or \
13368 - self.vartree.dbapi._linkmap is None or \
13369 - self.vartree.dbapi._plib_registry is None or \
13370 - (not unmerge and self._installed_instance is None) or \
13371 - not self._preserve_libs:
13372 - return set()
13373 -
13374 - os = _os_merge
13375 - linkmap = self.vartree.dbapi._linkmap
13376 - if unmerge:
13377 - installed_instance = self
13378 - else:
13379 - installed_instance = self._installed_instance
13380 - old_contents = installed_instance.getcontents()
13381 - root = self.settings['ROOT']
13382 - root_len = len(root) - 1
13383 - lib_graph = digraph()
13384 - path_node_map = {}
13385 -
13386 - def path_to_node(path):
13387 - node = path_node_map.get(path)
13388 - if node is None:
13389 - node = linkmap._LibGraphNode(linkmap._obj_key(path))
13390 - alt_path_node = lib_graph.get(node)
13391 - if alt_path_node is not None:
13392 - node = alt_path_node
13393 - node.alt_paths.add(path)
13394 - path_node_map[path] = node
13395 - return node
13396 -
13397 - consumer_map = {}
13398 - provider_nodes = set()
13399 - # Create provider nodes and add them to the graph.
13400 - for f_abs in old_contents:
13401 -
13402 - if os is _os_merge:
13403 - try:
13404 - _unicode_encode(f_abs,
13405 - encoding=_encodings['merge'], errors='strict')
13406 - except UnicodeEncodeError:
13407 - # The package appears to have been merged with a
13408 - # different value of sys.getfilesystemencoding(),
13409 - # so fall back to utf_8 if appropriate.
13410 - try:
13411 - _unicode_encode(f_abs,
13412 - encoding=_encodings['fs'], errors='strict')
13413 - except UnicodeEncodeError:
13414 - pass
13415 - else:
13416 - os = portage.os
13417 -
13418 - f = f_abs[root_len:]
13419 - try:
13420 - consumers = linkmap.findConsumers(f,
13421 - exclude_providers=(installed_instance.isowner,))
13422 - except KeyError:
13423 - continue
13424 - if not consumers:
13425 - continue
13426 - provider_node = path_to_node(f)
13427 - lib_graph.add(provider_node, None)
13428 - provider_nodes.add(provider_node)
13429 - consumer_map[provider_node] = consumers
13430 -
13431 - # Create consumer nodes and add them to the graph.
13432 - # Note that consumers can also be providers.
13433 - for provider_node, consumers in consumer_map.items():
13434 - for c in consumers:
13435 - consumer_node = path_to_node(c)
13436 - if installed_instance.isowner(c) and \
13437 - consumer_node not in provider_nodes:
13438 - # This is not a provider, so it will be uninstalled.
13439 - continue
13440 - lib_graph.add(provider_node, consumer_node)
13441 -
13442 - # Locate nodes which should be preserved. They consist of all
13443 - # providers that are reachable from consumers that are not
13444 - # providers themselves.
13445 - preserve_nodes = set()
13446 - for consumer_node in lib_graph.root_nodes():
13447 - if consumer_node in provider_nodes:
13448 - continue
13449 - # Preserve all providers that are reachable from this consumer.
13450 - node_stack = lib_graph.child_nodes(consumer_node)
13451 - while node_stack:
13452 - provider_node = node_stack.pop()
13453 - if provider_node in preserve_nodes:
13454 - continue
13455 - preserve_nodes.add(provider_node)
13456 - node_stack.extend(lib_graph.child_nodes(provider_node))
13457 -
13458 - preserve_paths = set()
13459 - for preserve_node in preserve_nodes:
13460 - # Preserve the library itself, and also preserve the
13461 - # soname symlink which is the only symlink that is
13462 - # strictly required.
13463 - hardlinks = set()
13464 - soname_symlinks = set()
13465 - soname = linkmap.getSoname(next(iter(preserve_node.alt_paths)))
13466 - have_replacement_soname_link = False
13467 - have_replacement_hardlink = False
13468 - for f in preserve_node.alt_paths:
13469 - f_abs = os.path.join(root, f.lstrip(os.sep))
13470 - try:
13471 - if stat.S_ISREG(os.lstat(f_abs).st_mode):
13472 - hardlinks.add(f)
13473 - if not unmerge and self.isowner(f):
13474 - have_replacement_hardlink = True
13475 - if os.path.basename(f) == soname:
13476 - have_replacement_soname_link = True
13477 - elif os.path.basename(f) == soname:
13478 - soname_symlinks.add(f)
13479 - if not unmerge and self.isowner(f):
13480 - have_replacement_soname_link = True
13481 - except OSError:
13482 - pass
13483 -
13484 - if have_replacement_hardlink and have_replacement_soname_link:
13485 - continue
13486 -
13487 - if hardlinks:
13488 - preserve_paths.update(hardlinks)
13489 - preserve_paths.update(soname_symlinks)
13490 -
13491 - return preserve_paths
13492 -
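The preservation rule above reduces to a reachability walk: starting from
consumers that are not providers themselves, every provider reachable through
the library graph must be preserved. A minimal sketch, where child_nodes is a
hypothetical callable standing in for lib_graph.child_nodes:

def providers_to_preserve(root_consumers, child_nodes):
    """Depth-first collection of all providers reachable from the given
    consumer nodes, mirroring the node_stack loop above."""
    preserve = set()
    for consumer in root_consumers:
        stack = list(child_nodes(consumer))
        while stack:
            provider = stack.pop()
            if provider in preserve:
                continue
            preserve.add(provider)
            stack.extend(child_nodes(provider))
    return preserve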
13493 - def _add_preserve_libs_to_contents(self, preserve_paths):
13494 - """
13495 - Preserve libs returned from _find_libs_to_preserve().
13496 - """
13497 -
13498 - if not preserve_paths:
13499 - return
13500 -
13501 - os = _os_merge
13502 - showMessage = self._display_merge
13503 - root = self.settings['ROOT']
13504 -
13505 - # Copy contents entries from the old package to the new one.
13506 - new_contents = self.getcontents().copy()
13507 - old_contents = self._installed_instance.getcontents()
13508 - for f in sorted(preserve_paths):
13509 - f = _unicode_decode(f,
13510 - encoding=_encodings['content'], errors='strict')
13511 - f_abs = os.path.join(root, f.lstrip(os.sep))
13512 - contents_entry = old_contents.get(f_abs)
13513 - if contents_entry is None:
13514 - # This will probably never happen, but it might if one of the
13515 - # paths returned from findConsumers() refers to one of the libs
13516 - # that should be preserved yet the path is not listed in the
13517 - # contents. Such a path might belong to some other package, so
13518 - # it shouldn't be preserved here.
13519 - showMessage(_("!!! File '%s' will not be preserved "
13520 - "due to missing contents entry\n") % (f_abs,),
13521 - level=logging.ERROR, noiselevel=-1)
13522 - preserve_paths.remove(f)
13523 - continue
13524 - new_contents[f_abs] = contents_entry
13525 - obj_type = contents_entry[0]
13526 - showMessage(_(">>> needed %s %s\n") % (obj_type, f_abs),
13527 - noiselevel=-1)
13528 - # Add parent directories to contents if necessary.
13529 - parent_dir = os.path.dirname(f_abs)
13530 - while len(parent_dir) > len(root):
13531 - new_contents[parent_dir] = ["dir"]
13532 - prev = parent_dir
13533 - parent_dir = os.path.dirname(parent_dir)
13534 - if prev == parent_dir:
13535 - break
13536 - outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS"))
13537 - write_contents(new_contents, root, outfile)
13538 - outfile.close()
13539 - self._clear_contents_cache()
13540 -
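Recording the parent directories of a preserved file, as the while-loop above
does, is worth seeing in isolation; the prev == parent guard stops the climb
at the filesystem root. A sketch with hypothetical names:

import os

def add_parent_dirs(contents, f_abs, root):
    """Add a "dir" contents entry for every ancestor of f_abs below root."""
    parent = os.path.dirname(f_abs)
    while len(parent) > len(root):
        contents.setdefault(parent, ["dir"])
        prev, parent = parent, os.path.dirname(parent)
        if prev == parent:  # reached the filesystem root
            break
    return contents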
13541 - def _find_unused_preserved_libs(self, unmerge_no_replacement):
13542 - """
13543 - Find preserved libraries that don't have any consumers left.
13544 - """
13545 -
13546 - if self._linkmap_broken or \
13547 - self.vartree.dbapi._linkmap is None or \
13548 - self.vartree.dbapi._plib_registry is None or \
13549 - not self.vartree.dbapi._plib_registry.hasEntries():
13550 - return {}
13551 -
13552 - # Since preserved libraries can be consumers of other preserved
13553 - # libraries, use a graph to track consumer relationships.
13554 - plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
13555 - linkmap = self.vartree.dbapi._linkmap
13556 - lib_graph = digraph()
13557 - preserved_nodes = set()
13558 - preserved_paths = set()
13559 - path_cpv_map = {}
13560 - path_node_map = {}
13561 - root = self.settings['ROOT']
13562 -
13563 - def path_to_node(path):
13564 - node = path_node_map.get(path)
13565 - if node is None:
13566 - chost = self.settings.get('CHOST', '')
13567 - if chost.find('darwin') >= 0:
13568 - node = LinkageMapMachO._LibGraphNode(linkmap._obj_key(path))
13569 - elif chost.find('interix') >= 0 or chost.find('winnt') >= 0:
13570 - node = LinkageMapPeCoff._LibGraphNode(linkmap._obj_key(path))
13571 - elif chost.find('aix') >= 0:
13572 - node = LinkageMapXCoff._LibGraphNode(linkmap._obj_key(path))
13573 - else:
13574 - node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
13575 - alt_path_node = lib_graph.get(node)
13576 - if alt_path_node is not None:
13577 - node = alt_path_node
13578 - node.alt_paths.add(path)
13579 - path_node_map[path] = node
13580 - return node
13581 -
13582 - for cpv, plibs in plib_dict.items():
13583 - for f in plibs:
13584 - path_cpv_map[f] = cpv
13585 - preserved_node = path_to_node(f)
13586 - if not preserved_node.file_exists():
13587 - continue
13588 - lib_graph.add(preserved_node, None)
13589 - preserved_paths.add(f)
13590 - preserved_nodes.add(preserved_node)
13591 - for c in self.vartree.dbapi._linkmap.findConsumers(f):
13592 - consumer_node = path_to_node(c)
13593 - if not consumer_node.file_exists():
13594 - continue
13595 - # Note that consumers may also be providers.
13596 - lib_graph.add(preserved_node, consumer_node)
13597 -
13598 - # Eliminate consumers having providers with the same soname as an
13599 - # installed library that is not preserved. This eliminates
13600 - # libraries that are erroneously preserved due to a move from one
13601 - # directory to another.
13602 - # Also eliminate consumers that are going to be unmerged if
13603 - # unmerge_no_replacement is True.
13604 - provider_cache = {}
13605 - for preserved_node in preserved_nodes:
13606 - soname = linkmap.getSoname(preserved_node)
13607 - for consumer_node in lib_graph.parent_nodes(preserved_node):
13608 - if consumer_node in preserved_nodes:
13609 - continue
13610 - if unmerge_no_replacement:
13611 - will_be_unmerged = True
13612 - for path in consumer_node.alt_paths:
13613 - if not self.isowner(path):
13614 - will_be_unmerged = False
13615 - break
13616 - if will_be_unmerged:
13617 - # This consumer is not preserved and it is
13618 - # being unmerged, so drop this edge.
13619 - lib_graph.remove_edge(preserved_node, consumer_node)
13620 - continue
13621 -
13622 - providers = provider_cache.get(consumer_node)
13623 - if providers is None:
13624 - providers = linkmap.findProviders(consumer_node)
13625 - provider_cache[consumer_node] = providers
13626 - providers = providers.get(soname)
13627 - if providers is None:
13628 - continue
13629 - for provider in providers:
13630 - if provider in preserved_paths:
13631 - continue
13632 - provider_node = path_to_node(provider)
13633 - if not provider_node.file_exists():
13634 - continue
13635 - if provider_node in preserved_nodes:
13636 - continue
13637 - # An alternative provider seems to be
13638 - # installed, so drop this edge.
13639 - lib_graph.remove_edge(preserved_node, consumer_node)
13640 - break
13641 -
13642 - cpv_lib_map = {}
13643 - while lib_graph:
13644 - root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
13645 - if not root_nodes:
13646 - break
13647 - lib_graph.difference_update(root_nodes)
13648 - unlink_list = set()
13649 - for node in root_nodes:
13650 - unlink_list.update(node.alt_paths)
13651 - unlink_list = sorted(unlink_list)
13652 - for obj in unlink_list:
13653 - cpv = path_cpv_map.get(obj)
13654 - if cpv is None:
13655 - # This means that a symlink is in the preserved libs
13656 - # registry, but the actual lib it points to is not.
13657 - self._display_merge(_("!!! symlink to lib is preserved, "
13658 - "but not the lib itself:\n!!! '%s'\n") % (obj,),
13659 - level=logging.ERROR, noiselevel=-1)
13660 - continue
13661 - removed = cpv_lib_map.get(cpv)
13662 - if removed is None:
13663 - removed = set()
13664 - cpv_lib_map[cpv] = removed
13665 - removed.add(obj)
13666 -
13667 - return cpv_lib_map
13668 -
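The while lib_graph loop above repeatedly peels preserved libraries that no
remaining node consumes; removing one may orphan another preserved library
that only it consumed. A standalone model, under the assumption that the
graph is given as a plain dict (consumers_of maps every node to the set of
nodes consuming it):

def peel_unconsumed(preserved, consumers_of):
    """Return the preserved nodes removable because, round after round,
    nothing still alive consumes them."""
    alive = set(consumers_of)
    removed = set()
    while True:
        roots = {lib for lib in alive & preserved
                 if not (consumers_of[lib] & alive)}
        if not roots:
            break
        removed |= roots
        alive -= roots
    return removed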
13669 - def _remove_preserved_libs(self, cpv_lib_map):
13670 - """
13671 - Remove files returned from _find_unused_preserved_libs().
13672 - """
13673 -
13674 - os = _os_merge
13675 -
13676 - files_to_remove = set()
13677 - for files in cpv_lib_map.values():
13678 - files_to_remove.update(files)
13679 - files_to_remove = sorted(files_to_remove)
13680 - showMessage = self._display_merge
13681 - root = self.settings['ROOT']
13682 -
13683 - parent_dirs = set()
13684 - for obj in files_to_remove:
13685 - obj = os.path.join(root, obj.lstrip(os.sep))
13686 - parent_dirs.add(os.path.dirname(obj))
13687 - if os.path.islink(obj):
13688 - obj_type = _("sym")
13689 - else:
13690 - obj_type = _("obj")
13691 - try:
13692 - os.unlink(obj)
13693 - except OSError as e:
13694 - if e.errno != errno.ENOENT:
13695 - raise
13696 - del e
13697 - else:
13698 - showMessage(_("<<< !needed %s %s\n") % (obj_type, obj),
13699 - noiselevel=-1)
13700 -
13701 - # Remove empty parent directories if possible.
13702 - while parent_dirs:
13703 - x = parent_dirs.pop()
13704 - while True:
13705 - try:
13706 - os.rmdir(x)
13707 - except OSError:
13708 - break
13709 - prev = x
13710 - x = os.path.dirname(x)
13711 - if x == prev:
13712 - break
13713 -
13714 - self.vartree.dbapi._plib_registry.pruneNonExisting()
13715 -
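The parent-directory cleanup above walks upward with rmdir() until a
directory refuses to go away (not empty, or permission denied). The same
pattern in isolation, with a hypothetical helper name:

import os

def prune_empty_parents(path):
    """rmdir() the given directory and then each ancestor in turn,
    stopping at the first failure or at the filesystem root."""
    while True:
        try:
            os.rmdir(path)
        except OSError:
            break
        prev, path = path, os.path.dirname(path)
        if path == prev:
            break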
13716 - def _collision_protect(self, srcroot, destroot, mypkglist,
13717 - file_list, symlink_list):
13718 -
13719 - os = _os_merge
13720 -
13721 - real_relative_paths = {}
13722 -
13723 - collision_ignore = []
13724 - for x in portage.util.shlex_split(
13725 - self.settings.get("COLLISION_IGNORE", "")):
13726 - if os.path.isdir(os.path.join(self._eroot, x.lstrip(os.sep))):
13727 - x = normalize_path(x)
13728 - x += "/*"
13729 - collision_ignore.append(x)
13730 -
13731 - # For collisions with preserved libraries, the current package
13732 - # will assume ownership and the libraries will be unregistered.
13733 - if self.vartree.dbapi._plib_registry is None:
13734 - # preserve-libs is entirely disabled
13735 - plib_cpv_map = None
13736 - plib_paths = None
13737 - plib_inodes = {}
13738 - else:
13739 - plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
13740 - plib_cpv_map = {}
13741 - plib_paths = set()
13742 - for cpv, paths in plib_dict.items():
13743 - plib_paths.update(paths)
13744 - for f in paths:
13745 - plib_cpv_map[f] = cpv
13746 - plib_inodes = self._lstat_inode_map(plib_paths)
13747 -
13748 - plib_collisions = {}
13749 -
13750 - showMessage = self._display_merge
13751 - stopmerge = False
13752 - collisions = []
13753 - dirs = set()
13754 - dirs_ro = set()
13755 - symlink_collisions = []
13756 - destroot = self.settings['ROOT']
13757 - totfiles = len(file_list) + len(symlink_list)
13758 - previous = time.monotonic()
13759 - progress_shown = False
13760 - report_interval = 1.7 # seconds
13761 - falign = len("%d" % totfiles)
13762 - showMessage(_(" %s checking %d files for package collisions\n") % \
13763 - (colorize("GOOD", "*"), totfiles))
13764 - for i, (f, f_type) in enumerate(chain(
13765 - ((f, "reg") for f in file_list),
13766 - ((f, "sym") for f in symlink_list))):
13767 - current = time.monotonic()
13768 - if current - previous > report_interval:
13769 - showMessage(_("%3d%% done, %*d files remaining ...\n") %
13770 - (i * 100 / totfiles, falign, totfiles - i))
13771 - previous = current
13772 - progress_shown = True
13773 -
13774 - dest_path = normalize_path(os.path.join(destroot, f.lstrip(os.path.sep)))
13775 -
13776 - # Relative path with symbolic links resolved only in parent directories
13777 - real_relative_path = os.path.join(os.path.realpath(os.path.dirname(dest_path)),
13778 - os.path.basename(dest_path))[len(destroot):]
13779 -
13780 - real_relative_paths.setdefault(real_relative_path, []).append(f.lstrip(os.path.sep))
13781 -
13782 - parent = os.path.dirname(dest_path)
13783 - if parent not in dirs:
13784 - for x in iter_parents(parent):
13785 - if x in dirs:
13786 - break
13787 - dirs.add(x)
13788 - if os.path.isdir(x):
13789 - if not os.access(x, os.W_OK):
13790 - dirs_ro.add(x)
13791 - break
13792 -
13793 - try:
13794 - dest_lstat = os.lstat(dest_path)
13795 - except EnvironmentError as e:
13796 - if e.errno == errno.ENOENT:
13797 - del e
13798 - continue
13799 - elif e.errno == errno.ENOTDIR:
13800 - del e
13801 - # A non-directory is in a location where this package
13802 - # expects to have a directory.
13803 - dest_lstat = None
13804 - parent_path = dest_path
13805 - while len(parent_path) > len(destroot):
13806 - parent_path = os.path.dirname(parent_path)
13807 - try:
13808 - dest_lstat = os.lstat(parent_path)
13809 - break
13810 - except EnvironmentError as e:
13811 - if e.errno != errno.ENOTDIR:
13812 - raise
13813 - del e
13814 - if not dest_lstat:
13815 - raise AssertionError(
13816 - "unable to find non-directory " + \
13817 - "parent for '%s'" % dest_path)
13818 - dest_path = parent_path
13819 - f = os.path.sep + dest_path[len(destroot):]
13820 - if f in collisions:
13821 - continue
13822 - else:
13823 - raise
13824 - if f[0] != "/":
13825 - f = "/" + f
13826 -
13827 - if stat.S_ISDIR(dest_lstat.st_mode):
13828 - if f_type == "sym":
13829 - # This case is explicitly banned
13830 - # by PMS (see bug #326685).
13831 - symlink_collisions.append(f)
13832 - collisions.append(f)
13833 - continue
13834 -
13835 - plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino))
13836 - if plibs:
13837 - for path in plibs:
13838 - cpv = plib_cpv_map[path]
13839 - paths = plib_collisions.get(cpv)
13840 - if paths is None:
13841 - paths = set()
13842 - plib_collisions[cpv] = paths
13843 - paths.add(path)
13844 - # The current package will assume ownership and the
13845 - # libraries will be unregistered, so exclude this
13846 - # path from the normal collisions.
13847 - continue
13848 -
13849 - isowned = False
13850 - full_path = os.path.join(destroot, f.lstrip(os.path.sep))
13851 - for ver in mypkglist:
13852 - if ver.isowner(f):
13853 - isowned = True
13854 - break
13855 - if not isowned and self.isprotected(full_path):
13856 - isowned = True
13857 - if not isowned:
13858 - f_match = full_path[len(self._eroot)-1:]
13859 - stopmerge = True
13860 - for pattern in collision_ignore:
13861 - if fnmatch.fnmatch(f_match, pattern):
13862 - stopmerge = False
13863 - break
13864 - if stopmerge:
13865 - collisions.append(f)
13866 -
13867 - internal_collisions = {}
13868 - for real_relative_path, files in real_relative_paths.items():
13869 - # Detect internal collisions between non-identical files.
13870 - if len(files) >= 2:
13871 - files.sort()
13872 - for i in range(len(files) - 1):
13873 - file1 = normalize_path(os.path.join(srcroot, files[i]))
13874 - file2 = normalize_path(os.path.join(srcroot, files[i+1]))
13875 - # Compare files, ignoring differences in times.
13876 - differences = compare_files(file1, file2, skipped_types=("atime", "mtime", "ctime"))
13877 - if differences:
13878 - internal_collisions.setdefault(real_relative_path, {})[(files[i], files[i+1])] = differences
13879 -
13880 - if progress_shown:
13881 - showMessage(_("100% done\n"))
13882 -
13883 - return collisions, internal_collisions, dirs_ro, symlink_collisions, plib_collisions
13884 -
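COLLISION_IGNORE handling above turns each configured directory into a
"<dir>/*" glob and then fnmatches colliding paths against the patterns. A
small sketch (hypothetical helper and sample values):

import fnmatch

def is_ignored_collision(path, collision_ignore):
    """True if path matches any COLLISION_IGNORE pattern; directories in
    the setting are assumed to have been expanded to "<dir>/*" already."""
    return any(fnmatch.fnmatch(path, pat) for pat in collision_ignore)

# e.g. is_ignored_collision("/usr/lib/debug/foo.so", ["/usr/lib/debug/*"])
# returns True, so the merge would not be stopped for that file.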
13885 - def _lstat_inode_map(self, path_iter):
13886 - """
13887 - Use lstat to create a map of the form:
13888 - {(st_dev, st_ino) : set([path1, path2, ...])}
13889 - Multiple paths may reference the same inode due to hardlinks.
13890 - All lstat() calls are relative to self.settings['ROOT'].
13891 - """
13892 -
13893 - os = _os_merge
13894 -
13895 - root = self.settings['ROOT']
13896 - inode_map = {}
13897 - for f in path_iter:
13898 - path = os.path.join(root, f.lstrip(os.sep))
13899 - try:
13900 - st = os.lstat(path)
13901 - except OSError as e:
13902 - if e.errno not in (errno.ENOENT, errno.ENOTDIR):
13903 - raise
13904 - del e
13905 - continue
13906 - key = (st.st_dev, st.st_ino)
13907 - paths = inode_map.get(key)
13908 - if paths is None:
13909 - paths = set()
13910 - inode_map[key] = paths
13911 - paths.add(f)
13912 - return inode_map
13913 -
13914 - def _security_check(self, installed_instances):
13915 - if not installed_instances:
13916 - return 0
13917 -
13918 - os = _os_merge
13919 -
13920 - showMessage = self._display_merge
13921 -
13922 - file_paths = set()
13923 - for dblnk in installed_instances:
13924 - file_paths.update(dblnk.getcontents())
13925 - inode_map = {}
13926 - real_paths = set()
13927 - for i, path in enumerate(file_paths):
13928 -
13929 - if os is _os_merge:
13930 - try:
13931 - _unicode_encode(path,
13932 - encoding=_encodings['merge'], errors='strict')
13933 - except UnicodeEncodeError:
13934 - # The package appears to have been merged with a
13935 - # different value of sys.getfilesystemencoding(),
13936 - # so fall back to utf_8 if appropriate.
13937 - try:
13938 - _unicode_encode(path,
13939 - encoding=_encodings['fs'], errors='strict')
13940 - except UnicodeEncodeError:
13941 - pass
13942 - else:
13943 - os = portage.os
13944 -
13945 - try:
13946 - s = os.lstat(path)
13947 - except OSError as e:
13948 - if e.errno not in (errno.ENOENT, errno.ENOTDIR):
13949 - raise
13950 - del e
13951 - continue
13952 - if not stat.S_ISREG(s.st_mode):
13953 - continue
13954 - path = os.path.realpath(path)
13955 - if path in real_paths:
13956 - continue
13957 - real_paths.add(path)
13958 - if s.st_nlink > 1 and \
13959 - s.st_mode & (stat.S_ISUID | stat.S_ISGID):
13960 - k = (s.st_dev, s.st_ino)
13961 - inode_map.setdefault(k, []).append((path, s))
13962 - suspicious_hardlinks = []
13963 - for path_list in inode_map.values():
13964 - path, s = path_list[0]
13965 - if len(path_list) == s.st_nlink:
13966 - # All hardlinks seem to be owned by this package.
13967 - continue
13968 - suspicious_hardlinks.append(path_list)
13969 - if not suspicious_hardlinks:
13970 - return 0
13971 -
13972 - msg = []
13973 - msg.append(_("suid/sgid file(s) "
13974 - "with suspicious hardlink(s):"))
13975 - msg.append("")
13976 - for path_list in suspicious_hardlinks:
13977 - for path, s in path_list:
13978 - msg.append("\t%s" % path)
13979 - msg.append("")
13980 - msg.append(_("See the Gentoo Security Handbook "
13981 - "guide for advice on how to proceed."))
13982 -
13983 - self._eerror("preinst", msg)
13984 -
13985 - return 1
13986 -
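The hardlink heuristic above flags a suid/sgid inode when its filesystem link
count exceeds the number of paths the installed instances account for, since
the extra links may live outside the package. As a standalone sketch with
hypothetical input paths:

import os
import stat

def suspicious_suid_hardlinks(paths):
    """Group suid/sgid regular files by inode and report the groups whose
    st_nlink is larger than the number of known paths."""
    inode_map = {}
    for path in paths:
        try:
            st = os.lstat(path)
        except OSError:
            continue
        if not stat.S_ISREG(st.st_mode):
            continue
        if st.st_nlink > 1 and st.st_mode & (stat.S_ISUID | stat.S_ISGID):
            inode_map.setdefault((st.st_dev, st.st_ino), []).append((path, st))
    return [group for group in inode_map.values()
            if len(group) != group[0][1].st_nlink]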
13987 - def _eqawarn(self, phase, lines):
13988 - self._elog("eqawarn", phase, lines)
13989 -
13990 - def _eerror(self, phase, lines):
13991 - self._elog("eerror", phase, lines)
13992 -
13993 - def _elog(self, funcname, phase, lines):
13994 - func = getattr(portage.elog.messages, funcname)
13995 - if self._scheduler is None:
13996 - for l in lines:
13997 - func(l, phase=phase, key=self.mycpv)
13998 - else:
13999 - background = self.settings.get("PORTAGE_BACKGROUND") == "1"
14000 - log_path = None
14001 - if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
14002 - log_path = self.settings.get("PORTAGE_LOG_FILE")
14003 - out = io.StringIO()
14004 - for line in lines:
14005 - func(line, phase=phase, key=self.mycpv, out=out)
14006 - msg = out.getvalue()
14007 - self._scheduler.output(msg,
14008 - background=background, log_path=log_path)
14009 -
14010 - def _elog_process(self, phasefilter=None):
14011 - cpv = self.mycpv
14012 - if self._pipe is None:
14013 - elog_process(cpv, self.settings, phasefilter=phasefilter)
14014 - else:
14015 - logdir = os.path.join(self.settings["T"], "logging")
14016 - ebuild_logentries = collect_ebuild_messages(logdir)
14017 - # phasefilter is irrelevant for the above collect_ebuild_messages
14018 - # call, since this package instance has a private logdir. However,
14019 - # it may be relevant for the following collect_messages call.
14020 - py_logentries = collect_messages(key=cpv, phasefilter=phasefilter).get(cpv, {})
14021 - logentries = _merge_logentries(py_logentries, ebuild_logentries)
14022 - funcnames = {
14023 - "INFO": "einfo",
14024 - "LOG": "elog",
14025 - "WARN": "ewarn",
14026 - "QA": "eqawarn",
14027 - "ERROR": "eerror"
14028 - }
14029 - str_buffer = []
14030 - for phase, messages in logentries.items():
14031 - for key, lines in messages:
14032 - funcname = funcnames[key]
14033 - if isinstance(lines, str):
14034 - lines = [lines]
14035 - for line in lines:
14036 - for line in line.split('\n'):
14037 - fields = (funcname, phase, cpv, line)
14038 - str_buffer.append(' '.join(fields))
14039 - str_buffer.append('\n')
14040 - if str_buffer:
14041 - str_buffer = _unicode_encode(''.join(str_buffer))
14042 - while str_buffer:
14043 - str_buffer = str_buffer[os.write(self._pipe, str_buffer):]
14044 -
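The final write loop above handles short writes on the pipe: os.write() may
consume only part of the buffer, so the written prefix is sliced off until
nothing remains. The same pattern in isolation (hypothetical helper):

import os

def write_all(fd, data):
    """Write the entire bytes buffer to fd, retrying after partial writes."""
    view = memoryview(data)
    while view:
        view = view[os.write(fd, view):]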
14045 - def _emerge_log(self, msg):
14046 - emergelog(False, msg)
14047 -
14048 - def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
14049 - mydbapi=None, prev_mtimes=None, counter=None):
14050 - """
14051 -
14052 - This function does the following:
14053 -
14054 - calls doebuild(mydo=instprep)
14055 - calls get_ro_checker to retrieve a function for checking whether Portage
14056 - will write to a read-only filesystem, then runs it against the directory list
14057 - calls self._preserve_libs if FEATURES=preserve-libs
14058 - calls self._collision_protect if FEATURES=collision-protect
14059 - calls doebuild(mydo=pkg_preinst)
14060 - Merges the package to the livefs
14061 - unmerges old version (if required)
14062 - calls doebuild(mydo=pkg_postinst)
14063 - calls env_update
14064 -
14065 - @param srcroot: Typically this is ${D}
14066 - @type srcroot: String (Path)
14067 - @param destroot: ignored, self.settings['ROOT'] is used instead
14068 - @type destroot: String (Path)
14069 - @param inforoot: root of the vardb entry (the package's build-info directory)
14070 - @type inforoot: String (Path)
14071 - @param myebuild: path to the ebuild that we are processing
14072 - @type myebuild: String (Path)
14073 - @param mydbapi: dbapi which is handed to doebuild.
14074 - @type mydbapi: portdbapi instance
14075 - @param prev_mtimes: { Filename:mtime } mapping for env_update
14076 - @type prev_mtimes: Dictionary
14077 - @rtype: Integer
14078 - @return:
14079 - 1. 0 on success
14080 - 2. 1 on failure
14081 -
14082 - secondhand is a list of symlinks that have been skipped due to their target
14083 - not existing; we will merge these symlinks at a later time.
14084 - """
14085 -
14086 - os = _os_merge
14087 -
14088 - srcroot = _unicode_decode(srcroot,
14089 - encoding=_encodings['content'], errors='strict')
14090 - destroot = self.settings['ROOT']
14091 - inforoot = _unicode_decode(inforoot,
14092 - encoding=_encodings['content'], errors='strict')
14093 - myebuild = _unicode_decode(myebuild,
14094 - encoding=_encodings['content'], errors='strict')
14095 -
14096 - showMessage = self._display_merge
14097 - srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
14098 -
14099 - if not os.path.isdir(srcroot):
14100 - showMessage(_("!!! Directory Not Found: D='%s'\n") % srcroot,
14101 - level=logging.ERROR, noiselevel=-1)
14102 - return 1
14103 -
14104 - # run instprep internal phase
14105 - doebuild_environment(myebuild, "instprep",
14106 - settings=self.settings, db=mydbapi)
14107 - phase = EbuildPhase(background=False, phase="instprep",
14108 - scheduler=self._scheduler, settings=self.settings)
14109 - phase.start()
14110 - if phase.wait() != os.EX_OK:
14111 - showMessage(_("!!! instprep failed\n"),
14112 - level=logging.ERROR, noiselevel=-1)
14113 - return 1
14114 -
14115 - is_binpkg = self.settings.get("EMERGE_FROM") == "binary"
14116 - slot = ''
14117 - for var_name in ('CHOST', 'SLOT'):
14118 - try:
14119 - with io.open(_unicode_encode(
14120 - os.path.join(inforoot, var_name),
14121 - encoding=_encodings['fs'], errors='strict'),
14122 - mode='r', encoding=_encodings['repo.content'],
14123 - errors='replace') as f:
14124 - val = f.readline().strip()
14125 - except EnvironmentError as e:
14126 - if e.errno != errno.ENOENT:
14127 - raise
14128 - del e
14129 - val = ''
14130 -
14131 - if var_name == 'SLOT':
14132 - slot = val
14133 -
14134 - if not slot.strip():
14135 - slot = self.settings.get(var_name, '')
14136 - if not slot.strip():
14137 - showMessage(_("!!! SLOT is undefined\n"),
14138 - level=logging.ERROR, noiselevel=-1)
14139 - return 1
14140 - write_atomic(os.path.join(inforoot, var_name), slot + '\n')
14141 -
14142 - # This check only applies when built from source, since
14143 - # inforoot values are written just after src_install.
14144 - if not is_binpkg and val != self.settings.get(var_name, ''):
14145 - self._eqawarn('preinst',
14146 - [_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
14147 - {"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
14148 -
14149 - def eerror(lines):
14150 - self._eerror("preinst", lines)
14151 -
14152 - if not os.path.exists(self.dbcatdir):
14153 - ensure_dirs(self.dbcatdir)
14154 -
14155 - # NOTE: We use SLOT obtained from the inforoot
14156 - # directory, in order to support USE=multislot.
14157 - # Use _pkg_str to discard the sub-slot part if necessary.
14158 - slot = _pkg_str(self.mycpv, slot=slot).slot
14159 - cp = self.mysplit[0]
14160 - slot_atom = "%s:%s" % (cp, slot)
14161 -
14162 - self.lockdb()
14163 - try:
14164 - # filter any old-style virtual matches
14165 - slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom)
14166 - if cpv_getkey(cpv) == cp]
14167 -
14168 - if self.mycpv not in slot_matches and \
14169 - self.vartree.dbapi.cpv_exists(self.mycpv):
14170 - # handle multislot or unapplied slotmove
14171 - slot_matches.append(self.mycpv)
14172 -
14173 - others_in_slot = []
14174 - for cur_cpv in slot_matches:
14175 - # Clone the config in case one of these has to be unmerged,
14176 - # since we need it to have private ${T} etc... for things
14177 - # like elog.
14178 - settings_clone = portage.config(clone=self.settings)
14179 - # This reset ensures that there is no unintended leakage
14180 - # of variables which should not be shared.
14181 - settings_clone.reset()
14182 - settings_clone.setcpv(cur_cpv, mydb=self.vartree.dbapi)
14183 - if self._preserve_libs and "preserve-libs" in \
14184 - settings_clone["PORTAGE_RESTRICT"].split():
14185 - self._preserve_libs = False
14186 - others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
14187 - settings=settings_clone,
14188 - vartree=self.vartree, treetype="vartree",
14189 - scheduler=self._scheduler, pipe=self._pipe))
14190 - finally:
14191 - self.unlockdb()
14192 -
14193 - # If any instance has RESTRICT=preserve-libs, then
14194 - # restrict it for all instances.
14195 - if not self._preserve_libs:
14196 - for dblnk in others_in_slot:
14197 - dblnk._preserve_libs = False
14198 -
14199 - retval = self._security_check(others_in_slot)
14200 - if retval:
14201 - return retval
14202 -
14203 - if slot_matches:
14204 - # Used by self.isprotected().
14205 - max_dblnk = None
14206 - max_counter = -1
14207 - for dblnk in others_in_slot:
14208 - cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
14209 - if cur_counter > max_counter:
14210 - max_counter = cur_counter
14211 - max_dblnk = dblnk
14212 - self._installed_instance = max_dblnk
14213 -
14214 - # Apply INSTALL_MASK before collision-protect, since it may
14215 - # be useful to avoid collisions in some scenarios.
14216 - # We cannot detect here whether this is needed, as INSTALL_MASK can be
14217 - # modified by bashrc files.
14218 - phase = MiscFunctionsProcess(background=False,
14219 - commands=["preinst_mask"], phase="preinst",
14220 - scheduler=self._scheduler, settings=self.settings)
14221 - phase.start()
14222 - phase.wait()
14223 - try:
14224 - with io.open(_unicode_encode(os.path.join(inforoot, "INSTALL_MASK"),
14225 - encoding=_encodings['fs'], errors='strict'),
14226 - mode='r', encoding=_encodings['repo.content'],
14227 - errors='replace') as f:
14228 - install_mask = InstallMask(f.read())
14229 - except EnvironmentError:
14230 - install_mask = None
14231 -
14232 - if install_mask:
14233 - install_mask_dir(self.settings["ED"], install_mask)
14234 - if any(x in self.settings.features for x in ('nodoc', 'noman', 'noinfo')):
14235 - try:
14236 - os.rmdir(os.path.join(self.settings["ED"], 'usr', 'share'))
14237 - except OSError:
14238 - pass
14239 -
14240 - # We check for unicode encoding issues after src_install. However,
14241 - # the check must be repeated here for binary packages (it's
14242 - # inexpensive since we call os.walk() here anyway).
14243 - unicode_errors = []
14244 - line_ending_re = re.compile('[\n\r]')
14245 - srcroot_len = len(srcroot)
14246 - ed_len = len(self.settings["ED"])
14247 - eprefix_len = len(self.settings["EPREFIX"])
14248 -
14249 - while True:
14250 -
14251 - unicode_error = False
14252 - eagain_error = False
14253 -
14254 - filelist = []
14255 - linklist = []
14256 - paths_with_newlines = []
14257 - def onerror(e):
14258 - raise
14259 - walk_iter = os.walk(srcroot, onerror=onerror)
14260 - while True:
14261 - try:
14262 - parent, dirs, files = next(walk_iter)
14263 - except StopIteration:
14264 - break
14265 - except OSError as e:
14266 - if e.errno != errno.EAGAIN:
14267 - raise
14268 - # Observed with PyPy 1.8.
14269 - eagain_error = True
14270 - break
14271 -
14272 - try:
14273 - parent = _unicode_decode(parent,
14274 - encoding=_encodings['merge'], errors='strict')
14275 - except UnicodeDecodeError:
14276 - new_parent = _unicode_decode(parent,
14277 - encoding=_encodings['merge'], errors='replace')
14278 - new_parent = _unicode_encode(new_parent,
14279 - encoding='ascii', errors='backslashreplace')
14280 - new_parent = _unicode_decode(new_parent,
14281 - encoding=_encodings['merge'], errors='replace')
14282 - os.rename(parent, new_parent)
14283 - unicode_error = True
14284 - unicode_errors.append(new_parent[ed_len:])
14285 - break
14286 -
14287 - for fname in files:
14288 - try:
14289 - fname = _unicode_decode(fname,
14290 - encoding=_encodings['merge'], errors='strict')
14291 - except UnicodeDecodeError:
14292 - fpath = portage._os.path.join(
14293 - parent.encode(_encodings['merge']), fname)
14294 - new_fname = _unicode_decode(fname,
14295 - encoding=_encodings['merge'], errors='replace')
14296 - new_fname = _unicode_encode(new_fname,
14297 - encoding='ascii', errors='backslashreplace')
14298 - new_fname = _unicode_decode(new_fname,
14299 - encoding=_encodings['merge'], errors='replace')
14300 - new_fpath = os.path.join(parent, new_fname)
14301 - os.rename(fpath, new_fpath)
14302 - unicode_error = True
14303 - unicode_errors.append(new_fpath[ed_len:])
14304 - fname = new_fname
14305 - fpath = new_fpath
14306 - else:
14307 - fpath = os.path.join(parent, fname)
14308 -
14309 - relative_path = fpath[srcroot_len:]
14310 -
14311 - if line_ending_re.search(relative_path) is not None:
14312 - paths_with_newlines.append(relative_path)
14313 -
14314 - file_mode = os.lstat(fpath).st_mode
14315 - if stat.S_ISREG(file_mode):
14316 - filelist.append(relative_path)
14317 - elif stat.S_ISLNK(file_mode):
14318 - # Note: os.walk puts symlinks to directories in the "dirs"
14319 - # list and it does not traverse them since that could lead
14320 - # to an infinite recursion loop.
14321 - linklist.append(relative_path)
14322 -
14323 - myto = _unicode_decode(
14324 - _os.readlink(_unicode_encode(fpath,
14325 - encoding=_encodings['merge'], errors='strict')),
14326 - encoding=_encodings['merge'], errors='replace')
14327 - if line_ending_re.search(myto) is not None:
14328 - paths_with_newlines.append(relative_path)
14329 -
14330 - if unicode_error:
14331 - break
14332 -
14333 - if not (unicode_error or eagain_error):
14334 - break
14335 -
14336 - if unicode_errors:
14337 - self._elog("eqawarn", "preinst",
14338 - _merge_unicode_error(unicode_errors))
14339 -
14340 - if paths_with_newlines:
14341 - msg = []
14342 - msg.append(_("This package installs one or more files containing line ending characters:"))
14343 - msg.append("")
14344 - paths_with_newlines.sort()
14345 - for f in paths_with_newlines:
14346 - msg.append("\t/%s" % (f.replace("\n", "\\n").replace("\r", "\\r")))
14347 - msg.append("")
14348 - msg.append(_("package %s NOT merged") % self.mycpv)
14349 - msg.append("")
14350 - eerror(msg)
14351 - return 1
14352 -
14353 - # If there are no files to merge, and an installed package in the same
14354 - # slot has files, it probably means that something went wrong.
14355 - if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
14356 - not filelist and not linklist and others_in_slot:
14357 - installed_files = None
14358 - for other_dblink in others_in_slot:
14359 - installed_files = other_dblink.getcontents()
14360 - if not installed_files:
14361 - continue
14362 - from textwrap import wrap
14363 - wrap_width = 72
14364 - msg = []
14365 - d = {
14366 - "new_cpv":self.mycpv,
14367 - "old_cpv":other_dblink.mycpv
14368 - }
14369 - msg.extend(wrap(_("The '%(new_cpv)s' package will not install "
14370 - "any files, but the currently installed '%(old_cpv)s'"
14371 - " package has the following files: ") % d, wrap_width))
14372 - msg.append("")
14373 - msg.extend(sorted(installed_files))
14374 - msg.append("")
14375 - msg.append(_("package %s NOT merged") % self.mycpv)
14376 - msg.append("")
14377 - msg.extend(wrap(
14378 - _("Manually run `emerge --unmerge =%s` if you "
14379 - "really want to remove the above files. Set "
14380 - "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
14381 - "/etc/portage/make.conf if you do not want to "
14382 - "abort in cases like this.") % other_dblink.mycpv,
14383 - wrap_width))
14384 - eerror(msg)
14385 - if installed_files:
14386 - return 1
14387 -
14388 - # Make sure the ebuild environment is initialized and that ${T}/elog
14389 - # exists for logging of collision-protect eerror messages.
14390 - if myebuild is None:
14391 - myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
14392 - doebuild_environment(myebuild, "preinst",
14393 - settings=self.settings, db=mydbapi)
14394 - self.settings["REPLACING_VERSIONS"] = " ".join(
14395 - [portage.versions.cpv_getversion(other.mycpv)
14396 - for other in others_in_slot])
14397 - prepare_build_dirs(settings=self.settings, cleanup=cleanup)
14398 -
14399 - # check for package collisions
14400 - blockers = []
14401 - for blocker in self._blockers or []:
14402 - blocker = self.vartree.dbapi._dblink(blocker.cpv)
14403 - # It may have been unmerged before lock(s)
14404 - # were acquired.
14405 - if blocker.exists():
14406 - blockers.append(blocker)
14407 -
14408 - collisions, internal_collisions, dirs_ro, symlink_collisions, plib_collisions = \
14409 - self._collision_protect(srcroot, destroot,
14410 - others_in_slot + blockers, filelist, linklist)
14411 -
14412 - # Check for read-only filesystems.
14413 - ro_checker = get_ro_checker()
14414 - rofilesystems = ro_checker(dirs_ro)
14415 -
14416 - if rofilesystems:
14417 - msg = _("One or more files installed to this package are "
14418 - "set to be installed to read-only filesystems. "
14419 - "Please mount the following filesystems as read-write "
14420 - "and retry.")
14421 - msg = textwrap.wrap(msg, 70)
14422 - msg.append("")
14423 - for f in rofilesystems:
14424 - msg.append("\t%s" % f)
14425 - msg.append("")
14426 - self._elog("eerror", "preinst", msg)
14427 -
14428 - msg = _("Package '%s' NOT merged due to read-only file systems.") % \
14429 - self.settings.mycpv
14430 - msg += _(" If necessary, refer to your elog "
14431 - "messages for the whole content of the above message.")
14432 - msg = textwrap.wrap(msg, 70)
14433 - eerror(msg)
14434 - return 1
14435 -
14436 - if internal_collisions:
14437 - msg = _("Package '%s' has internal collisions between non-identical files "
14438 - "(located in separate directories in the installation image (${D}) "
14439 - "corresponding to merged directories in the target "
14440 - "filesystem (${ROOT})):") % self.settings.mycpv
14441 - msg = textwrap.wrap(msg, 70)
14442 - msg.append("")
14443 - for k, v in sorted(internal_collisions.items(), key=operator.itemgetter(0)):
14444 - msg.append("\t%s" % os.path.join(destroot, k.lstrip(os.path.sep)))
14445 - for (file1, file2), differences in sorted(v.items()):
14446 - msg.append("\t\t%s" % os.path.join(destroot, file1.lstrip(os.path.sep)))
14447 - msg.append("\t\t%s" % os.path.join(destroot, file2.lstrip(os.path.sep)))
14448 - msg.append("\t\t\tDifferences: %s" % ", ".join(differences))
14449 - msg.append("")
14450 - self._elog("eerror", "preinst", msg)
14451 -
14452 - msg = _("Package '%s' NOT merged due to internal collisions "
14453 - "between non-identical files.") % self.settings.mycpv
14454 - msg += _(" If necessary, refer to your elog messages for the whole "
14455 - "content of the above message.")
14456 - eerror(textwrap.wrap(msg, 70))
14457 - return 1
14458 -
14459 - if symlink_collisions:
14460 - # Symlink collisions need to be distinguished from other types
14461 - # of collisions, in order to avoid confusion (see bug #409359).
14462 - msg = _("Package '%s' has one or more collisions "
14463 - "between symlinks and directories, which is explicitly "
14464 - "forbidden by PMS section 13.4 (see bug #326685):") % \
14465 - (self.settings.mycpv,)
14466 - msg = textwrap.wrap(msg, 70)
14467 - msg.append("")
14468 - for f in symlink_collisions:
14469 - msg.append("\t%s" % os.path.join(destroot,
14470 - f.lstrip(os.path.sep)))
14471 - msg.append("")
14472 - self._elog("eerror", "preinst", msg)
14473 -
14474 - if collisions:
14475 - collision_protect = "collision-protect" in self.settings.features
14476 - protect_owned = "protect-owned" in self.settings.features
14477 - msg = _("This package will overwrite one or more files that"
14478 - " may belong to other packages (see list below).")
14479 - if not (collision_protect or protect_owned):
14480 - msg += _(" Add either \"collision-protect\" or"
14481 - " \"protect-owned\" to FEATURES in"
14482 - " make.conf if you would like the merge to abort"
14483 - " in cases like this. See the make.conf man page for"
14484 - " more information about these features.")
14485 - if self.settings.get("PORTAGE_QUIET") != "1":
14486 - msg += _(" You can use a command such as"
14487 - " `portageq owners / <filename>` to identify the"
14488 - " installed package that owns a file. If portageq"
14489 - " reports that only one package owns a file then do NOT"
14490 - " file a bug report. A bug report is only useful if it"
14491 - " identifies at least two or more packages that are known"
14492 - " to install the same file(s)."
14493 - " If a collision occurs and you"
14494 - " can not explain where the file came from then you"
14495 - " should simply ignore the collision since there is not"
14496 - " enough information to determine if a real problem"
14497 - " exists. Please do NOT file a bug report at"
14498 - " https://bugs.gentoo.org/ unless you report exactly which"
14499 - " two packages install the same file(s). See"
14500 - " https://wiki.gentoo.org/wiki/Knowledge_Base:Blockers"
14501 - " for tips on how to solve the problem. And once again,"
14502 - " please do NOT file a bug report unless you have"
14503 - " completely understood the above message.")
14504 -
14505 - self.settings["EBUILD_PHASE"] = "preinst"
14506 - from textwrap import wrap
14507 - msg = wrap(msg, 70)
14508 - if collision_protect:
14509 - msg.append("")
14510 - msg.append(_("package %s NOT merged") % self.settings.mycpv)
14511 - msg.append("")
14512 - msg.append(_("Detected file collision(s):"))
14513 - msg.append("")
14514 -
14515 - for f in collisions:
14516 - msg.append("\t%s" % \
14517 - os.path.join(destroot, f.lstrip(os.path.sep)))
14518 -
14519 - eerror(msg)
14520 -
14521 - owners = None
14522 - if collision_protect or protect_owned or symlink_collisions:
14523 - msg = []
14524 - msg.append("")
14525 - msg.append(_("Searching all installed"
14526 - " packages for file collisions..."))
14527 - msg.append("")
14528 - msg.append(_("Press Ctrl-C to Stop"))
14529 - msg.append("")
14530 - eerror(msg)
14531 -
14532 - if len(collisions) > 20:
14533 - # get_owners is slow for large numbers of files, so
14534 - # don't look them all up.
14535 - collisions = collisions[:20]
14536 -
14537 - pkg_info_strs = {}
14538 - self.lockdb()
14539 - try:
14540 - owners = self.vartree.dbapi._owners.get_owners(collisions)
14541 - self.vartree.dbapi.flush_cache()
14542 -
14543 - for pkg in owners:
14544 - pkg = self.vartree.dbapi._pkg_str(pkg.mycpv, None)
14545 - pkg_info_str = "%s%s%s" % (pkg,
14546 - _slot_separator, pkg.slot)
14547 - if pkg.repo != _unknown_repo:
14548 - pkg_info_str += "%s%s" % (_repo_separator,
14549 - pkg.repo)
14550 - pkg_info_strs[pkg] = pkg_info_str
14551 -
14552 - finally:
14553 - self.unlockdb()
14554 -
14555 - for pkg, owned_files in owners.items():
14556 - msg = []
14557 - msg.append(pkg_info_strs[pkg.mycpv])
14558 - for f in sorted(owned_files):
14559 - msg.append("\t%s" % os.path.join(destroot,
14560 - f.lstrip(os.path.sep)))
14561 - msg.append("")
14562 - eerror(msg)
14563 -
14564 - if not owners:
14565 - eerror([_("None of the installed"
14566 - " packages claim the file(s)."), ""])
14567 -
14568 - symlink_abort_msg = _("Package '%s' NOT merged since it has "
14569 - "one or more collisions between symlinks and directories, "
14570 - "which is explicitly forbidden by PMS section 13.4 "
14571 - "(see bug #326685).")
14572 -
14573 - # The explanation about the collision and how to solve
14574 - # it may not be visible via a scrollback buffer, especially
14575 - # if the number of file collisions is large. Therefore,
14576 - # show a summary at the end.
14577 - abort = False
14578 - if symlink_collisions:
14579 - abort = True
14580 - msg = symlink_abort_msg % (self.settings.mycpv,)
14581 - elif collision_protect:
14582 - abort = True
14583 - msg = _("Package '%s' NOT merged due to file collisions.") % \
14584 - self.settings.mycpv
14585 - elif protect_owned and owners:
14586 - abort = True
14587 - msg = _("Package '%s' NOT merged due to file collisions.") % \
14588 - self.settings.mycpv
14589 - else:
14590 - msg = _("Package '%s' merged despite file collisions.") % \
14591 - self.settings.mycpv
14592 - msg += _(" If necessary, refer to your elog "
14593 - "messages for the whole content of the above message.")
14594 - eerror(wrap(msg, 70))
14595 -
14596 - if abort:
14597 - return 1
14598 -
14599 - # The merge process may move files out of the image directory,
14600 - # which causes invalidation of the .installed flag.
14601 - try:
14602 - os.unlink(os.path.join(
14603 - os.path.dirname(normalize_path(srcroot)), ".installed"))
14604 - except OSError as e:
14605 - if e.errno != errno.ENOENT:
14606 - raise
14607 - del e
14608 -
14609 - self.dbdir = self.dbtmpdir
14610 - self.delete()
14611 - ensure_dirs(self.dbtmpdir)
14612 -
14613 - downgrade = False
14614 - if self._installed_instance is not None and \
14615 - vercmp(self.mycpv.version,
14616 - self._installed_instance.mycpv.version) < 0:
14617 - downgrade = True
14618 -
14619 - if self._installed_instance is not None:
14620 - rval = self._pre_merge_backup(self._installed_instance, downgrade)
14621 - if rval != os.EX_OK:
14622 - showMessage(_("!!! FAILED preinst: ") +
14623 - "quickpkg: %s\n" % rval,
14624 - level=logging.ERROR, noiselevel=-1)
14625 - return rval
14626 -
14627 - # run preinst script
14628 - showMessage(_(">>> Merging %(cpv)s to %(destroot)s\n") % \
14629 - {"cpv":self.mycpv, "destroot":destroot})
14630 - phase = EbuildPhase(background=False, phase="preinst",
14631 - scheduler=self._scheduler, settings=self.settings)
14632 - phase.start()
14633 - a = phase.wait()
14634 -
14635 - # XXX: Decide how to handle failures here.
14636 - if a != os.EX_OK:
14637 - showMessage(_("!!! FAILED preinst: ")+str(a)+"\n",
14638 - level=logging.ERROR, noiselevel=-1)
14639 - return a
14640 -
14641 - # copy "info" files (like SLOT, CFLAGS, etc.) into the database
14642 - for x in os.listdir(inforoot):
14643 - self.copyfile(inforoot+"/"+x)
14644 -
14645 - # write local package counter for recording
14646 - if counter is None:
14647 - counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
14648 - with io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
14649 - encoding=_encodings['fs'], errors='strict'),
14650 - mode='w', encoding=_encodings['repo.content'],
14651 - errors='backslashreplace') as f:
14652 - f.write("%s" % counter)
14653 -
14654 - self.updateprotect()
14655 -
14656 - #if we have a file containing previously-merged config file md5sums, grab it.
14657 - self.vartree.dbapi._fs_lock()
14658 - try:
14659 - # This prunes any libraries from the registry that no longer
14660 - # exist on disk, in case they have been manually removed.
14661 - # This has to be done prior to merge, since after merge it
14662 - # is non-trivial to distinguish these files from files
14663 - # that have just been merged.
14664 - plib_registry = self.vartree.dbapi._plib_registry
14665 - if plib_registry:
14666 - plib_registry.lock()
14667 - try:
14668 - plib_registry.load()
14669 - plib_registry.store()
14670 - finally:
14671 - plib_registry.unlock()
14672 -
14673 - # Always behave like --noconfmem is enabled for downgrades
14674 - # so that people who don't know about this option are less
14675 - # likely to get confused when doing upgrade/downgrade cycles.
14676 - cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
14677 - if "NOCONFMEM" in self.settings or downgrade:
14678 - cfgfiledict["IGNORE"]=1
14679 - else:
14680 - cfgfiledict["IGNORE"]=0
14681 -
14682 - rval = self._merge_contents(srcroot, destroot, cfgfiledict)
14683 - if rval != os.EX_OK:
14684 - return rval
14685 - finally:
14686 - self.vartree.dbapi._fs_unlock()
14687 -
14688 - # These caches are populated during collision-protect and the data
14689 - # they contain is now invalid. It's very important to invalidate
14690 - # the contents_inodes cache so that FEATURES=unmerge-orphans
14691 - # doesn't unmerge anything that belongs to this package that has
14692 - # just been merged.
14693 - for dblnk in others_in_slot:
14694 - dblnk._clear_contents_cache()
14695 - self._clear_contents_cache()
14696 -
14697 - linkmap = self.vartree.dbapi._linkmap
14698 - plib_registry = self.vartree.dbapi._plib_registry
14699 - # We initialize preserve_paths to an empty set rather
14700 - # than None here because it plays an important role
14701 - # in prune_plib_registry logic by serving to indicate
14702 - # that we have a replacement for a package that's
14703 - # being unmerged.
14704 -
14705 - preserve_paths = set()
14706 - needed = None
14707 - if not (self._linkmap_broken or linkmap is None or
14708 - plib_registry is None):
14709 - self.vartree.dbapi._fs_lock()
14710 - plib_registry.lock()
14711 - try:
14712 - plib_registry.load()
14713 - needed = os.path.join(inforoot, linkmap._needed_aux_key)
14714 - self._linkmap_rebuild(include_file=needed)
14715 -
14716 - # Preserve old libs if they are still in use
14717 - # TODO: Handle cases where the previous instance
14718 - # has already been uninstalled but it still has some
14719 - # preserved libraries in the registry that we may
14720 - # want to preserve here.
14721 - preserve_paths = self._find_libs_to_preserve()
14722 - finally:
14723 - plib_registry.unlock()
14724 - self.vartree.dbapi._fs_unlock()
14725 -
14726 - if preserve_paths:
14727 - self._add_preserve_libs_to_contents(preserve_paths)
14728 -
14729 - # If portage is reinstalling itself, remove the old
14730 - # version now since we want to use the temporary
14731 - # PORTAGE_BIN_PATH that will be removed when we return.
14732 - reinstall_self = False
14733 - if self.myroot == "/" and \
14734 - match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
14735 - reinstall_self = True
14736 -
14737 - emerge_log = self._emerge_log
14738 -
14739 - # If we have any preserved libraries then autoclean
14740 - # is forced so that preserve-libs logic doesn't have
14741 - # to account for the additional complexity of the
14742 - # AUTOCLEAN=no mode.
14743 - autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes" \
14744 - or preserve_paths
14745 -
14746 - if autoclean:
14747 - emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,))
14748 -
14749 - others_in_slot.append(self) # self has just been merged
14750 - for dblnk in list(others_in_slot):
14751 - if dblnk is self:
14752 - continue
14753 - if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
14754 - continue
14755 - showMessage(_(">>> Safely unmerging already-installed instance...\n"))
14756 - emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,))
14757 - others_in_slot.remove(dblnk) # dblnk will unmerge itself now
14758 - dblnk._linkmap_broken = self._linkmap_broken
14759 - dblnk.settings["REPLACED_BY_VERSION"] = portage.versions.cpv_getversion(self.mycpv)
14760 - dblnk.settings.backup_changes("REPLACED_BY_VERSION")
14761 - unmerge_rval = dblnk.unmerge(ldpath_mtimes=prev_mtimes,
14762 - others_in_slot=others_in_slot, needed=needed,
14763 - preserve_paths=preserve_paths)
14764 - dblnk.settings.pop("REPLACED_BY_VERSION", None)
14765 -
14766 - if unmerge_rval == os.EX_OK:
14767 - emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,))
14768 - else:
14769 - emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))
14770 -
14771 - self.lockdb()
14772 - try:
14773 - # TODO: Check status and abort if necessary.
14774 - dblnk.delete()
14775 - finally:
14776 - self.unlockdb()
14777 - showMessage(_(">>> Original instance of package unmerged safely.\n"))
14778 -
14779 - if len(others_in_slot) > 1:
14780 - showMessage(colorize("WARN", _("WARNING:"))
14781 - + _(" AUTOCLEAN is disabled. This can cause serious"
14782 - " problems due to overlapping packages.\n"),
14783 - level=logging.WARN, noiselevel=-1)
14784 -
14785 - # We hold both directory locks.
14786 - self.dbdir = self.dbpkgdir
14787 - self.lockdb()
14788 - try:
14789 - self.delete()
14790 - _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
14791 - self._merged_path(self.dbpkgdir, os.lstat(self.dbpkgdir))
14792 - self.vartree.dbapi._cache_delta.recordEvent(
14793 - "add", self.mycpv, slot, counter)
14794 - finally:
14795 - self.unlockdb()
14796 -
14797 - # Check for file collisions with blocking packages
14798 - # and remove any colliding files from their CONTENTS
14799 - # since they now belong to this package.
14800 - self._clear_contents_cache()
14801 - contents = self.getcontents()
14802 - destroot_len = len(destroot) - 1
14803 - self.lockdb()
14804 - try:
14805 - for blocker in blockers:
14806 - self.vartree.dbapi.removeFromContents(blocker, iter(contents),
14807 - relative_paths=False)
14808 - finally:
14809 - self.unlockdb()
14810 -
14811 - plib_registry = self.vartree.dbapi._plib_registry
14812 - if plib_registry:
14813 - self.vartree.dbapi._fs_lock()
14814 - plib_registry.lock()
14815 - try:
14816 - plib_registry.load()
14817 -
14818 - if preserve_paths:
14819 - # keep track of the libs we preserved
14820 - plib_registry.register(self.mycpv, slot, counter,
14821 - sorted(preserve_paths))
14822 -
14823 - # Unregister any preserved libs that this package has overwritten
14824 - # and update the contents of the packages that owned them.
14825 - plib_dict = plib_registry.getPreservedLibs()
14826 - for cpv, paths in plib_collisions.items():
14827 - if cpv not in plib_dict:
14828 - continue
14829 - has_vdb_entry = False
14830 - if cpv != self.mycpv:
14831 - # If we've replaced another instance with the
14832 - # same cpv then the vdb entry no longer belongs
14833 - # to it, so we'll have to get the slot and counter
14834 - # from plib_registry._data instead.
14835 - self.vartree.dbapi.lock()
14836 - try:
14837 - try:
14838 - slot = self.vartree.dbapi._pkg_str(cpv, None).slot
14839 - counter = self.vartree.dbapi.cpv_counter(cpv)
14840 - except (KeyError, InvalidData):
14841 - pass
14842 - else:
14843 - has_vdb_entry = True
14844 - self.vartree.dbapi.removeFromContents(
14845 - cpv, paths)
14846 - finally:
14847 - self.vartree.dbapi.unlock()
14848 -
14849 - if not has_vdb_entry:
14850 - # It's possible for previously unmerged packages
14851 - # to have preserved libs in the registry, so try
14852 - # to retrieve the slot and counter from there.
14853 - has_registry_entry = False
14854 - for plib_cps, (plib_cpv, plib_counter, plib_paths) in \
14855 - plib_registry._data.items():
14856 - if plib_cpv != cpv:
14857 - continue
14858 - try:
14859 - cp, slot = plib_cps.split(":", 1)
14860 - except ValueError:
14861 - continue
14862 - counter = plib_counter
14863 - has_registry_entry = True
14864 - break
14865 -
14866 - if not has_registry_entry:
14867 - continue
14868 -
14869 - remaining = [f for f in plib_dict[cpv] if f not in paths]
14870 - plib_registry.register(cpv, slot, counter, remaining)
14871 -
14872 - plib_registry.store()
14873 - finally:
14874 - plib_registry.unlock()
14875 - self.vartree.dbapi._fs_unlock()
14876 -
14877 - self.vartree.dbapi._add(self)
14878 - contents = self.getcontents()
14879 -
14880 - #do postinst script
14881 - self.settings["PORTAGE_UPDATE_ENV"] = \
14882 - os.path.join(self.dbpkgdir, "environment.bz2")
14883 - self.settings.backup_changes("PORTAGE_UPDATE_ENV")
14884 - try:
14885 - phase = EbuildPhase(background=False, phase="postinst",
14886 - scheduler=self._scheduler, settings=self.settings)
14887 - phase.start()
14888 - a = phase.wait()
14889 - if a == os.EX_OK:
14890 - showMessage(_(">>> %s merged.\n") % self.mycpv)
14891 - finally:
14892 - self.settings.pop("PORTAGE_UPDATE_ENV", None)
14893 -
14894 - if a != os.EX_OK:
14895 - # It's stupid to bail out here, so keep going regardless of
14896 - # phase return code.
14897 - self._postinst_failure = True
14898 - self._elog("eerror", "postinst", [
14899 - _("FAILED postinst: %s") % (a,),
14900 - ])
14901 -
14902 - #update environment settings, library paths. DO NOT change symlinks.
14903 - env_update(
14904 - target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
14905 - contents=contents, env=self.settings,
14906 - writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
14907 -
14908 - # For gcc upgrades, preserved libs have to be removed after
14909 - # the library path has been updated.
14910 - self._prune_plib_registry()
14911 - self._post_merge_sync()
14912 -
14913 - return os.EX_OK
14914 -
14915 - def _new_backup_path(self, p):
14916 - """
14917 - This works for any type of path, such as a regular file, symlink,
14918 - or directory. The parent directory is assumed to exist.
14919 - The returned filename is of the form p + '.backup.' + x, where
14920 - x guarantees that the returned path does not exist yet.
14921 - """
14922 - os = _os_merge
14923 -
14924 - x = -1
14925 - while True:
14926 - x += 1
14927 - backup_p = '%s.backup.%04d' % (p, x)
14928 - try:
14929 - os.lstat(backup_p)
14930 - except OSError:
14931 - break
14932 -
14933 - return backup_p
14934 -
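For illustration, a hypothetical call (path and result are invented values,
not taken from this patch):

    # assuming no '/etc/foo.conf.backup.*' entry exists yet
    backup = self._new_backup_path("/etc/foo.conf")
    # -> '/etc/foo.conf.backup.0000'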
14935 - def _merge_contents(self, srcroot, destroot, cfgfiledict):
14936 -
14937 - cfgfiledict_orig = cfgfiledict.copy()
14938 -
14939 - # open CONTENTS file (possibly overwriting old one) for recording
14940 - # Use atomic_ofstream for automatic coercion of raw bytes to
14941 - # unicode, in order to prevent TypeError when writing raw bytes
14942 - # to TextIOWrapper with python2.
14943 - outfile = atomic_ofstream(_unicode_encode(
14944 - os.path.join(self.dbtmpdir, 'CONTENTS'),
14945 - encoding=_encodings['fs'], errors='strict'),
14946 - mode='w', encoding=_encodings['repo.content'],
14947 - errors='backslashreplace')
14948 -
14949 - # Don't bump mtimes on merge since some applications require
14950 - # preservation of timestamps. This means that the unmerge phase must
14951 - # check to see if the file belongs to an installed instance in the same
14952 - # slot.
14953 - mymtime = None
14954 -
14955 - # set umask to 0 for merging; save the previous umask in prevmask (umask is a process-global setting)
14956 - prevmask = os.umask(0)
14957 - secondhand = []
14958 -
14959 - # we do a first merge; this will recurse through all files in our srcroot but also build up a
14960 - # "second hand" of symlinks to merge later
14961 - if self.mergeme(srcroot, destroot, outfile, secondhand,
14962 - self.settings["EPREFIX"].lstrip(os.sep), cfgfiledict, mymtime):
14963 - return 1
14964 -
14965 - # now, it's time for dealing with our second hand; we'll loop until we can't merge anymore. The rest are
14966 - # broken symlinks. We'll merge them too.
14967 - lastlen = 0
14968 - while len(secondhand) and len(secondhand)!=lastlen:
14969 - # clear the thirdhand. Anything from our second hand that
14970 - # couldn't get merged will be added to thirdhand.
14971 -
14972 - thirdhand = []
14973 - if self.mergeme(srcroot, destroot, outfile, thirdhand,
14974 - secondhand, cfgfiledict, mymtime):
14975 - return 1
14976 -
14977 - #swap hands
14978 - lastlen = len(secondhand)
14979 -
14980 - # our thirdhand now becomes our secondhand. It's ok to throw
14981 - # away secondhand since thirdhand contains all the stuff that
14982 - # couldn't be merged.
14983 - secondhand = thirdhand
14984 -
14985 - if len(secondhand):
14986 - # force merge of remaining symlinks (broken or circular; oh well)
14987 - if self.mergeme(srcroot, destroot, outfile, None,
14988 - secondhand, cfgfiledict, mymtime):
14989 - return 1
14990 -
14991 - #restore umask
14992 - os.umask(prevmask)
14993 -
14994 - # flush and close the CONTENTS file
14995 - outfile.flush()
14996 - outfile.close()
14997 -
14998 - # write out our collection of md5sums
14999 - if cfgfiledict != cfgfiledict_orig:
15000 - cfgfiledict.pop("IGNORE", None)
15001 - try:
15002 - writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
15003 - except InvalidLocation:
15004 - self.settings._init_dirs()
15005 - writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
15006 -
15007 - return os.EX_OK
15008 -
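The secondhand/thirdhand handling above is a fixed-point iteration: deferred
symlinks are retried until a full pass makes no progress, and any survivors
are force-merged. A minimal standalone sketch of that control flow, where
merge_one is a hypothetical callback rather than anything in this patch:

    def merge_deferred(items, merge_one):
        # Retry deferred items until a full pass merges nothing new.
        pending = list(items)
        while pending:
            still_pending = [x for x in pending if not merge_one(x, force=False)]
            if len(still_pending) == len(pending):
                break  # no progress; the rest are broken or circular links
            pending = still_pending
        for x in pending:
            merge_one(x, force=True)  # merge broken/circular symlinks anyway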
15009 - def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
15010 - """
15011 -
15012 - This function handles actual merging of the package contents to the livefs.
15013 - It also handles config protection.
15014 -
15015 - @param srcroot: Where are we copying files from (usually ${D})
15016 - @type srcroot: String (Path)
15017 - @param destroot: Typically ${ROOT}
15018 - @type destroot: String (Path)
15019 - @param outfile: File to log operations to
15020 - @type outfile: File Object
15021 - @param secondhand: A list of items to merge in a second pass (usually
15022 - symlinks that point to non-existing files and may get merged later)
15023 - @type secondhand: List
15024 - @param stufftomerge: Either a directory to merge, or a list of items.
15025 - @type stufftomerge: String or List
15026 - @param cfgfiledict: { File:mtime } mapping for config_protected files
15027 - @type cfgfiledict: Dictionary
15028 - @param thismtime: None or new mtime for merged files (expressed in seconds
15029 - in Python <3.3 and nanoseconds in Python >=3.3)
15030 - @type thismtime: None or Int
15031 - @rtype: None or Boolean
15032 - @return:
15033 - 1. True on failure
15034 - 2. None otherwise
15035 -
15036 - """
15037 -
15038 - showMessage = self._display_merge
15039 - writemsg = self._display_merge
15040 -
15041 - os = _os_merge
15042 - sep = os.sep
15043 - join = os.path.join
15044 - srcroot = normalize_path(srcroot).rstrip(sep) + sep
15045 - destroot = normalize_path(destroot).rstrip(sep) + sep
15046 - calc_prelink = "prelink-checksums" in self.settings.features
15047 -
15048 - protect_if_modified = \
15049 - "config-protect-if-modified" in self.settings.features and \
15050 - self._installed_instance is not None
15051 -
15052 - # this is supposed to merge a list of files. There are two forms of argument passing.
15053 - if isinstance(stufftomerge, str):
15054 - #A directory is specified. Figure out protection paths, listdir() it and process it.
15055 - mergelist = [join(stufftomerge, child) for child in \
15056 - os.listdir(join(srcroot, stufftomerge))]
15057 - else:
15058 - mergelist = stufftomerge[:]
15059 -
15060 - while mergelist:
15061 -
15062 - relative_path = mergelist.pop()
15063 - mysrc = join(srcroot, relative_path)
15064 - mydest = join(destroot, relative_path)
15065 - # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
15066 - myrealdest = join(sep, relative_path)
15067 - # stat file once, test using S_* macros many times (faster that way)
15068 - mystat = os.lstat(mysrc)
15069 - mymode = mystat[stat.ST_MODE]
15070 - mymd5 = None
15071 - myto = None
15072 -
15073 - mymtime = mystat.st_mtime_ns
15074 -
15075 - if stat.S_ISREG(mymode):
15076 - mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
15077 - elif stat.S_ISLNK(mymode):
15078 - # The file name of mysrc and the actual file that it points to
15079 - # will have earlier been forcefully converted to the 'merge'
15080 - # encoding if necessary, but the content of the symbolic link
15081 - # may need to be forcefully converted here.
15082 - myto = _os.readlink(_unicode_encode(mysrc,
15083 - encoding=_encodings['merge'], errors='strict'))
15084 - try:
15085 - myto = _unicode_decode(myto,
15086 - encoding=_encodings['merge'], errors='strict')
15087 - except UnicodeDecodeError:
15088 - myto = _unicode_decode(myto, encoding=_encodings['merge'],
15089 - errors='replace')
15090 - myto = _unicode_encode(myto, encoding='ascii',
15091 - errors='backslashreplace')
15092 - myto = _unicode_decode(myto, encoding=_encodings['merge'],
15093 - errors='replace')
15094 - os.unlink(mysrc)
15095 - os.symlink(myto, mysrc)
15096 -
15097 - mymd5 = md5(_unicode_encode(myto)).hexdigest()
15098 -
15099 - protected = False
15100 - if stat.S_ISLNK(mymode) or stat.S_ISREG(mymode):
15101 - protected = self.isprotected(mydest)
15102 -
15103 - if stat.S_ISREG(mymode) and \
15104 - mystat.st_size == 0 and \
15105 - os.path.basename(mydest).startswith(".keep"):
15106 - protected = False
15107 -
15108 - destmd5 = None
15109 - mydest_link = None
15110 - # handy variables; mydest is the target object on the live filesystem;
15111 - # mysrc is the source object in the temporary install dir
15112 - try:
15113 - mydstat = os.lstat(mydest)
15114 - mydmode = mydstat.st_mode
15115 - if protected:
15116 - if stat.S_ISLNK(mydmode):
15117 - # Read symlink target as bytes, in case the
15118 - # target path has a bad encoding.
15119 - mydest_link = _os.readlink(
15120 - _unicode_encode(mydest,
15121 - encoding=_encodings['merge'],
15122 - errors='strict'))
15123 - mydest_link = _unicode_decode(mydest_link,
15124 - encoding=_encodings['merge'],
15125 - errors='replace')
15126 -
15127 - # For protection of symlinks, the md5
15128 - # of the link target path string is used
15129 - # for cfgfiledict (symlinks are
15130 - # protected since bug #485598).
15131 - destmd5 = md5(_unicode_encode(mydest_link)).hexdigest()
15132 -
15133 - elif stat.S_ISREG(mydmode):
15134 - destmd5 = perform_md5(mydest,
15135 - calc_prelink=calc_prelink)
15136 - except (FileNotFound, OSError) as e:
15137 - if isinstance(e, OSError) and e.errno != errno.ENOENT:
15138 - raise
15139 - #dest file doesn't exist
15140 - mydstat = None
15141 - mydmode = None
15142 - mydest_link = None
15143 - destmd5 = None
15144 -
15145 - moveme = True
15146 - if protected:
15147 - mydest, protected, moveme = self._protect(cfgfiledict,
15148 - protect_if_modified, mymd5, myto, mydest,
15149 - myrealdest, mydmode, destmd5, mydest_link)
15150 -
15151 - zing = "!!!"
15152 - if not moveme:
15153 - # confmem rejected this update
15154 - zing = "---"
15155 -
15156 - if stat.S_ISLNK(mymode):
15157 - # we are merging a symbolic link
15158 - # Pass in the symlink target in order to bypass the
15159 - # os.readlink() call inside abssymlink(), since that
15160 - # call is unsafe if the merge encoding is not ascii
15161 - # or utf_8 (see bug #382021).
15162 - myabsto = abssymlink(mysrc, target=myto)
15163 -
15164 - if myabsto.startswith(srcroot):
15165 - myabsto = myabsto[len(srcroot):]
15166 - myabsto = myabsto.lstrip(sep)
15167 - if self.settings and self.settings["D"]:
15168 - if myto.startswith(self.settings["D"]):
15169 - myto = myto[len(self.settings["D"])-1:]
15170 - # myrealto contains the path of the real file to which this symlink points.
15171 - # we can simply test for existence of this file to see if the target has been merged yet
15172 - myrealto = normalize_path(os.path.join(destroot, myabsto))
15173 - if mydmode is not None and stat.S_ISDIR(mydmode):
15174 - if not protected:
15175 - # we can't merge a symlink over a directory
15176 - newdest = self._new_backup_path(mydest)
15177 - msg = []
15178 - msg.append("")
15179 - msg.append(_("Installation of a symlink is blocked by a directory:"))
15180 - msg.append(" '%s'" % mydest)
15181 - msg.append(_("This symlink will be merged with a different name:"))
15182 - msg.append(" '%s'" % newdest)
15183 - msg.append("")
15184 - self._eerror("preinst", msg)
15185 - mydest = newdest
15186 -
15187 - # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
15188 - if (secondhand != None) and (not os.path.exists(myrealto)):
15189 - # either the target directory doesn't exist yet or the target file doesn't exist -- or
15190 - # the target is a broken symlink. We will add this file to our "second hand" and merge
15191 - # it later.
15192 - secondhand.append(mysrc[len(srcroot):])
15193 - continue
15194 - # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
15195 - if moveme:
15196 - zing = ">>>"
15197 - mymtime = movefile(mysrc, mydest, newmtime=thismtime,
15198 - sstat=mystat, mysettings=self.settings,
15199 - encoding=_encodings['merge'])
15200 -
15201 - try:
15202 - self._merged_path(mydest, os.lstat(mydest))
15203 - except OSError:
15204 - pass
15205 -
15206 - if mymtime != None:
15207 - # Use lexists, since if the target happens to be a broken
15208 - # symlink then that should trigger an independent warning.
15209 - if not (os.path.lexists(myrealto) or
15210 - os.path.lexists(join(srcroot, myabsto))):
15211 - self._eqawarn('preinst',
15212 - [_("QA Notice: Symbolic link /%s points to /%s which does not exist.")
15213 - % (relative_path, myabsto)])
15214 -
15215 - showMessage("%s %s -> %s\n" % (zing, mydest, myto))
15216 - outfile.write(
15217 - self._format_contents_line(
15218 - node_type="sym",
15219 - abs_path=myrealdest,
15220 - symlink_target=myto,
15221 - mtime_ns=mymtime,
15222 - )
15223 - )
15224 - else:
15225 - showMessage(_("!!! Failed to move file.\n"),
15226 - level=logging.ERROR, noiselevel=-1)
15227 - showMessage("!!! %s -> %s\n" % (mydest, myto),
15228 - level=logging.ERROR, noiselevel=-1)
15229 - return 1
15230 - elif stat.S_ISDIR(mymode):
15231 - # we are merging a directory
15232 - if mydmode != None:
15233 - # destination exists
15234 -
15235 - if bsd_chflags:
15236 - # Save then clear flags on dest.
15237 - dflags = mydstat.st_flags
15238 - if dflags != 0:
15239 - bsd_chflags.lchflags(mydest, 0)
15240 -
15241 - if not stat.S_ISLNK(mydmode) and \
15242 - not os.access(mydest, os.W_OK):
15243 - pkgstuff = pkgsplit(self.pkg)
15244 - writemsg(_("\n!!! Cannot write to '%s'.\n") % mydest, noiselevel=-1)
15245 - writemsg(_("!!! Please check permissions and directories for broken symlinks.\n"))
15246 - writemsg(_("!!! You may start the merge process again by using ebuild:\n"))
15247 - writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
15248 - writemsg(_("!!! And finish by running this: env-update\n\n"))
15249 - return 1
15250 -
15251 - if stat.S_ISDIR(mydmode) or \
15252 - (stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
15253 - # a symlink to an existing directory will work for us; keep it:
15254 - showMessage("--- %s/\n" % mydest)
15255 - if bsd_chflags:
15256 - bsd_chflags.lchflags(mydest, dflags)
15257 - else:
15258 - # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
15259 - backup_dest = self._new_backup_path(mydest)
15260 - msg = []
15261 - msg.append("")
15262 - msg.append(_("Installation of a directory is blocked by a file:"))
15263 - msg.append(" '%s'" % mydest)
15264 - msg.append(_("This file will be renamed to a different name:"))
15265 - msg.append(" '%s'" % backup_dest)
15266 - msg.append("")
15267 - self._eerror("preinst", msg)
15268 - if movefile(mydest, backup_dest,
15269 - mysettings=self.settings,
15270 - encoding=_encodings['merge']) is None:
15271 - return 1
15272 - showMessage(_("bak %s %s.backup\n") % (mydest, mydest),
15273 - level=logging.ERROR, noiselevel=-1)
15274 - #now create our directory
15275 - try:
15276 - if self.settings.selinux_enabled():
15277 - _selinux_merge.mkdir(mydest, mysrc)
15278 - else:
15279 - os.mkdir(mydest)
15280 - except OSError as e:
15281 - # Error handling should be equivalent to
15282 - # portage.util.ensure_dirs() for cases
15283 - # like bug #187518.
15284 - if e.errno in (errno.EEXIST,):
15285 - pass
15286 - elif os.path.isdir(mydest):
15287 - pass
15288 - else:
15289 - raise
15290 - del e
15291 -
15292 - if bsd_chflags:
15293 - bsd_chflags.lchflags(mydest, dflags)
15294 - os.chmod(mydest, mystat[0])
15295 - os.chown(mydest, mystat[4], mystat[5])
15296 - showMessage(">>> %s/\n" % mydest)
15297 - else:
15298 - try:
15299 - #destination doesn't exist
15300 - if self.settings.selinux_enabled():
15301 - _selinux_merge.mkdir(mydest, mysrc)
15302 - else:
15303 - os.mkdir(mydest)
15304 - except OSError as e:
15305 - # Error handling should be equivalent to
15306 - # portage.util.ensure_dirs() for cases
15307 - # like bug #187518.
15308 - if e.errno in (errno.EEXIST,):
15309 - pass
15310 - elif os.path.isdir(mydest):
15311 - pass
15312 - else:
15313 - raise
15314 - del e
15315 - os.chmod(mydest, mystat[0])
15316 - os.chown(mydest, mystat[4], mystat[5])
15317 - showMessage(">>> %s/\n" % mydest)
15318 -
15319 - try:
15320 - self._merged_path(mydest, os.lstat(mydest))
15321 - except OSError:
15322 - pass
15323 -
15324 - outfile.write(
15325 - self._format_contents_line(node_type="dir", abs_path=myrealdest)
15326 - )
15327 - # recurse and merge this directory
15328 - mergelist.extend(join(relative_path, child) for child in
15329 - os.listdir(join(srcroot, relative_path)))
15330 -
15331 - elif stat.S_ISREG(mymode):
15332 - # we are merging a regular file
15333 - if not protected and \
15334 - mydmode is not None and stat.S_ISDIR(mydmode):
15335 - # install of destination is blocked by an existing directory with the same name
15336 - newdest = self._new_backup_path(mydest)
15337 - msg = []
15338 - msg.append("")
15339 - msg.append(_("Installation of a regular file is blocked by a directory:"))
15340 - msg.append(" '%s'" % mydest)
15341 - msg.append(_("This file will be merged with a different name:"))
15342 - msg.append(" '%s'" % newdest)
15343 - msg.append("")
15344 - self._eerror("preinst", msg)
15345 - mydest = newdest
15346 -
15347 - # whether config protected or not, we merge the new file the
15348 - # same way, unless moveme=0 (blocked by a directory)
15349 - if moveme:
15350 - # Create hardlinks only for source files that already exist
15351 - # as hardlinks (having identical st_dev and st_ino).
15352 - hardlink_key = (mystat.st_dev, mystat.st_ino)
15353 -
15354 - hardlink_candidates = self._hardlink_merge_map.get(hardlink_key)
15355 - if hardlink_candidates is None:
15356 - hardlink_candidates = []
15357 - self._hardlink_merge_map[hardlink_key] = hardlink_candidates
15358 -
15359 - mymtime = movefile(mysrc, mydest, newmtime=thismtime,
15360 - sstat=mystat, mysettings=self.settings,
15361 - hardlink_candidates=hardlink_candidates,
15362 - encoding=_encodings['merge'])
15363 - if mymtime is None:
15364 - return 1
15365 - hardlink_candidates.append(mydest)
15366 - zing = ">>>"
15367 -
15368 - try:
15369 - self._merged_path(mydest, os.lstat(mydest))
15370 - except OSError:
15371 - pass
15372 -
15373 - if mymtime != None:
15374 - outfile.write(
15375 - self._format_contents_line(
15376 - node_type="obj",
15377 - abs_path=myrealdest,
15378 - md5_digest=mymd5,
15379 - mtime_ns=mymtime,
15380 - )
15381 - )
15382 - showMessage("%s %s\n" % (zing,mydest))
15383 - else:
15384 - # we are merging a fifo or device node
15385 - zing = "!!!"
15386 - if mydmode is None:
15387 - # destination doesn't exist
15388 - if movefile(mysrc, mydest, newmtime=thismtime,
15389 - sstat=mystat, mysettings=self.settings,
15390 - encoding=_encodings['merge']) is not None:
15391 - zing = ">>>"
15392 -
15393 - try:
15394 - self._merged_path(mydest, os.lstat(mydest))
15395 - except OSError:
15396 - pass
15397 -
15398 - else:
15399 - return 1
15400 - if stat.S_ISFIFO(mymode):
15401 - outfile.write(
15402 - self._format_contents_line(node_type="fif", abs_path=myrealdest)
15403 - )
15404 - else:
15405 - outfile.write(
15406 - self._format_contents_line(node_type="dev", abs_path=myrealdest)
15407 - )
15408 - showMessage(zing + " " + mydest + "\n")
15409 -
15410 - def _protect(self, cfgfiledict, protect_if_modified, src_md5,
15411 - src_link, dest, dest_real, dest_mode, dest_md5, dest_link):
15412 -
15413 - move_me = True
15414 - protected = True
15415 - force = False
15416 - k = False
15417 - if self._installed_instance is not None:
15418 - k = self._installed_instance._match_contents(dest_real)
15419 - if k is not False:
15420 - if dest_mode is None:
15421 - # If the file doesn't exist, then it may
15422 - # have been deleted or renamed by the
15423 - # admin. Therefore, force the file to be
15424 - # merged with a ._cfg name, so that the
15425 - # admin will be prompted for this update
15426 - # (see bug #523684).
15427 - force = True
15428 -
15429 - elif protect_if_modified:
15430 - data = self._installed_instance.getcontents()[k]
15431 - if data[0] == "obj" and data[2] == dest_md5:
15432 - protected = False
15433 - elif data[0] == "sym" and data[2] == dest_link:
15434 - protected = False
15435 -
15436 - if protected and dest_mode is not None:
15437 - # we have a protection path; enable config file management.
15438 - if src_md5 == dest_md5:
15439 - protected = False
15440 -
15441 - elif src_md5 == cfgfiledict.get(dest_real, [None])[0]:
15442 - # An identical update has previously been
15443 - # merged. Skip it unless the user has chosen
15444 - # --noconfmem.
15445 - move_me = protected = bool(cfgfiledict["IGNORE"])
15446 -
15447 - if protected and \
15448 - (dest_link is not None or src_link is not None) and \
15449 - dest_link != src_link:
15450 - # If either one is a symlink, and they are not
15451 - # identical symlinks, then force config protection.
15452 - force = True
15453 -
15454 - if move_me:
15455 - # Merging a new file, so update confmem.
15456 - cfgfiledict[dest_real] = [src_md5]
15457 - elif dest_md5 == cfgfiledict.get(dest_real, [None])[0]:
15458 - # A previously remembered update has been
15459 - # accepted, so it is removed from confmem.
15460 - del cfgfiledict[dest_real]
15461 -
15462 - if protected and move_me:
15463 - dest = new_protect_filename(dest,
15464 - newmd5=(dest_link or src_md5),
15465 - force=force)
15466 -
15467 - return dest, protected, move_me
15468 -
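Ignoring the symlink and force-merge special cases, the confmem logic above
reduces to a small decision table. A simplified sketch for regular files
(this helper is illustrative only, not the actual implementation):

    def confmem_decision(src_md5, dest_md5, remembered_md5, ignore_confmem):
        # Mirrors the core branches of _protect() for regular files.
        if src_md5 == dest_md5:
            return "install in place (new file identical to existing one)"
        if src_md5 == remembered_md5 and not ignore_confmem:
            return "skip (an identical update was already merged once)"
        return "install under a protected ._cfg name"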
15469 - def _format_contents_line(
15470 - self, node_type, abs_path, md5_digest=None, symlink_target=None, mtime_ns=None
15471 - ):
15472 - fields = [node_type, abs_path]
15473 - if md5_digest is not None:
15474 - fields.append(md5_digest)
15475 - elif symlink_target is not None:
15476 - fields.append("-> {}".format(symlink_target))
15477 - if mtime_ns is not None:
15478 - fields.append(str(mtime_ns // 1000000000))
15479 - return "{}\n".format(" ".join(fields))
15480 -
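For reference, the entry shapes this helper emits look like the following
(paths, digest, and mtime are illustrative values only); note that mtime_ns
is truncated to whole seconds for the CONTENTS file:

    dir /usr/share/doc/foo-1.0
    obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1642156321
    sym /usr/bin/foo1 -> foo 1642156321
    fif /run/foo.fifo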
15481 - def _merged_path(self, path, lstatobj, exists=True):
15482 - previous_path = self._device_path_map.get(lstatobj.st_dev)
15483 - if previous_path is None or previous_path is False or \
15484 - (exists and len(path) < len(previous_path)):
15485 - if exists:
15486 - self._device_path_map[lstatobj.st_dev] = path
15487 - else:
15488 - # This entry is used to indicate that we've unmerged
15489 - # a file from this device, and later, this entry is
15490 - # replaced by a parent directory.
15491 - self._device_path_map[lstatobj.st_dev] = False
15492 -
15493 - def _post_merge_sync(self):
15494 - """
15495 - Call this after merge or unmerge, in order to sync relevant files to
15496 - disk and avoid data-loss in the event of a power failure. This method
15497 - does nothing if FEATURES=merge-sync is disabled.
15498 - """
15499 - if not self._device_path_map or \
15500 - "merge-sync" not in self.settings.features:
15501 - return
15502 -
15503 - returncode = None
15504 - if platform.system() == "Linux":
15505 -
15506 - paths = []
15507 - for path in self._device_path_map.values():
15508 - if path is not False:
15509 - paths.append(path)
15510 - paths = tuple(paths)
15511 -
15512 - proc = SyncfsProcess(paths=paths,
15513 - scheduler=(self._scheduler or asyncio._safe_loop()))
15514 - proc.start()
15515 - returncode = proc.wait()
15516 -
15517 - if returncode is None or returncode != os.EX_OK:
15518 - try:
15519 - proc = subprocess.Popen(["sync"])
15520 - except EnvironmentError:
15521 - pass
15522 - else:
15523 - proc.wait()
15524 -
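A minimal Linux-only sketch of the same flush-then-fallback idea, calling
syncfs(2) through ctypes instead of portage's SyncfsProcess (hypothetical
helper; the glibc soname is assumed):

    import ctypes
    import os
    import subprocess

    def syncfs_or_sync(paths):
        # Flush each filesystem containing one of `paths`; on any
        # failure, fall back to a global sync(1) invocation.
        try:
            libc = ctypes.CDLL("libc.so.6", use_errno=True)
            for path in paths:
                fd = os.open(path, os.O_RDONLY)
                try:
                    if libc.syncfs(fd) != 0:
                        raise OSError(ctypes.get_errno(), "syncfs failed")
                finally:
                    os.close(fd)
        except OSError:
            subprocess.call(["sync"])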
15525 - @_slot_locked
15526 - def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
15527 - mydbapi=None, prev_mtimes=None, counter=None):
15528 - """
15529 - @param myroot: ignored, self._eroot is used instead
15530 - """
15531 - myroot = None
15532 - retval = -1
15533 - parallel_install = "parallel-install" in self.settings.features
15534 - if not parallel_install:
15535 - self.lockdb()
15536 - self.vartree.dbapi._bump_mtime(self.mycpv)
15537 - if self._scheduler is None:
15538 - self._scheduler = SchedulerInterface(asyncio._safe_loop())
15539 - try:
15540 - retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
15541 - cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes,
15542 - counter=counter)
15543 -
15544 - # If PORTAGE_BUILDDIR doesn't exist, then it probably means
15545 - # fail-clean is enabled, and the success/die hooks have
15546 - # already been called by EbuildPhase.
15547 - if os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
15548 -
15549 - if retval == os.EX_OK:
15550 - phase = 'success_hooks'
15551 - else:
15552 - phase = 'die_hooks'
15553 -
15554 - ebuild_phase = MiscFunctionsProcess(
15555 - background=False, commands=[phase],
15556 - scheduler=self._scheduler, settings=self.settings)
15557 - ebuild_phase.start()
15558 - ebuild_phase.wait()
15559 - self._elog_process()
15560 -
15561 - if 'noclean' not in self.settings.features and \
15562 - (retval == os.EX_OK or \
15563 - 'fail-clean' in self.settings.features):
15564 - if myebuild is None:
15565 - myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
15566 -
15567 - doebuild_environment(myebuild, "clean",
15568 - settings=self.settings, db=mydbapi)
15569 - phase = EbuildPhase(background=False, phase="clean",
15570 - scheduler=self._scheduler, settings=self.settings)
15571 - phase.start()
15572 - phase.wait()
15573 - finally:
15574 - self.settings.pop('REPLACING_VERSIONS', None)
15575 - if self.vartree.dbapi._linkmap is None:
15576 - # preserve-libs is entirely disabled
15577 - pass
15578 - else:
15579 - self.vartree.dbapi._linkmap._clear_cache()
15580 - self.vartree.dbapi._bump_mtime(self.mycpv)
15581 - if not parallel_install:
15582 - self.unlockdb()
15583 -
15584 - if retval == os.EX_OK and self._postinst_failure:
15585 - retval = portage.const.RETURNCODE_POSTINST_FAILURE
15586 -
15587 - return retval
15588 -
15589 - def getstring(self,name):
15590 - "returns contents of a file with whitespace converted to spaces"
15591 - if not os.path.exists(self.dbdir+"/"+name):
15592 - return ""
15593 - with io.open(
15594 - _unicode_encode(os.path.join(self.dbdir, name),
15595 - encoding=_encodings['fs'], errors='strict'),
15596 - mode='r', encoding=_encodings['repo.content'], errors='replace'
15597 - ) as f:
15598 - mydata = f.read().split()
15599 - return " ".join(mydata)
15600 -
15601 - def copyfile(self,fname):
15602 - shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
15603 -
15604 - def getfile(self,fname):
15605 - if not os.path.exists(self.dbdir+"/"+fname):
15606 - return ""
15607 - with io.open(_unicode_encode(os.path.join(self.dbdir, fname),
15608 - encoding=_encodings['fs'], errors='strict'),
15609 - mode='r', encoding=_encodings['repo.content'], errors='replace'
15610 - ) as f:
15611 - return f.read()
15612 -
15613 - def setfile(self,fname,data):
15614 - kwargs = {}
15615 - if fname == 'environment.bz2' or not isinstance(data, str):
15616 - kwargs['mode'] = 'wb'
15617 - else:
15618 - kwargs['mode'] = 'w'
15619 - kwargs['encoding'] = _encodings['repo.content']
15620 - write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
15621 -
15622 - def getelements(self,ename):
15623 - if not os.path.exists(self.dbdir+"/"+ename):
15624 - return []
15625 - with io.open(_unicode_encode(
15626 - os.path.join(self.dbdir, ename),
15627 - encoding=_encodings['fs'], errors='strict'),
15628 - mode='r', encoding=_encodings['repo.content'], errors='replace'
15629 - ) as f:
15630 - mylines = f.readlines()
15631 - myreturn = []
15632 - for x in mylines:
15633 - for y in x[:-1].split():
15634 - myreturn.append(y)
15635 - return myreturn
15636 -
15637 - def setelements(self,mylist,ename):
15638 - with io.open(_unicode_encode(
15639 - os.path.join(self.dbdir, ename),
15640 - encoding=_encodings['fs'], errors='strict'),
15641 - mode='w', encoding=_encodings['repo.content'],
15642 - errors='backslashreplace') as f:
15643 - for x in mylist:
15644 - f.write("%s\n" % x)
15645 -
15646 - def isregular(self):
15647 - "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
15648 - return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
15649 -
15650 - def _pre_merge_backup(self, backup_dblink, downgrade):
15651 -
15652 - if ("unmerge-backup" in self.settings.features or
15653 - (downgrade and "downgrade-backup" in self.settings.features)):
15654 - return self._quickpkg_dblink(backup_dblink, False, None)
15655 -
15656 - return os.EX_OK
15657 -
15658 - def _pre_unmerge_backup(self, background):
15659 -
15660 - if "unmerge-backup" in self.settings.features :
15661 - logfile = None
15662 - if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
15663 - logfile = self.settings.get("PORTAGE_LOG_FILE")
15664 - return self._quickpkg_dblink(self, background, logfile)
15665 -
15666 - return os.EX_OK
15667 -
15668 - def _quickpkg_dblink(self, backup_dblink, background, logfile):
15669 -
15670 - build_time = backup_dblink.getfile('BUILD_TIME')
15671 - try:
15672 - build_time = int(build_time.strip())
15673 - except ValueError:
15674 - build_time = 0
15675 -
15676 - trees = QueryCommand.get_db()[self.settings["EROOT"]]
15677 - bintree = trees["bintree"]
15678 -
15679 - for binpkg in reversed(
15680 - bintree.dbapi.match('={}'.format(backup_dblink.mycpv))):
15681 - if binpkg.build_time == build_time:
15682 - return os.EX_OK
15683 -
15684 - self.lockdb()
15685 - try:
15686 -
15687 - if not backup_dblink.exists():
15688 - # It got unmerged by a concurrent process.
15689 - return os.EX_OK
15690 -
15691 - # Call quickpkg for support of QUICKPKG_DEFAULT_OPTS and stuff.
15692 - quickpkg_binary = os.path.join(self.settings["PORTAGE_BIN_PATH"],
15693 - "quickpkg")
15694 -
15695 - if not os.access(quickpkg_binary, os.X_OK):
15696 - # If not running from the source tree, use PATH.
15697 - quickpkg_binary = find_binary("quickpkg")
15698 - if quickpkg_binary is None:
15699 - self._display_merge(
15700 - _("%s: command not found") % "quickpkg",
15701 - level=logging.ERROR, noiselevel=-1)
15702 - return 127
15703 -
15704 - # Let quickpkg inherit the global vartree config's env.
15705 - env = dict(self.vartree.settings.items())
15706 - env["__PORTAGE_INHERIT_VARDB_LOCK"] = "1"
15707 -
15708 - pythonpath = [x for x in env.get('PYTHONPATH', '').split(":") if x]
15709 - if not pythonpath or \
15710 - not os.path.samefile(pythonpath[0], portage._pym_path):
15711 - pythonpath.insert(0, portage._pym_path)
15712 - env['PYTHONPATH'] = ":".join(pythonpath)
15713 -
15714 - quickpkg_proc = SpawnProcess(
15715 - args=[portage._python_interpreter, quickpkg_binary,
15716 - "=%s" % (backup_dblink.mycpv,)],
15717 - background=background, env=env,
15718 - scheduler=self._scheduler, logfile=logfile)
15719 - quickpkg_proc.start()
15720 -
15721 - return quickpkg_proc.wait()
15722 -
15723 - finally:
15724 - self.unlockdb()
15725 -
15726 - def merge(mycat, mypkg, pkgloc, infloc,
15727 - myroot=None, settings=None, myebuild=None,
15728 - mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
15729 - scheduler=None, fd_pipes=None):
15730 - """
15731 - @param myroot: ignored, settings['EROOT'] is used instead
15732 - """
15733 - myroot = None
15734 - if settings is None:
15735 - raise TypeError("settings argument is required")
15736 - if not os.access(settings['EROOT'], os.W_OK):
15737 - writemsg(_("Permission denied: access('%s', W_OK)\n") % settings['EROOT'],
15738 - noiselevel=-1)
15739 - return errno.EACCES
15740 - background = (settings.get('PORTAGE_BACKGROUND') == '1')
15741 - merge_task = MergeProcess(
15742 - mycat=mycat, mypkg=mypkg, settings=settings,
15743 - treetype=mytree, vartree=vartree,
15744 - scheduler=(scheduler or asyncio._safe_loop()),
15745 - background=background, blockers=blockers, pkgloc=pkgloc,
15746 - infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
15747 - prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'),
15748 - fd_pipes=fd_pipes)
15749 - merge_task.start()
15750 - retcode = merge_task.wait()
15751 - return retcode
15752 -
15753 - def unmerge(cat, pkg, myroot=None, settings=None,
15754 - mytrimworld=None, vartree=None,
15755 - ldpath_mtimes=None, scheduler=None):
15756 - """
15757 - @param myroot: ignored, settings['EROOT'] is used instead
15758 - @param mytrimworld: ignored
15759 - """
15760 - myroot = None
15761 - if settings is None:
15762 - raise TypeError("settings argument is required")
15763 - mylink = dblink(cat, pkg, settings=settings, treetype="vartree",
15764 - vartree=vartree, scheduler=scheduler)
15765 - vartree = mylink.vartree
15766 - parallel_install = "parallel-install" in settings.features
15767 - if not parallel_install:
15768 - mylink.lockdb()
15769 - try:
15770 - if mylink.exists():
15771 - retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
15772 - if retval == os.EX_OK:
15773 - mylink.lockdb()
15774 - try:
15775 - mylink.delete()
15776 - finally:
15777 - mylink.unlockdb()
15778 - return retval
15779 - return os.EX_OK
15780 - finally:
15781 - if vartree.dbapi._linkmap is None:
15782 - # preserve-libs is entirely disabled
15783 - pass
15784 - else:
15785 - vartree.dbapi._linkmap._clear_cache()
15786 - if not parallel_install:
15787 - mylink.unlockdb()
15788 + """
15789 + This class provides an interface to the installed package database.
15790 + At present this is implemented as a text backend in /var/db/pkg.
15791 + """
15792 +
15793 + _normalize_needed = re.compile(r"//|^[^/]|./$|(^|/)\.\.?(/|$)")
15794 +
15795 + _contents_re = re.compile(
15796 + r"^("
15797 + + r"(?P<dir>(dev|dir|fif) (.+))|"
15798 + + r"(?P<obj>(obj) (.+) (\S+) (\d+))|"
15799 + + r"(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>("
15800 + + r"\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))"
15801 + + r")$"
15802 + )
15803 +
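To make the three alternatives in _contents_re concrete, the same pattern
can be exercised standalone (the sample lines below are fabricated):

    import re

    contents_re = re.compile(
        r"^("
        r"(?P<dir>(dev|dir|fif) (.+))|"
        r"(?P<obj>(obj) (.+) (\S+) (\d+))|"
        r"(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>("
        r"\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))"
        r")$"
    )

    for line in (
        "dir /usr/share/doc/foo-1.0",
        "obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1642156321",
        "sym /usr/bin/foo1 -> foo 1642156321",
    ):
        m = contents_re.match(line)
        print({k: v for k, v in m.groupdict().items() if v is not None})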
15804 + # These files are generated by emerge, so we need to remove
15805 + # them when they are the only thing left in a directory.
15806 + _infodir_cleanup = frozenset(["dir", "dir.old"])
15807 +
15808 + _ignored_unlink_errnos = (errno.EBUSY, errno.ENOENT, errno.ENOTDIR, errno.EISDIR)
15809 +
15810 + _ignored_rmdir_errnos = (
15811 + errno.EEXIST,
15812 + errno.ENOTEMPTY,
15813 + errno.EBUSY,
15814 + errno.ENOENT,
15815 + errno.ENOTDIR,
15816 + errno.EISDIR,
15817 + errno.EPERM,
15818 + )
15819 +
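These errno whitelists are consumed by the unlink/rmdir paths during
unmerge; the intended pattern is roughly the following (try_unlink is a
hypothetical illustration, not part of this patch):

    import errno
    import os

    IGNORED_UNLINK_ERRNOS = (errno.EBUSY, errno.ENOENT, errno.ENOTDIR, errno.EISDIR)

    def try_unlink(path):
        # Treat "already gone" style races as success; re-raise the rest.
        try:
            os.unlink(path)
        except OSError as e:
            if e.errno not in IGNORED_UNLINK_ERRNOS:
                raise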
15820 + def __init__(
15821 + self,
15822 + cat,
15823 + pkg,
15824 + myroot=None,
15825 + settings=None,
15826 + treetype=None,
15827 + vartree=None,
15828 + blockers=None,
15829 + scheduler=None,
15830 + pipe=None,
15831 + ):
15832 + """
15833 + Creates a dblink object for a given CPV.
15834 + The given CPV need not already be present in the database.
15835 +
15836 + @param cat: Category
15837 + @type cat: String
15838 + @param pkg: Package (PV)
15839 + @type pkg: String
15840 + @param myroot: ignored, settings['ROOT'] is used instead
15841 + @type myroot: String (Path)
15842 + @param settings: Typically portage.settings
15843 + @type settings: portage.config
15844 + @param treetype: one of ['porttree','bintree','vartree']
15845 + @type treetype: String
15846 + @param vartree: an instance of vartree corresponding to myroot.
15847 + @type vartree: vartree
15848 + """
15849 +
15850 + if settings is None:
15851 + raise TypeError("settings argument is required")
15852 +
15853 + mysettings = settings
15854 + self._eroot = mysettings["EROOT"]
15855 + self.cat = cat
15856 + self.pkg = pkg
15857 + self.mycpv = self.cat + "/" + self.pkg
15858 + if self.mycpv == settings.mycpv and isinstance(settings.mycpv, _pkg_str):
15859 + self.mycpv = settings.mycpv
15860 + else:
15861 + self.mycpv = _pkg_str(self.mycpv)
15862 + self.mysplit = list(self.mycpv.cpv_split[1:])
15863 + self.mysplit[0] = self.mycpv.cp
15864 + self.treetype = treetype
15865 + if vartree is None:
15866 + vartree = portage.db[self._eroot]["vartree"]
15867 + self.vartree = vartree
15868 + self._blockers = blockers
15869 + self._scheduler = scheduler
15870 + self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
15871 + self.dbcatdir = self.dbroot + "/" + cat
15872 + self.dbpkgdir = self.dbcatdir + "/" + pkg
15873 + self.dbtmpdir = self.dbcatdir + "/" + MERGING_IDENTIFIER + pkg
15874 + self.dbdir = self.dbpkgdir
15875 + self.settings = mysettings
15876 + self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
15877 +
15878 + self.myroot = self.settings["ROOT"]
15879 + self._installed_instance = None
15880 + self.contentscache = None
15881 + self._contents_inodes = None
15882 + self._contents_basenames = None
15883 + self._linkmap_broken = False
15884 + self._device_path_map = {}
15885 + self._hardlink_merge_map = {}
15886 + self._hash_key = (self._eroot, self.mycpv)
15887 + self._protect_obj = None
15888 + self._pipe = pipe
15889 + self._postinst_failure = False
15890 +
15891 + # When necessary, this attribute is modified for
15892 + # compliance with RESTRICT=preserve-libs.
15893 + self._preserve_libs = "preserve-libs" in mysettings.features
15894 + self._contents = ContentsCaseSensitivityManager(self)
15895 + self._slot_locks = []
15896 +
15897 + def __hash__(self):
15898 + return hash(self._hash_key)
15899 +
15900 + def __eq__(self, other):
15901 + return isinstance(other, dblink) and self._hash_key == other._hash_key
15902 +
15903 + def _get_protect_obj(self):
15904 +
15905 + if self._protect_obj is None:
15906 + self._protect_obj = ConfigProtect(
15907 + self._eroot,
15908 + portage.util.shlex_split(self.settings.get("CONFIG_PROTECT", "")),
15909 + portage.util.shlex_split(self.settings.get("CONFIG_PROTECT_MASK", "")),
15910 + case_insensitive=("case-insensitive-fs" in self.settings.features),
15911 + )
15912 +
15913 + return self._protect_obj
15914 +
15915 + def isprotected(self, obj):
15916 + return self._get_protect_obj().isprotected(obj)
15917 +
15918 + def updateprotect(self):
15919 + self._get_protect_obj().updateprotect()
15920 +
15921 + def lockdb(self):
15922 + self.vartree.dbapi.lock()
15923 +
15924 + def unlockdb(self):
15925 + self.vartree.dbapi.unlock()
15926 +
15927 + def _slot_locked(f):
15928 + """
15929 + A decorator function which, when parallel-install is enabled,
15930 + acquires and releases slot locks for the current package and
15931 + blocked packages. This is required in order to account for
15932 + interactions with blocked packages (involving resolution of
15933 + file collisions).
15934 + """
15935 +
15936 + def wrapper(self, *args, **kwargs):
15937 + if "parallel-install" in self.settings.features:
15938 + self._acquire_slot_locks(kwargs.get("mydbapi", self.vartree.dbapi))
15939 + try:
15940 + return f(self, *args, **kwargs)
15941 + finally:
15942 + self._release_slot_locks()
15943 +
15944 + return wrapper
15945 +
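The decorator boils down to a standard acquire/try/finally/release wrapper.
A generic sketch of the same pattern, with acquire and release as
hypothetical callables:

    import functools

    def locked(acquire, release):
        def decorator(f):
            @functools.wraps(f)
            def wrapper(self, *args, **kwargs):
                acquire(self)  # take locks before the wrapped call
                try:
                    return f(self, *args, **kwargs)
                finally:
                    release(self)  # always release, even on error
            return wrapper
        return decorator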
15946 + def _acquire_slot_locks(self, db):
15947 + """
15948 + Acquire slot locks for the current package and blocked packages.
15949 + """
15950 +
15951 + slot_atoms = []
15952 +
15953 + try:
15954 + slot = self.mycpv.slot
15955 + except AttributeError:
15956 + (slot,) = db.aux_get(self.mycpv, ["SLOT"])
15957 + slot = slot.partition("/")[0]
15958 +
15959 + slot_atoms.append(portage.dep.Atom("%s:%s" % (self.mycpv.cp, slot)))
15960 +
15961 + for blocker in self._blockers or []:
15962 + slot_atoms.append(blocker.slot_atom)
15963 +
15964 + # Sort atoms so that locks are acquired in a predictable
15965 + # order, preventing deadlocks with competitors that may
15966 + # be trying to acquire overlapping locks.
15967 + slot_atoms.sort()
15968 + for slot_atom in slot_atoms:
15969 + self.vartree.dbapi._slot_lock(slot_atom)
15970 + self._slot_locks.append(slot_atom)
15971 +
15972 + def _release_slot_locks(self):
15973 + """
15974 + Release all slot locks.
15975 + """
15976 + while self._slot_locks:
15977 + self.vartree.dbapi._slot_unlock(self._slot_locks.pop())
15978 +
15979 + def getpath(self):
15980 + "return path to location of db information (for >>> informational display)"
15981 + return self.dbdir
15982 +
15983 + def exists(self):
15984 + "does the db entry exist? boolean."
15985 + return os.path.exists(self.dbdir)
15986 +
15987 + def delete(self):
15988 + """
15989 + Remove this entry from the database
15990 + """
15991 + try:
15992 + os.lstat(self.dbdir)
15993 + except OSError as e:
15994 + if e.errno not in (errno.ENOENT, errno.ENOTDIR, errno.ESTALE):
15995 + raise
15996 + return
15997 +
15998 + # Check validity of self.dbdir before attempting to remove it.
15999 + if not self.dbdir.startswith(self.dbroot):
16000 + writemsg(
16001 + _("portage.dblink.delete(): invalid dbdir: %s\n") % self.dbdir,
16002 + noiselevel=-1,
16003 + )
16004 + return
16005 +
16006 + if self.dbdir is self.dbpkgdir:
16007 + (counter,) = self.vartree.dbapi.aux_get(self.mycpv, ["COUNTER"])
16008 + self.vartree.dbapi._cache_delta.recordEvent(
16009 + "remove", self.mycpv, self.settings["SLOT"].split("/")[0], counter
16010 + )
16011 +
16012 + shutil.rmtree(self.dbdir)
16013 + # If empty, remove parent category directory.
16014 + try:
16015 + os.rmdir(os.path.dirname(self.dbdir))
16016 + except OSError:
16017 + pass
16018 + self.vartree.dbapi._remove(self)
16019 +
16020 + # Use self.dbroot since we need an existing path for syncfs.
16021 + try:
16022 + self._merged_path(self.dbroot, os.lstat(self.dbroot))
16023 + except OSError:
16024 + pass
16025 +
16026 + self._post_merge_sync()
16027 +
16028 + def clearcontents(self):
16029 + """
16030 + For a given db entry (self), erase the CONTENTS values.
16031 + """
16032 + self.lockdb()
16033 + try:
16034 + if os.path.exists(self.dbdir + "/CONTENTS"):
16035 + os.unlink(self.dbdir + "/CONTENTS")
16036 + finally:
16037 + self.unlockdb()
16038 +
16039 + def _clear_contents_cache(self):
16040 + self.contentscache = None
16041 + self._contents_inodes = None
16042 + self._contents_basenames = None
16043 + self._contents.clear_cache()
16044 +
16045 + def getcontents(self):
16046 + """
16047 + Get the installed files of a given package (aka what that package installed)
16048 + """
16049 + if self.contentscache is not None:
16050 + return self.contentscache
16051 + contents_file = os.path.join(self.dbdir, "CONTENTS")
16052 + pkgfiles = {}
16053 + try:
16054 + with io.open(
16055 + _unicode_encode(
16056 + contents_file, encoding=_encodings["fs"], errors="strict"
16057 + ),
16058 + mode="r",
16059 + encoding=_encodings["repo.content"],
16060 + errors="replace",
16061 + ) as f:
16062 + mylines = f.readlines()
16063 + except EnvironmentError as e:
16064 + if e.errno != errno.ENOENT:
16065 + raise
16066 + del e
16067 + self.contentscache = pkgfiles
16068 + return pkgfiles
16069 +
16070 + null_byte = "\0"
16071 + normalize_needed = self._normalize_needed
16072 + contents_re = self._contents_re
16073 + obj_index = contents_re.groupindex["obj"]
16074 + dir_index = contents_re.groupindex["dir"]
16075 + sym_index = contents_re.groupindex["sym"]
16076 + # The old symlink format may exist on systems that have packages
16077 + # which were installed many years ago (see bug #351814).
16078 + oldsym_index = contents_re.groupindex["oldsym"]
16079 + # CONTENTS files already contain EPREFIX
16080 + myroot = self.settings["ROOT"]
16081 + if myroot == os.path.sep:
16082 + myroot = None
16083 + # used to generate parent dir entries
16084 + dir_entry = ("dir",)
16085 + eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
16086 + pos = 0
16087 + errors = []
16088 + for pos, line in enumerate(mylines):
16089 + if null_byte in line:
16090 + # Null bytes are a common indication of corruption.
16091 + errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
16092 + continue
16093 + line = line.rstrip("\n")
16094 + m = contents_re.match(line)
16095 + if m is None:
16096 + errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
16097 + continue
16098 +
16099 + if m.group(obj_index) is not None:
16100 + base = obj_index
16101 + # format: type, mtime, md5sum
16102 + data = (m.group(base + 1), m.group(base + 4), m.group(base + 3))
16103 + elif m.group(dir_index) is not None:
16104 + base = dir_index
16105 + # format: type
16106 + data = (m.group(base + 1),)
16107 + elif m.group(sym_index) is not None:
16108 + base = sym_index
16109 + if m.group(oldsym_index) is None:
16110 + mtime = m.group(base + 5)
16111 + else:
16112 + mtime = m.group(base + 8)
16113 + # format: type, mtime, dest
16114 + data = (m.group(base + 1), mtime, m.group(base + 3))
16115 + else:
16116 +                # This won't happen as long as the regular expression
16117 +                # is written to only match valid entries.
16118 + raise AssertionError(
16119 +                    _("required group not found in CONTENTS entry: '%s'") % line
16120 + )
16121 +
16122 + path = m.group(base + 2)
16123 + if normalize_needed.search(path) is not None:
16124 + path = normalize_path(path)
16125 + if not path.startswith(os.path.sep):
16126 + path = os.path.sep + path
16127 +
16128 + if myroot is not None:
16129 + path = os.path.join(myroot, path.lstrip(os.path.sep))
16130 +
16131 + # Implicitly add parent directories, since we can't necessarily
16132 + # assume that they are explicitly listed in CONTENTS, and it's
16133 + # useful for callers if they can rely on parent directory entries
16134 + # being generated here (crucial for things like dblink.isowner()).
16135 + path_split = path.split(os.sep)
16136 + path_split.pop()
16137 + while len(path_split) > eroot_split_len:
16138 + parent = os.sep.join(path_split)
16139 + if parent in pkgfiles:
16140 + break
16141 + pkgfiles[parent] = dir_entry
16142 + path_split.pop()
16143 +
16144 + pkgfiles[path] = data
16145 +
16146 + if errors:
16147 + writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
16148 + for pos, e in errors:
16149 + writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1)
16150 + self.contentscache = pkgfiles
16151 + return pkgfiles
16152 +
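To make the parsed structure concrete, here is a hedged example of what
getcontents() returns, assuming ROOT=/ and a hypothetical package; the
checksum and mtime values are illustrative:

    # CONTENTS on disk:
    #
    #   dir /usr/bin
    #   obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1642156321
    #   sym /usr/bin/bar -> foo 1642156321
    #
    # Resulting mapping, roughly:
    {
        "/usr": ("dir",),  # parent directories are added implicitly
        "/usr/bin": ("dir",),
        "/usr/bin/foo": ("obj", "1642156321", "d41d8cd98f00b204e9800998ecf8427e"),
        "/usr/bin/bar": ("sym", "1642156321", "foo"),
    }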
16153 + def quickpkg(
16154 + self, output_file, include_config=False, include_unmodified_config=False
16155 + ):
16156 + """
16157 + Create a tar file appropriate for use by quickpkg.
16158 +
16159 + @param output_file: Write binary tar stream to file.
16160 + @type output_file: file
16161 + @param include_config: Include all files protected by CONFIG_PROTECT
16162 + (as a security precaution, default is False).
16163 + @type include_config: bool
16164 + @param include_unmodified_config: Include files protected by CONFIG_PROTECT
16165 + that have not been modified since installation (as a security precaution,
16166 + default is False).
16167 + @type include_unmodified_config: bool
16168 + @rtype: list
16169 + @return: Paths of protected configuration files which have been omitted.
16170 + """
16171 + settings = self.settings
16172 + cpv = self.mycpv
16173 + xattrs = "xattr" in settings.features
16174 + contents = self.getcontents()
16175 + excluded_config_files = []
16176 + protect = None
16177 +
16178 + if not include_config:
16179 + confprot = ConfigProtect(
16180 + settings["EROOT"],
16181 + portage.util.shlex_split(settings.get("CONFIG_PROTECT", "")),
16182 + portage.util.shlex_split(settings.get("CONFIG_PROTECT_MASK", "")),
16183 + case_insensitive=("case-insensitive-fs" in settings.features),
16184 + )
16185 +
16186 + def protect(filename):
16187 + if not confprot.isprotected(filename):
16188 + return False
16189 + if include_unmodified_config:
16190 + file_data = contents[filename]
16191 + if file_data[0] == "obj":
16192 + orig_md5 = file_data[2].lower()
16193 + cur_md5 = perform_md5(filename, calc_prelink=1)
16194 + if orig_md5 == cur_md5:
16195 + return False
16196 + excluded_config_files.append(filename)
16197 + return True
16198 +
16199 + # The tarfile module will write pax headers holding the
16200 + # xattrs only if PAX_FORMAT is specified here.
16201 + with tarfile.open(
16202 + fileobj=output_file,
16203 + mode="w|",
16204 + format=tarfile.PAX_FORMAT if xattrs else tarfile.DEFAULT_FORMAT,
16205 + ) as tar:
16206 + tar_contents(
16207 + contents, settings["ROOT"], tar, protect=protect, xattrs=xattrs
16208 + )
16209 +
16210 + return excluded_config_files
16211 +
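A hedged usage sketch follows; the package atom and output path are
hypothetical, and dblink construction details can vary between portage
versions:

    import portage
    from portage.dbapi.vartree import dblink

    settings = portage.settings
    vartree = portage.db[portage.root]["vartree"]
    # dblink takes the category and PF of an installed package.
    mylink = dblink("app-editors", "nano-5.9", settings=settings,
                    vartree=vartree, treetype="vartree")
    with open("/tmp/nano-5.9.tar", "wb") as out:
        omitted = mylink.quickpkg(out, include_config=False)
    # 'omitted' lists the CONFIG_PROTECT-ed files that were excluded.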
16212 + def _prune_plib_registry(self, unmerge=False, needed=None, preserve_paths=None):
16213 + # remove preserved libraries that don't have any consumers left
16214 + if not (
16215 + self._linkmap_broken
16216 + or self.vartree.dbapi._linkmap is None
16217 + or self.vartree.dbapi._plib_registry is None
16218 + ):
16219 + self.vartree.dbapi._fs_lock()
16220 + plib_registry = self.vartree.dbapi._plib_registry
16221 + plib_registry.lock()
16222 + try:
16223 + plib_registry.load()
16224 +
16225 + unmerge_with_replacement = unmerge and preserve_paths is not None
16226 + if unmerge_with_replacement:
16227 + # If self.mycpv is about to be unmerged and we
16228 + # have a replacement package, we want to exclude
16229 + # the irrelevant NEEDED data that belongs to
16230 + # files which are being unmerged now.
16231 + exclude_pkgs = (self.mycpv,)
16232 + else:
16233 + exclude_pkgs = None
16234 +
16235 + self._linkmap_rebuild(
16236 + exclude_pkgs=exclude_pkgs,
16237 + include_file=needed,
16238 + preserve_paths=preserve_paths,
16239 + )
16240 +
16241 + if unmerge:
16242 + unmerge_preserve = None
16243 + if not unmerge_with_replacement:
16244 + unmerge_preserve = self._find_libs_to_preserve(unmerge=True)
16245 + counter = self.vartree.dbapi.cpv_counter(self.mycpv)
16246 + try:
16247 + slot = self.mycpv.slot
16248 + except AttributeError:
16249 + slot = _pkg_str(self.mycpv, slot=self.settings["SLOT"]).slot
16250 + plib_registry.unregister(self.mycpv, slot, counter)
16251 + if unmerge_preserve:
16252 + for path in sorted(unmerge_preserve):
16253 + contents_key = self._match_contents(path)
16254 + if not contents_key:
16255 + continue
16256 + obj_type = self.getcontents()[contents_key][0]
16257 + self._display_merge(
16258 + _(">>> needed %s %s\n") % (obj_type, contents_key),
16259 + noiselevel=-1,
16260 + )
16261 + plib_registry.register(
16262 + self.mycpv, slot, counter, unmerge_preserve
16263 + )
16264 + # Remove the preserved files from our contents
16265 + # so that they won't be unmerged.
16266 + self.vartree.dbapi.removeFromContents(self, unmerge_preserve)
16267 +
16268 + unmerge_no_replacement = unmerge and not unmerge_with_replacement
16269 + cpv_lib_map = self._find_unused_preserved_libs(unmerge_no_replacement)
16270 + if cpv_lib_map:
16271 + self._remove_preserved_libs(cpv_lib_map)
16272 + self.vartree.dbapi.lock()
16273 + try:
16274 + for cpv, removed in cpv_lib_map.items():
16275 + if not self.vartree.dbapi.cpv_exists(cpv):
16276 + continue
16277 + self.vartree.dbapi.removeFromContents(cpv, removed)
16278 + finally:
16279 + self.vartree.dbapi.unlock()
16280 +
16281 + plib_registry.store()
16282 + finally:
16283 + plib_registry.unlock()
16284 + self.vartree.dbapi._fs_unlock()
16285 +
16286 + @_slot_locked
16287 + def unmerge(
16288 + self,
16289 + pkgfiles=None,
16290 + trimworld=None,
16291 + cleanup=True,
16292 + ldpath_mtimes=None,
16293 + others_in_slot=None,
16294 + needed=None,
16295 + preserve_paths=None,
16296 + ):
16297 + """
16298 +        Calls prerm
16299 +        Unmerges a given package (CPV)
16300 +        Calls postrm
16301 +        Calls cleanrm
16302 +        Calls env_update
16303 +
16304 + @param pkgfiles: files to unmerge (generally self.getcontents() )
16305 + @type pkgfiles: Dictionary
16306 + @param trimworld: Unused
16307 + @type trimworld: Boolean
16308 + @param cleanup: cleanup to pass to doebuild (see doebuild)
16309 + @type cleanup: Boolean
16310 + @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
16311 + @type ldpath_mtimes: Dictionary
16312 + @param others_in_slot: all dblink instances in this slot, excluding self
16313 + @type others_in_slot: list
16314 + @param needed: Filename containing libraries needed after unmerge.
16315 + @type needed: String
16316 + @param preserve_paths: Libraries preserved by a package instance that
16317 + is currently being merged. They need to be explicitly passed to the
16318 + LinkageMap, since they are not registered in the
16319 + PreservedLibsRegistry yet.
16320 + @type preserve_paths: set
16321 + @rtype: Integer
16322 + @return:
16323 + 1. os.EX_OK if everything went well.
16324 + 2. return code of the failed phase (for prerm, postrm, cleanrm)
16325 + """
16326 +
16327 + if trimworld is not None:
16328 + warnings.warn(
16329 + "The trimworld parameter of the "
16330 + + "portage.dbapi.vartree.dblink.unmerge()"
16331 + + " method is now unused.",
16332 + DeprecationWarning,
16333 + stacklevel=2,
16334 + )
16335 +
16336 + background = False
16337 + log_path = self.settings.get("PORTAGE_LOG_FILE")
16338 + if self._scheduler is None:
16339 + # We create a scheduler instance and use it to
16340 + # log unmerge output separately from merge output.
16341 + self._scheduler = SchedulerInterface(asyncio._safe_loop())
16342 + if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
16343 + if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
16344 + self.settings["PORTAGE_BACKGROUND"] = "1"
16345 + self.settings.backup_changes("PORTAGE_BACKGROUND")
16346 + background = True
16347 + elif self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "0":
16348 + self.settings["PORTAGE_BACKGROUND"] = "0"
16349 + self.settings.backup_changes("PORTAGE_BACKGROUND")
16350 + elif self.settings.get("PORTAGE_BACKGROUND") == "1":
16351 + background = True
16352 +
16353 + self.vartree.dbapi._bump_mtime(self.mycpv)
16354 + showMessage = self._display_merge
16355 + if self.vartree.dbapi._categories is not None:
16356 + self.vartree.dbapi._categories = None
16357 +
16358 + # When others_in_slot is not None, the backup has already been
16359 + # handled by the caller.
16360 + caller_handles_backup = others_in_slot is not None
16361 +
16362 + # When others_in_slot is supplied, the security check has already been
16363 + # done for this slot, so it shouldn't be repeated until the next
16364 + # replacement or unmerge operation.
16365 + if others_in_slot is None:
16366 + slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
16367 + slot_matches = self.vartree.dbapi.match(
16368 + "%s:%s" % (portage.cpv_getkey(self.mycpv), slot)
16369 + )
16370 + others_in_slot = []
16371 + for cur_cpv in slot_matches:
16372 + if cur_cpv == self.mycpv:
16373 + continue
16374 + others_in_slot.append(
16375 + dblink(
16376 + self.cat,
16377 + catsplit(cur_cpv)[1],
16378 + settings=self.settings,
16379 + vartree=self.vartree,
16380 + treetype="vartree",
16381 + pipe=self._pipe,
16382 + )
16383 + )
16384 +
16385 + retval = self._security_check([self] + others_in_slot)
16386 + if retval:
16387 + return retval
16388 +
16389 + contents = self.getcontents()
16390 + # Now, don't assume that the name of the ebuild is the same as the
16391 + # name of the dir; the package may have been moved.
16392 + myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
16393 + failures = 0
16394 + ebuild_phase = "prerm"
16395 + mystuff = os.listdir(self.dbdir)
16396 + for x in mystuff:
16397 + if x.endswith(".ebuild"):
16398 + if x[:-7] != self.pkg:
16399 + # Clean up after vardbapi.move_ent() breakage in
16400 + # portage versions before 2.1.2
16401 + os.rename(os.path.join(self.dbdir, x), myebuildpath)
16402 + write_atomic(os.path.join(self.dbdir, "PF"), self.pkg + "\n")
16403 + break
16404 +
16405 + if (
16406 + self.mycpv != self.settings.mycpv
16407 + or "EAPI" not in self.settings.configdict["pkg"]
16408 + ):
16409 + # We avoid a redundant setcpv call here when
16410 + # the caller has already taken care of it.
16411 + self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
16412 +
16413 + eapi_unsupported = False
16414 + try:
16415 + doebuild_environment(
16416 + myebuildpath, "prerm", settings=self.settings, db=self.vartree.dbapi
16417 + )
16418 + except UnsupportedAPIException as e:
16419 + eapi_unsupported = e
16420 +
16421 + if (
16422 + self._preserve_libs
16423 + and "preserve-libs" in self.settings["PORTAGE_RESTRICT"].split()
16424 + ):
16425 + self._preserve_libs = False
16426 +
16427 + builddir_lock = None
16428 + scheduler = self._scheduler
16429 + retval = os.EX_OK
16430 + try:
16431 + # Only create builddir_lock if the caller
16432 + # has not already acquired the lock.
16433 + if "PORTAGE_BUILDDIR_LOCKED" not in self.settings:
16434 + builddir_lock = EbuildBuildDir(
16435 + scheduler=scheduler, settings=self.settings
16436 + )
16437 + scheduler.run_until_complete(builddir_lock.async_lock())
16438 + prepare_build_dirs(settings=self.settings, cleanup=True)
16439 + log_path = self.settings.get("PORTAGE_LOG_FILE")
16440 +
16441 + # Do this before the following _prune_plib_registry call, since
16442 + # that removes preserved libraries from our CONTENTS, and we
16443 +            # may want to back up those libraries first.
16444 + if not caller_handles_backup:
16445 + retval = self._pre_unmerge_backup(background)
16446 + if retval != os.EX_OK:
16447 + showMessage(
16448 + _("!!! FAILED prerm: quickpkg: %s\n") % retval,
16449 + level=logging.ERROR,
16450 + noiselevel=-1,
16451 + )
16452 + return retval
16453 +
16454 + self._prune_plib_registry(
16455 + unmerge=True, needed=needed, preserve_paths=preserve_paths
16456 + )
16457 +
16458 + # Log the error after PORTAGE_LOG_FILE is initialized
16459 + # by prepare_build_dirs above.
16460 + if eapi_unsupported:
16461 + # Sometimes this happens due to corruption of the EAPI file.
16462 + failures += 1
16463 + showMessage(
16464 + _("!!! FAILED prerm: %s\n") % os.path.join(self.dbdir, "EAPI"),
16465 + level=logging.ERROR,
16466 + noiselevel=-1,
16467 + )
16468 + showMessage(
16469 + "%s\n" % (eapi_unsupported,), level=logging.ERROR, noiselevel=-1
16470 + )
16471 + elif os.path.isfile(myebuildpath):
16472 + phase = EbuildPhase(
16473 + background=background,
16474 + phase=ebuild_phase,
16475 + scheduler=scheduler,
16476 + settings=self.settings,
16477 + )
16478 + phase.start()
16479 + retval = phase.wait()
16480 +
16481 + # XXX: Decide how to handle failures here.
16482 + if retval != os.EX_OK:
16483 + failures += 1
16484 + showMessage(
16485 + _("!!! FAILED prerm: %s\n") % retval,
16486 + level=logging.ERROR,
16487 + noiselevel=-1,
16488 + )
16489 +
16490 + self.vartree.dbapi._fs_lock()
16491 + try:
16492 + self._unmerge_pkgfiles(pkgfiles, others_in_slot)
16493 + finally:
16494 + self.vartree.dbapi._fs_unlock()
16495 + self._clear_contents_cache()
16496 +
16497 + if not eapi_unsupported and os.path.isfile(myebuildpath):
16498 + ebuild_phase = "postrm"
16499 + phase = EbuildPhase(
16500 + background=background,
16501 + phase=ebuild_phase,
16502 + scheduler=scheduler,
16503 + settings=self.settings,
16504 + )
16505 + phase.start()
16506 + retval = phase.wait()
16507 +
16508 + # XXX: Decide how to handle failures here.
16509 + if retval != os.EX_OK:
16510 + failures += 1
16511 + showMessage(
16512 + _("!!! FAILED postrm: %s\n") % retval,
16513 + level=logging.ERROR,
16514 + noiselevel=-1,
16515 + )
16516 +
16517 + finally:
16518 + self.vartree.dbapi._bump_mtime(self.mycpv)
16519 + try:
16520 + if not eapi_unsupported and os.path.isfile(myebuildpath):
16521 + if retval != os.EX_OK:
16522 + msg_lines = []
16523 + msg = _(
16524 + "The '%(ebuild_phase)s' "
16525 + "phase of the '%(cpv)s' package "
16526 + "has failed with exit value %(retval)s."
16527 + ) % {
16528 + "ebuild_phase": ebuild_phase,
16529 + "cpv": self.mycpv,
16530 + "retval": retval,
16531 + }
16532 + from textwrap import wrap
16533 +
16534 + msg_lines.extend(wrap(msg, 72))
16535 + msg_lines.append("")
16536 +
16537 + ebuild_name = os.path.basename(myebuildpath)
16538 + ebuild_dir = os.path.dirname(myebuildpath)
16539 + msg = _(
16540 + "The problem occurred while executing "
16541 + "the ebuild file named '%(ebuild_name)s' "
16542 + "located in the '%(ebuild_dir)s' directory. "
16543 + "If necessary, manually remove "
16544 + "the environment.bz2 file and/or the "
16545 + "ebuild file located in that directory."
16546 + ) % {"ebuild_name": ebuild_name, "ebuild_dir": ebuild_dir}
16547 + msg_lines.extend(wrap(msg, 72))
16548 + msg_lines.append("")
16549 +
16550 + msg = _(
16551 + "Removal "
16552 + "of the environment.bz2 file is "
16553 + "preferred since it may allow the "
16554 + "removal phases to execute successfully. "
16555 + "The ebuild will be "
16556 + "sourced and the eclasses "
16557 + "from the current ebuild repository will be used "
16558 + "when necessary. Removal of "
16559 + "the ebuild file will cause the "
16560 + "pkg_prerm() and pkg_postrm() removal "
16561 + "phases to be skipped entirely."
16562 + )
16563 + msg_lines.extend(wrap(msg, 72))
16564 +
16565 + self._eerror(ebuild_phase, msg_lines)
16566 +
16567 + self._elog_process(phasefilter=("prerm", "postrm"))
16568 +
16569 + if retval == os.EX_OK:
16570 + try:
16571 + doebuild_environment(
16572 + myebuildpath,
16573 + "cleanrm",
16574 + settings=self.settings,
16575 + db=self.vartree.dbapi,
16576 + )
16577 + except UnsupportedAPIException:
16578 + pass
16579 + phase = EbuildPhase(
16580 + background=background,
16581 + phase="cleanrm",
16582 + scheduler=scheduler,
16583 + settings=self.settings,
16584 + )
16585 + phase.start()
16586 + retval = phase.wait()
16587 + finally:
16588 + if builddir_lock is not None:
16589 + scheduler.run_until_complete(builddir_lock.async_unlock())
16590 +
16591 + if log_path is not None:
16592 +
16593 + if not failures and "unmerge-logs" not in self.settings.features:
16594 + try:
16595 + os.unlink(log_path)
16596 + except OSError:
16597 + pass
16598 +
16599 + try:
16600 + st = os.stat(log_path)
16601 + except OSError:
16602 + pass
16603 + else:
16604 + if st.st_size == 0:
16605 + try:
16606 + os.unlink(log_path)
16607 + except OSError:
16608 + pass
16609 +
16610 + if log_path is not None and os.path.exists(log_path):
16611 +            # Restore this since it gets lost somewhere above, and it
16612 +            # needs to be set for _display_merge() to be able to log.
16613 +            # Note that the log isn't necessarily supposed to exist:
16614 +            # if PORTAGE_LOGDIR is unset then it's a temp file, so it
16615 +            # gets cleaned up above.
16616 + self.settings["PORTAGE_LOG_FILE"] = log_path
16617 + else:
16618 + self.settings.pop("PORTAGE_LOG_FILE", None)
16619 +
16620 + env_update(
16621 + target_root=self.settings["ROOT"],
16622 + prev_mtimes=ldpath_mtimes,
16623 + contents=contents,
16624 + env=self.settings,
16625 + writemsg_level=self._display_merge,
16626 + vardbapi=self.vartree.dbapi,
16627 + )
16628 +
16629 + unmerge_with_replacement = preserve_paths is not None
16630 + if not unmerge_with_replacement:
16631 + # When there's a replacement package which calls us via treewalk,
16632 + # treewalk will automatically call _prune_plib_registry for us.
16633 + # Otherwise, we need to call _prune_plib_registry ourselves.
16634 + # Don't pass in the "unmerge=True" flag here, since that flag
16635 + # is intended to be used _prior_ to unmerge, not after.
16636 + self._prune_plib_registry()
16637 +
16638 + return os.EX_OK
16639 +
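For context, a minimal sketch of how a caller can drive this method,
following the lock/unmerge/delete pattern used by portage's own unmerge
helpers; the package atom is hypothetical and error handling is elided:

    import os
    import portage
    from portage.dbapi.vartree import dblink

    vartree = portage.db[portage.root]["vartree"]
    mylink = dblink("app-editors", "nano-5.9", settings=portage.settings,
                    vartree=vartree, treetype="vartree")
    mylink.lockdb()
    try:
        if mylink.exists():
            retval = mylink.unmerge(ldpath_mtimes=None)
            if retval == os.EX_OK:
                # Drop the VDB entry once the live filesystem is clean.
                mylink.delete()
    finally:
        mylink.unlockdb()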
16640 + def _display_merge(self, msg, level=0, noiselevel=0):
16641 + if not self._verbose and noiselevel >= 0 and level < logging.WARN:
16642 + return
16643 + if self._scheduler is None:
16644 + writemsg_level(msg, level=level, noiselevel=noiselevel)
16645 + else:
16646 + log_path = None
16647 + if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
16648 + log_path = self.settings.get("PORTAGE_LOG_FILE")
16649 + background = self.settings.get("PORTAGE_BACKGROUND") == "1"
16650 +
16651 + if background and log_path is None:
16652 + if level >= logging.WARN:
16653 + writemsg_level(msg, level=level, noiselevel=noiselevel)
16654 + else:
16655 + self._scheduler.output(
16656 + msg,
16657 + log_path=log_path,
16658 + background=background,
16659 + level=level,
16660 + noiselevel=noiselevel,
16661 + )
16662 +
16663 + def _show_unmerge(self, zing, desc, file_type, file_name):
16664 + self._display_merge(
16665 + "%s %s %s %s\n" % (zing, desc.ljust(8), file_type, file_name)
16666 + )
16667 +
16668 + def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
16669 + """
16670 +
16671 + Unmerges the contents of a package from the liveFS
16672 + Removes the VDB entry for self
16673 +
16674 + @param pkgfiles: typically self.getcontents()
16675 + @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
16676 + @param others_in_slot: all dblink instances in this slot, excluding self
16677 + @type others_in_slot: list
16678 + @rtype: None
16679 + """
16680 +
16681 + os = _os_merge
16682 + perf_md5 = perform_md5
16683 + showMessage = self._display_merge
16684 + show_unmerge = self._show_unmerge
16685 + ignored_unlink_errnos = self._ignored_unlink_errnos
16686 + ignored_rmdir_errnos = self._ignored_rmdir_errnos
16687 +
16688 + if not pkgfiles:
16689 + showMessage(_("No package files given... Grabbing a set.\n"))
16690 + pkgfiles = self.getcontents()
16691 +
16692 + if others_in_slot is None:
16693 + others_in_slot = []
16694 + slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
16695 + slot_matches = self.vartree.dbapi.match(
16696 + "%s:%s" % (portage.cpv_getkey(self.mycpv), slot)
16697 + )
16698 + for cur_cpv in slot_matches:
16699 + if cur_cpv == self.mycpv:
16700 + continue
16701 + others_in_slot.append(
16702 + dblink(
16703 + self.cat,
16704 + catsplit(cur_cpv)[1],
16705 + settings=self.settings,
16706 + vartree=self.vartree,
16707 + treetype="vartree",
16708 + pipe=self._pipe,
16709 + )
16710 + )
16711 +
16712 + cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
16713 + stale_confmem = []
16714 + protected_symlinks = {}
16715 +
16716 + unmerge_orphans = "unmerge-orphans" in self.settings.features
16717 + calc_prelink = "prelink-checksums" in self.settings.features
16718 +
16719 + if pkgfiles:
16720 + self.updateprotect()
16721 + mykeys = list(pkgfiles)
16722 + mykeys.sort()
16723 + mykeys.reverse()
16724 +
16725 + # process symlinks second-to-last, directories last.
16726 + mydirs = set()
16727 +
16728 + uninstall_ignore = portage.util.shlex_split(
16729 + self.settings.get("UNINSTALL_IGNORE", "")
16730 + )
16731 +
16732 + def unlink(file_name, lstatobj):
16733 + if bsd_chflags:
16734 + if lstatobj.st_flags != 0:
16735 + bsd_chflags.lchflags(file_name, 0)
16736 + parent_name = os.path.dirname(file_name)
16737 + # Use normal stat/chflags for the parent since we want to
16738 + # follow any symlinks to the real parent directory.
16739 + pflags = os.stat(parent_name).st_flags
16740 + if pflags != 0:
16741 + bsd_chflags.chflags(parent_name, 0)
16742 + try:
16743 + if not stat.S_ISLNK(lstatobj.st_mode):
16744 + # Remove permissions to ensure that any hardlinks to
16745 + # suid/sgid files are rendered harmless.
16746 + os.chmod(file_name, 0)
16747 + os.unlink(file_name)
16748 + except OSError as ose:
16749 + # If the chmod or unlink fails, you are in trouble.
16750 + # With Prefix this can be because the file is owned
16751 + # by someone else (a screwup by root?), on a normal
16752 + # system maybe filesystem corruption. In any case,
16753 + # if we backtrace and die here, we leave the system
16754 + # in a totally undefined state, hence we just bleed
16755 + # like hell and continue to hopefully finish all our
16756 + # administrative and pkg_postinst stuff.
16757 + self._eerror(
16758 + "postrm",
16759 + ["Could not chmod or unlink '%s': %s" % (file_name, ose)],
16760 + )
16761 + else:
16762 +
16763 + # Even though the file no longer exists, we log it
16764 + # here so that _unmerge_dirs can see that we've
16765 + # removed a file from this device, and will record
16766 + # the parent directory for a syncfs call.
16767 + self._merged_path(file_name, lstatobj, exists=False)
16768 +
16769 + finally:
16770 + if bsd_chflags and pflags != 0:
16771 + # Restore the parent flags we saved before unlinking
16772 + bsd_chflags.chflags(parent_name, pflags)
16773 +
16774 + unmerge_desc = {}
16775 + unmerge_desc["cfgpro"] = _("cfgpro")
16776 + unmerge_desc["replaced"] = _("replaced")
16777 + unmerge_desc["!dir"] = _("!dir")
16778 + unmerge_desc["!empty"] = _("!empty")
16779 + unmerge_desc["!fif"] = _("!fif")
16780 + unmerge_desc["!found"] = _("!found")
16781 + unmerge_desc["!md5"] = _("!md5")
16782 + unmerge_desc["!mtime"] = _("!mtime")
16783 + unmerge_desc["!obj"] = _("!obj")
16784 + unmerge_desc["!sym"] = _("!sym")
16785 + unmerge_desc["!prefix"] = _("!prefix")
16786 +
16787 + real_root = self.settings["ROOT"]
16788 + real_root_len = len(real_root) - 1
16789 + eroot = self.settings["EROOT"]
16790 +
16791 + infodirs = frozenset(
16792 + infodir
16793 + for infodir in chain(
16794 + self.settings.get("INFOPATH", "").split(":"),
16795 + self.settings.get("INFODIR", "").split(":"),
16796 + )
16797 + if infodir
16798 + )
16799 + infodirs_inodes = set()
16800 + for infodir in infodirs:
16801 + infodir = os.path.join(real_root, infodir.lstrip(os.sep))
16802 + try:
16803 + statobj = os.stat(infodir)
16804 + except OSError:
16805 + pass
16806 + else:
16807 + infodirs_inodes.add((statobj.st_dev, statobj.st_ino))
16808 +
16809 + for i, objkey in enumerate(mykeys):
16810 +
16811 + obj = normalize_path(objkey)
16812 + if os is _os_merge:
16813 + try:
16814 + _unicode_encode(
16815 + obj, encoding=_encodings["merge"], errors="strict"
16816 + )
16817 + except UnicodeEncodeError:
16818 + # The package appears to have been merged with a
16819 + # different value of sys.getfilesystemencoding(),
16820 + # so fall back to utf_8 if appropriate.
16821 + try:
16822 + _unicode_encode(
16823 + obj, encoding=_encodings["fs"], errors="strict"
16824 + )
16825 + except UnicodeEncodeError:
16826 + pass
16827 + else:
16828 + os = portage.os
16829 + perf_md5 = portage.checksum.perform_md5
16830 +
16831 + file_data = pkgfiles[objkey]
16832 + file_type = file_data[0]
16833 +
16834 + # don't try to unmerge the prefix offset itself
16835 + if len(obj) <= len(eroot) or not obj.startswith(eroot):
16836 + show_unmerge("---", unmerge_desc["!prefix"], file_type, obj)
16837 + continue
16838 +
16839 + statobj = None
16840 + try:
16841 + statobj = os.stat(obj)
16842 + except OSError:
16843 + pass
16844 + lstatobj = None
16845 + try:
16846 + lstatobj = os.lstat(obj)
16847 + except (OSError, AttributeError):
16848 + pass
16849 + islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
16850 + if lstatobj is None:
16851 + show_unmerge("---", unmerge_desc["!found"], file_type, obj)
16852 + continue
16853 +
16854 + f_match = obj[len(eroot) - 1 :]
16855 + ignore = False
16856 + for pattern in uninstall_ignore:
16857 + if fnmatch.fnmatch(f_match, pattern):
16858 + ignore = True
16859 + break
16860 +
16861 + if not ignore:
16862 + if islink and f_match in ("/lib", "/usr/lib", "/usr/local/lib"):
16863 + # Ignore libdir symlinks for bug #423127.
16864 + ignore = True
16865 +
16866 + if ignore:
16867 + show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
16868 + continue
16869 +
16870 + # don't use EROOT, CONTENTS entries already contain EPREFIX
16871 + if obj.startswith(real_root):
16872 + relative_path = obj[real_root_len:]
16873 + is_owned = False
16874 + for dblnk in others_in_slot:
16875 + if dblnk.isowner(relative_path):
16876 + is_owned = True
16877 + break
16878 +
16879 + if (
16880 + is_owned
16881 + and islink
16882 + and file_type in ("sym", "dir")
16883 + and statobj
16884 + and stat.S_ISDIR(statobj.st_mode)
16885 + ):
16886 + # A new instance of this package claims the file, so
16887 +                        # don't unmerge it. If the file is a symlink to a
16888 + # directory and the unmerging package installed it as
16889 + # a symlink, but the new owner has it listed as a
16890 + # directory, then we'll produce a warning since the
16891 + # symlink is a sort of orphan in this case (see
16892 + # bug #326685).
16893 + symlink_orphan = False
16894 + for dblnk in others_in_slot:
16895 + parent_contents_key = dblnk._match_contents(relative_path)
16896 + if not parent_contents_key:
16897 + continue
16898 + if not parent_contents_key.startswith(real_root):
16899 + continue
16900 + if dblnk.getcontents()[parent_contents_key][0] == "dir":
16901 + symlink_orphan = True
16902 + break
16903 +
16904 + if symlink_orphan:
16905 + protected_symlinks.setdefault(
16906 + (statobj.st_dev, statobj.st_ino), []
16907 + ).append(relative_path)
16908 +
16909 + if is_owned:
16910 + show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
16911 + continue
16912 + elif relative_path in cfgfiledict:
16913 + stale_confmem.append(relative_path)
16914 +
16915 + # Don't unlink symlinks to directories here since that can
16916 + # remove /lib and /usr/lib symlinks.
16917 + if (
16918 + unmerge_orphans
16919 + and lstatobj
16920 + and not stat.S_ISDIR(lstatobj.st_mode)
16921 + and not (islink and statobj and stat.S_ISDIR(statobj.st_mode))
16922 + and not self.isprotected(obj)
16923 + ):
16924 + try:
16925 + unlink(obj, lstatobj)
16926 + except EnvironmentError as e:
16927 + if e.errno not in ignored_unlink_errnos:
16928 + raise
16929 + del e
16930 + show_unmerge("<<<", "", file_type, obj)
16931 + continue
16932 +
16933 + lmtime = str(lstatobj[stat.ST_MTIME])
16934 + if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (
16935 + lmtime != pkgfiles[objkey][1]
16936 + ):
16937 + show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
16938 + continue
16939 +
16940 + if file_type == "dir" and not islink:
16941 + if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
16942 + show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
16943 + continue
16944 + mydirs.add((obj, (lstatobj.st_dev, lstatobj.st_ino)))
16945 + elif file_type == "sym" or (file_type == "dir" and islink):
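For readers unfamiliar with the pattern, here is a minimal standalone
sketch of such a slot-lock decorator, assuming a hypothetical
threading-based lock table in place of vardbapi's slot locks
(slot_atoms is likewise a hypothetical attribute):

    import threading

    _slot_locks = {}                 # hypothetical lock table, keyed by slot atom
    _table_guard = threading.Lock()

    def _get_slot_lock(slot_atom):
        # One lock per slot atom, created on first use.
        with _table_guard:
            return _slot_locks.setdefault(slot_atom, threading.Lock())

    def slot_locked(f):
        """Acquire every lock in self.slot_atoms around f, then release."""
        def wrapper(self, *args, **kwargs):
            # Sorting gives all competitors the same acquisition order,
            # which prevents lock-ordering deadlocks (as above).
            held = [_get_slot_lock(atom) for atom in sorted(self.slot_atoms)]
            for lock in held:
                lock.acquire()
            try:
                return f(self, *args, **kwargs)
            finally:
                for lock in reversed(held):
                    lock.release()
        return wrapper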
16946 + if not islink:
16947 + show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
16948 + continue
16949 +
16950 + # If this symlink points to a directory then we don't want
16951 + # to unmerge it if there are any other packages that
16952 + # installed files into the directory via this symlink
16953 + # (see bug #326685).
16954 + # TODO: Resolving a symlink to a directory will require
16955 + # simulation if $ROOT != / and the link is not relative.
16956 + if (
16957 + islink
16958 + and statobj
16959 + and stat.S_ISDIR(statobj.st_mode)
16960 + and obj.startswith(real_root)
16961 + ):
16962 +
16963 + relative_path = obj[real_root_len:]
16964 + try:
16965 + target_dir_contents = os.listdir(obj)
16966 + except OSError:
16967 + pass
16968 + else:
16969 + if target_dir_contents:
16970 + # If all the children are regular files owned
16971 + # by this package, then the symlink should be
16972 + # safe to unmerge.
16973 + all_owned = True
16974 + for child in target_dir_contents:
16975 + child = os.path.join(relative_path, child)
16976 + if not self.isowner(child):
16977 + all_owned = False
16978 + break
16979 + try:
16980 + child_lstat = os.lstat(
16981 + os.path.join(
16982 + real_root, child.lstrip(os.sep)
16983 + )
16984 + )
16985 + except OSError:
16986 + continue
16987 +
16988 + if not stat.S_ISREG(child_lstat.st_mode):
16989 + # Nested symlinks or directories make
16990 + # the issue very complex, so just
16991 + # preserve the symlink in order to be
16992 + # on the safe side.
16993 + all_owned = False
16994 + break
16995 +
16996 + if not all_owned:
16997 + protected_symlinks.setdefault(
16998 + (statobj.st_dev, statobj.st_ino), []
16999 + ).append(relative_path)
17000 + show_unmerge(
17001 + "---", unmerge_desc["!empty"], file_type, obj
17002 + )
17003 + continue
17004 +
17005 + # Go ahead and unlink symlinks to directories here when
17006 + # they're actually recorded as symlinks in the contents.
17007 + # Normally, symlinks such as /lib -> lib64 are not recorded
17008 + # as symlinks in the contents of a package. If a package
17009 + # installs something into ${D}/lib/, it is recorded in the
17010 + # contents as a directory even if it happens to correspond
17011 + # to a symlink when it's merged to the live filesystem.
17012 + try:
17013 + unlink(obj, lstatobj)
17014 + show_unmerge("<<<", "", file_type, obj)
17015 + except (OSError, IOError) as e:
17016 + if e.errno not in ignored_unlink_errnos:
17017 + raise
17018 + del e
17019 + show_unmerge("!!!", "", file_type, obj)
17020 + elif pkgfiles[objkey][0] == "obj":
17021 + if statobj is None or not stat.S_ISREG(statobj.st_mode):
17022 + show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
17023 + continue
17024 + mymd5 = None
17025 + try:
17026 + mymd5 = perf_md5(obj, calc_prelink=calc_prelink)
17027 + except FileNotFound as e:
17028 + # the file has disappeared between now and our stat call
17029 + show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
17030 + continue
17031 +
17032 +                    # The md5 is lowercased because db entries used to be stored
17033 +                    # in upper-case; comparing in lower-case keeps backwards
17034 +                    # compatibility.
17034 + if mymd5 != pkgfiles[objkey][2].lower():
17035 + show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
17036 + continue
17037 + try:
17038 + unlink(obj, lstatobj)
17039 + except (OSError, IOError) as e:
17040 + if e.errno not in ignored_unlink_errnos:
17041 + raise
17042 + del e
17043 + show_unmerge("<<<", "", file_type, obj)
17044 + elif pkgfiles[objkey][0] == "fif":
17045 + if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
17046 + show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
17047 + continue
17048 + show_unmerge("---", "", file_type, obj)
17049 + elif pkgfiles[objkey][0] == "dev":
17050 + show_unmerge("---", "", file_type, obj)
17051 +
17052 + self._unmerge_dirs(
17053 + mydirs, infodirs_inodes, protected_symlinks, unmerge_desc, unlink, os
17054 + )
17055 + mydirs.clear()
17056 +
17057 + if protected_symlinks:
17058 + self._unmerge_protected_symlinks(
17059 + others_in_slot,
17060 + infodirs_inodes,
17061 + protected_symlinks,
17062 + unmerge_desc,
17063 + unlink,
17064 + os,
17065 + )
17066 +
17067 + if protected_symlinks:
17068 + msg = (
17069 + "One or more symlinks to directories have been "
17070 + + "preserved in order to ensure that files installed "
17071 + + "via these symlinks remain accessible. "
17072 + + "This indicates that the mentioned symlink(s) may "
17073 + + "be obsolete remnants of an old install, and it "
17074 + + "may be appropriate to replace a given symlink "
17075 + + "with the directory that it points to."
17076 + )
17077 + lines = textwrap.wrap(msg, 72)
17078 + lines.append("")
17079 + flat_list = set()
17080 + flat_list.update(*protected_symlinks.values())
17081 + flat_list = sorted(flat_list)
17082 + for f in flat_list:
17083 + lines.append("\t%s" % (os.path.join(real_root, f.lstrip(os.sep))))
17084 + lines.append("")
17085 + self._elog("elog", "postrm", lines)
17086 +
17087 + # Remove stale entries from config memory.
17088 + if stale_confmem:
17089 + for filename in stale_confmem:
17090 + del cfgfiledict[filename]
17091 + writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
17092 +
17093 + # remove self from vartree database so that our own virtual gets zapped if we're the last node
17094 + self.vartree.zap(self.mycpv)
17095 +
17096 + def _unmerge_protected_symlinks(
17097 + self,
17098 + others_in_slot,
17099 + infodirs_inodes,
17100 + protected_symlinks,
17101 + unmerge_desc,
17102 + unlink,
17103 + os,
17104 + ):
17105 +
17106 + real_root = self.settings["ROOT"]
17107 + show_unmerge = self._show_unmerge
17108 + ignored_unlink_errnos = self._ignored_unlink_errnos
17109 +
17110 + flat_list = set()
17111 + flat_list.update(*protected_symlinks.values())
17112 + flat_list = sorted(flat_list)
17113 +
17114 + for f in flat_list:
17115 + for dblnk in others_in_slot:
17116 + if dblnk.isowner(f):
17117 + # If another package in the same slot installed
17118 + # a file via a protected symlink, return early
17119 + # and don't bother searching for any other owners.
17120 + return
17121 +
17122 + msg = []
17123 + msg.append("")
17124 + msg.append(_("Directory symlink(s) may need protection:"))
17125 + msg.append("")
17126 +
17127 + for f in flat_list:
17128 + msg.append("\t%s" % os.path.join(real_root, f.lstrip(os.path.sep)))
17129 +
17130 + msg.append("")
17131 + msg.append("Use the UNINSTALL_IGNORE variable to exempt specific symlinks")
17132 + msg.append("from the following search (see the make.conf man page).")
17133 + msg.append("")
17134 + msg.append(
17135 + _(
17136 + "Searching all installed"
17137 + " packages for files installed via above symlink(s)..."
17138 + )
17139 + )
17140 + msg.append("")
17141 + self._elog("elog", "postrm", msg)
17142 +
17143 + self.lockdb()
17144 + try:
17145 + owners = self.vartree.dbapi._owners.get_owners(flat_list)
17146 + self.vartree.dbapi.flush_cache()
17147 + finally:
17148 + self.unlockdb()
17149 +
17150 + for owner in list(owners):
17151 + if owner.mycpv == self.mycpv:
17152 + owners.pop(owner, None)
17153 +
17154 + if not owners:
17155 + msg = []
17156 + msg.append(
17157 + _(
17158 + "The above directory symlink(s) are all "
17159 + "safe to remove. Removing them now..."
17160 + )
17161 + )
17162 + msg.append("")
17163 + self._elog("elog", "postrm", msg)
17164 + dirs = set()
17165 + for unmerge_syms in protected_symlinks.values():
17166 + for relative_path in unmerge_syms:
17167 + obj = os.path.join(real_root, relative_path.lstrip(os.sep))
17168 + parent = os.path.dirname(obj)
17169 + while len(parent) > len(self._eroot):
17170 + try:
17171 + lstatobj = os.lstat(parent)
17172 + except OSError:
17173 + break
17174 + else:
17175 + dirs.add((parent, (lstatobj.st_dev, lstatobj.st_ino)))
17176 + parent = os.path.dirname(parent)
17177 + try:
17178 + unlink(obj, os.lstat(obj))
17179 + show_unmerge("<<<", "", "sym", obj)
17180 + except (OSError, IOError) as e:
17181 + if e.errno not in ignored_unlink_errnos:
17182 + raise
17183 + del e
17184 + show_unmerge("!!!", "", "sym", obj)
17185 +
17186 + protected_symlinks.clear()
17187 + self._unmerge_dirs(
17188 + dirs, infodirs_inodes, protected_symlinks, unmerge_desc, unlink, os
17189 + )
17190 + dirs.clear()
17191 +
17192 + def _unmerge_dirs(
17193 + self, dirs, infodirs_inodes, protected_symlinks, unmerge_desc, unlink, os
17194 + ):
17195 +
17196 + show_unmerge = self._show_unmerge
17197 + infodir_cleanup = self._infodir_cleanup
17198 + ignored_unlink_errnos = self._ignored_unlink_errnos
17199 + ignored_rmdir_errnos = self._ignored_rmdir_errnos
17200 + real_root = self.settings["ROOT"]
17201 +
17202 + dirs = sorted(dirs)
17203 + revisit = {}
17204 +
17205 + while True:
17206 + try:
17207 + obj, inode_key = dirs.pop()
17208 + except IndexError:
17209 + break
17210 + # Treat any directory named "info" as a candidate here,
17211 + # since it might have been in INFOPATH previously even
17212 + # though it may not be there now.
17213 + if inode_key in infodirs_inodes or os.path.basename(obj) == "info":
17214 + try:
17215 + remaining = os.listdir(obj)
17216 + except OSError:
17217 + pass
17218 + else:
17219 + cleanup_info_dir = ()
17220 + if remaining and len(remaining) <= len(infodir_cleanup):
17221 + if not set(remaining).difference(infodir_cleanup):
17222 + cleanup_info_dir = remaining
17223 +
17224 + for child in cleanup_info_dir:
17225 + child = os.path.join(obj, child)
17226 + try:
17227 + lstatobj = os.lstat(child)
17228 + if stat.S_ISREG(lstatobj.st_mode):
17229 + unlink(child, lstatobj)
17230 + show_unmerge("<<<", "", "obj", child)
17231 + except EnvironmentError as e:
17232 + if e.errno not in ignored_unlink_errnos:
17233 + raise
17234 + del e
17235 + show_unmerge("!!!", "", "obj", child)
17236 +
17237 + try:
17238 + parent_name = os.path.dirname(obj)
17239 + parent_stat = os.stat(parent_name)
17240 +
17241 + if bsd_chflags:
17242 + lstatobj = os.lstat(obj)
17243 + if lstatobj.st_flags != 0:
17244 + bsd_chflags.lchflags(obj, 0)
17245 +
17246 + # Use normal stat/chflags for the parent since we want to
17247 + # follow any symlinks to the real parent directory.
17248 + pflags = parent_stat.st_flags
17249 + if pflags != 0:
17250 + bsd_chflags.chflags(parent_name, 0)
17251 + try:
17252 + os.rmdir(obj)
17253 + finally:
17254 + if bsd_chflags and pflags != 0:
17255 + # Restore the parent flags we saved before unlinking
17256 + bsd_chflags.chflags(parent_name, pflags)
17257 +
17258 + # Record the parent directory for use in syncfs calls.
17259 + # Note that we use a realpath and a regular stat here, since
17260 + # we want to follow any symlinks back to the real device where
17261 + # the real parent directory resides.
17262 + self._merged_path(os.path.realpath(parent_name), parent_stat)
17263 +
17264 + show_unmerge("<<<", "", "dir", obj)
17265 + except EnvironmentError as e:
17266 + if e.errno not in ignored_rmdir_errnos:
17267 + raise
17268 + if e.errno != errno.ENOENT:
17269 + show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
17270 + revisit[obj] = inode_key
17271 +
17272 + # Since we didn't remove this directory, record the directory
17273 + # itself for use in syncfs calls, if we have removed another
17274 + # file from the same device.
17275 + # Note that we use a realpath and a regular stat here, since
17276 + # we want to follow any symlinks back to the real device where
17277 + # the real directory resides.
17278 + try:
17279 + dir_stat = os.stat(obj)
17280 + except OSError:
17281 + pass
17282 + else:
17283 + if dir_stat.st_dev in self._device_path_map:
17284 + self._merged_path(os.path.realpath(obj), dir_stat)
17285 +
17286 + else:
17287 + # When a directory is successfully removed, there's
17288 + # no need to protect symlinks that point to it.
17289 + unmerge_syms = protected_symlinks.pop(inode_key, None)
17290 + if unmerge_syms is not None:
17291 + parents = []
17292 + for relative_path in unmerge_syms:
17293 + obj = os.path.join(real_root, relative_path.lstrip(os.sep))
17294 + try:
17295 + unlink(obj, os.lstat(obj))
17296 + show_unmerge("<<<", "", "sym", obj)
17297 + except (OSError, IOError) as e:
17298 + if e.errno not in ignored_unlink_errnos:
17299 + raise
17300 + del e
17301 + show_unmerge("!!!", "", "sym", obj)
17302 + else:
17303 + parents.append(os.path.dirname(obj))
17304 +
17305 + if parents:
17306 + # Revisit parents recursively (bug 640058).
17307 + recursive_parents = []
17308 + for parent in set(parents):
17309 + while parent in revisit:
17310 + recursive_parents.append(parent)
17311 + parent = os.path.dirname(parent)
17312 + if parent == "/":
17313 + break
17314 +
17315 + for parent in sorted(set(recursive_parents)):
17316 + dirs.append((parent, revisit.pop(parent)))
17317 +
17318 + def isowner(self, filename, destroot=None):
17319 + """
17320 + Check if a file belongs to this package. This may
17321 + result in a stat call for the parent directory of
17322 + every installed file, since the inode numbers are
17323 + used to work around the problem of ambiguous paths
17324 + caused by symlinked directories. The results of
17325 + stat calls are cached to optimize multiple calls
17326 + to this method.
17327 +
17328 +        @param filename: file path, interpreted relative to ROOT
17329 +        @type filename: String
17330 +        @param destroot: deprecated and unused
17331 +        @type destroot: String
17332 + @rtype: Boolean
17333 + @return:
17334 + 1. True if this package owns the file.
17335 + 2. False if this package does not own the file.
17336 + """
17337 +
17338 + if destroot is not None and destroot != self._eroot:
17339 + warnings.warn(
17340 + "The second parameter of the "
17341 + + "portage.dbapi.vartree.dblink.isowner()"
17342 + + " is now unused. Instead "
17343 + + "self.settings['EROOT'] will be used.",
17344 + DeprecationWarning,
17345 + stacklevel=2,
17346 + )
17347 +
17348 + return bool(self._match_contents(filename))
17349 +
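A hedged usage sketch; the path and package atom are illustrative:

    import portage
    from portage.dbapi.vartree import dblink

    vartree = portage.db[portage.root]["vartree"]
    mylink = dblink("app-editors", "nano-5.9", settings=portage.settings,
                    vartree=vartree, treetype="vartree")
    # The path is joined onto ROOT; CONTENTS entries already contain EPREFIX.
    if mylink.isowner("/usr/bin/nano"):
        print("app-editors/nano-5.9 owns /usr/bin/nano")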
17350 + def _match_contents(self, filename, destroot=None):
17351 + """
17352 + The matching contents entry is returned, which is useful
17353 + since the path may differ from the one given by the caller,
17354 + due to symlinks.
17355 +
17356 +        @rtype: String or False
17357 + @return: the contents entry corresponding to the given path, or False
17358 + if the file is not owned by this package.
17359 + """
17360 +
17361 + filename = _unicode_decode(
17362 + filename, encoding=_encodings["content"], errors="strict"
17363 + )
17364 +
17365 + if destroot is not None and destroot != self._eroot:
17366 + warnings.warn(
17367 + "The second parameter of the "
17368 + + "portage.dbapi.vartree.dblink._match_contents()"
17369 + + " is now unused. Instead "
17370 + + "self.settings['ROOT'] will be used.",
17371 + DeprecationWarning,
17372 + stacklevel=2,
17373 + )
17374 +
17375 + # don't use EROOT here, image already contains EPREFIX
17376 + destroot = self.settings["ROOT"]
17377 +
17378 +        # The given filename argument might have a different encoding than
17379 +        # the filenames contained in the contents, so use separate wrapped os
17380 + # modules for each. The basename is more likely to contain non-ascii
17381 + # characters than the directory path, so use os_filename_arg for all
17382 + # operations involving the basename of the filename arg.
17383 + os_filename_arg = _os_merge
17384 + os = _os_merge
17385 +
17386 + try:
17387 + _unicode_encode(filename, encoding=_encodings["merge"], errors="strict")
17388 + except UnicodeEncodeError:
17389 + # The package appears to have been merged with a
17390 + # different value of sys.getfilesystemencoding(),
17391 + # so fall back to utf_8 if appropriate.
17392 + try:
17393 + _unicode_encode(filename, encoding=_encodings["fs"], errors="strict")
17394 + except UnicodeEncodeError:
17395 + pass
17396 + else:
17397 + os_filename_arg = portage.os
17398 +
17399 + destfile = normalize_path(
17400 + os_filename_arg.path.join(
17401 + destroot, filename.lstrip(os_filename_arg.path.sep)
17402 + )
17403 + )
17404 +
17405 + if "case-insensitive-fs" in self.settings.features:
17406 + destfile = destfile.lower()
17407 +
17408 + if self._contents.contains(destfile):
17409 + return self._contents.unmap_key(destfile)
17410 +
17411 + if self.getcontents():
17412 + basename = os_filename_arg.path.basename(destfile)
17413 + if self._contents_basenames is None:
17414 +
17415 + try:
17416 + for x in self._contents.keys():
17417 + _unicode_encode(
17418 + x, encoding=_encodings["merge"], errors="strict"
17419 + )
17420 + except UnicodeEncodeError:
17421 + # The package appears to have been merged with a
17422 + # different value of sys.getfilesystemencoding(),
17423 + # so fall back to utf_8 if appropriate.
17424 + try:
17425 + for x in self._contents.keys():
17426 + _unicode_encode(
17427 + x, encoding=_encodings["fs"], errors="strict"
17428 + )
17429 + except UnicodeEncodeError:
17430 + pass
17431 + else:
17432 + os = portage.os
17433 +
17434 + self._contents_basenames = set(
17435 + os.path.basename(x) for x in self._contents.keys()
17436 + )
17437 + if basename not in self._contents_basenames:
17438 + # This is a shortcut that, in most cases, allows us to
17439 + # eliminate this package as an owner without the need
17440 + # to examine inode numbers of parent directories.
17441 + return False
17442 +
17443 + # Use stat rather than lstat since we want to follow
17444 + # any symlinks to the real parent directory.
17445 + parent_path = os_filename_arg.path.dirname(destfile)
17446 + try:
17447 + parent_stat = os_filename_arg.stat(parent_path)
17448 + except EnvironmentError as e:
17449 + if e.errno != errno.ENOENT:
17450 + raise
17451 + del e
17452 + return False
17453 + if self._contents_inodes is None:
17454 +
17455 + if os is _os_merge:
17456 + try:
17457 + for x in self._contents.keys():
17458 + _unicode_encode(
17459 + x, encoding=_encodings["merge"], errors="strict"
17460 + )
17461 + except UnicodeEncodeError:
17462 + # The package appears to have been merged with a
17463 + # different value of sys.getfilesystemencoding(),
17464 + # so fall back to utf_8 if appropriate.
17465 + try:
17466 + for x in self._contents.keys():
17467 + _unicode_encode(
17468 + x, encoding=_encodings["fs"], errors="strict"
17469 + )
17470 + except UnicodeEncodeError:
17471 + pass
17472 + else:
17473 + os = portage.os
17474 +
17475 + self._contents_inodes = {}
17476 + parent_paths = set()
17477 + for x in self._contents.keys():
17478 + p_path = os.path.dirname(x)
17479 + if p_path in parent_paths:
17480 + continue
17481 + parent_paths.add(p_path)
17482 + try:
17483 + s = os.stat(p_path)
17484 + except OSError:
17485 + pass
17486 + else:
17487 + inode_key = (s.st_dev, s.st_ino)
17488 + # Use lists of paths in case multiple
17489 + # paths reference the same inode.
17490 + p_path_list = self._contents_inodes.get(inode_key)
17491 + if p_path_list is None:
17492 + p_path_list = []
17493 + self._contents_inodes[inode_key] = p_path_list
17494 + if p_path not in p_path_list:
17495 + p_path_list.append(p_path)
17496 +
17497 + p_path_list = self._contents_inodes.get(
17498 + (parent_stat.st_dev, parent_stat.st_ino)
17499 + )
17500 + if p_path_list:
17501 + for p_path in p_path_list:
17502 + x = os_filename_arg.path.join(p_path, basename)
17503 + if self._contents.contains(x):
17504 + return self._contents.unmap_key(x)
17505 +
17506 + return False
17507 +
17508 + def _linkmap_rebuild(self, **kwargs):
17509 + """
17510 +        Rebuild self._linkmap unless it is broken due to a missing
17511 +        scanelf binary. Also, return early if preserve-libs is disabled
17512 +        and the preserve-libs registry is empty.
17513 + """
17514 + if (
17515 + self._linkmap_broken
17516 + or self.vartree.dbapi._linkmap is None
17517 + or self.vartree.dbapi._plib_registry is None
17518 + or (
17519 + "preserve-libs" not in self.settings.features
17520 + and not self.vartree.dbapi._plib_registry.hasEntries()
17521 + )
17522 + ):
17523 + return
17524 + try:
17525 + self.vartree.dbapi._linkmap.rebuild(**kwargs)
17526 + except CommandNotFound as e:
17527 + self._linkmap_broken = True
17528 + self._display_merge(
17529 + _(
17530 + "!!! Disabling preserve-libs "
17531 + "due to error: Command Not Found: %s\n"
17532 + )
17533 + % (e,),
17534 + level=logging.ERROR,
17535 + noiselevel=-1,
17536 + )
17537 +
17538 + def _find_libs_to_preserve(self, unmerge=False):
17539 + """
17540 + Get set of relative paths for libraries to be preserved. When
17541 + unmerge is False, file paths to preserve are selected from
17542 + self._installed_instance. Otherwise, paths are selected from
17543 + self.
17544 + """
17545 + if (
17546 + self._linkmap_broken
17547 + or self.vartree.dbapi._linkmap is None
17548 + or self.vartree.dbapi._plib_registry is None
17549 + or (not unmerge and self._installed_instance is None)
17550 + or not self._preserve_libs
17551 + ):
17552 + return set()
17553 +
17554 + os = _os_merge
17555 + linkmap = self.vartree.dbapi._linkmap
17556 + if unmerge:
17557 + installed_instance = self
17558 + else:
17559 + installed_instance = self._installed_instance
17560 + old_contents = installed_instance.getcontents()
17561 + root = self.settings["ROOT"]
17562 + root_len = len(root) - 1
17563 + lib_graph = digraph()
17564 + path_node_map = {}
17565 +
17566 + def path_to_node(path):
17567 + node = path_node_map.get(path)
17568 + if node is None:
17569 + node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
17570 + alt_path_node = lib_graph.get(node)
17571 + if alt_path_node is not None:
17572 + node = alt_path_node
17573 + node.alt_paths.add(path)
17574 + path_node_map[path] = node
17575 + return node
17576 +
17577 + consumer_map = {}
17578 + provider_nodes = set()
17579 + # Create provider nodes and add them to the graph.
17580 + for f_abs in old_contents:
17581 +
17582 + if os is _os_merge:
17583 + try:
17584 + _unicode_encode(
17585 + f_abs, encoding=_encodings["merge"], errors="strict"
17586 + )
17587 + except UnicodeEncodeError:
17588 + # The package appears to have been merged with a
17589 + # different value of sys.getfilesystemencoding(),
17590 + # so fall back to utf_8 if appropriate.
17591 + try:
17592 + _unicode_encode(
17593 + f_abs, encoding=_encodings["fs"], errors="strict"
17594 + )
17595 + except UnicodeEncodeError:
17596 + pass
17597 + else:
17598 + os = portage.os
17599 +
17600 + f = f_abs[root_len:]
17601 + try:
17602 + consumers = linkmap.findConsumers(
17603 + f, exclude_providers=(installed_instance.isowner,)
17604 + )
17605 + except KeyError:
17606 + continue
17607 + if not consumers:
17608 + continue
17609 + provider_node = path_to_node(f)
17610 + lib_graph.add(provider_node, None)
17611 + provider_nodes.add(provider_node)
17612 + consumer_map[provider_node] = consumers
17613 +
17614 + # Create consumer nodes and add them to the graph.
17615 + # Note that consumers can also be providers.
17616 + for provider_node, consumers in consumer_map.items():
17617 + for c in consumers:
17618 + consumer_node = path_to_node(c)
17619 + if (
17620 + installed_instance.isowner(c)
17621 + and consumer_node not in provider_nodes
17622 + ):
17623 + # This is not a provider, so it will be uninstalled.
17624 + continue
17625 + lib_graph.add(provider_node, consumer_node)
17626 +
17627 + # Locate nodes which should be preserved. They consist of all
17628 + # providers that are reachable from consumers that are not
17629 + # providers themselves.
17630 + preserve_nodes = set()
17631 + for consumer_node in lib_graph.root_nodes():
17632 + if consumer_node in provider_nodes:
17633 + continue
17634 + # Preserve all providers that are reachable from this consumer.
17635 + node_stack = lib_graph.child_nodes(consumer_node)
17636 + while node_stack:
17637 + provider_node = node_stack.pop()
17638 + if provider_node in preserve_nodes:
17639 + continue
17640 + preserve_nodes.add(provider_node)
17641 + node_stack.extend(lib_graph.child_nodes(provider_node))
17642 +
17643 + preserve_paths = set()
17644 + for preserve_node in preserve_nodes:
17645 + # Preserve the library itself, and also preserve the
17646 + # soname symlink which is the only symlink that is
17647 + # strictly required.
17648 + hardlinks = set()
17649 + soname_symlinks = set()
17650 + soname = linkmap.getSoname(next(iter(preserve_node.alt_paths)))
17651 + have_replacement_soname_link = False
17652 + have_replacement_hardlink = False
17653 + for f in preserve_node.alt_paths:
17654 + f_abs = os.path.join(root, f.lstrip(os.sep))
17655 + try:
17656 + if stat.S_ISREG(os.lstat(f_abs).st_mode):
17657 + hardlinks.add(f)
17658 + if not unmerge and self.isowner(f):
17659 + have_replacement_hardlink = True
17660 + if os.path.basename(f) == soname:
17661 + have_replacement_soname_link = True
17662 + elif os.path.basename(f) == soname:
17663 + soname_symlinks.add(f)
17664 + if not unmerge and self.isowner(f):
17665 + have_replacement_soname_link = True
17666 + except OSError:
17667 + pass
17668 +
17669 + if have_replacement_hardlink and have_replacement_soname_link:
17670 + continue
17671 +
17672 + if hardlinks:
17673 + preserve_paths.update(hardlinks)
17674 + preserve_paths.update(soname_symlinks)
17675 +
17676 + return preserve_paths
17677 +
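The preservation pass above is a plain depth-first reachability walk: every
provider reachable from a consumer that is not itself a provider survives.
A minimal standalone sketch of that walk, assuming a simple adjacency-dict
graph rather than portage's digraph class:

    def reachable_providers(graph, provider_nodes):
        # graph: node -> list of provider nodes it links against
        # (assumed shape, for illustration only)
        preserve = set()
        for consumer in graph:
            if consumer in provider_nodes:
                continue  # roots are consumers that provide nothing
            stack = list(graph[consumer])
            while stack:
                node = stack.pop()
                if node in preserve:
                    continue
                preserve.add(node)
                stack.extend(graph.get(node, ()))
        return preserve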
17678 + def _add_preserve_libs_to_contents(self, preserve_paths):
17679 + """
17680 + Preserve libs returned from _find_libs_to_preserve().
17681 + """
17682 +
17683 + if not preserve_paths:
17684 + return
17685 +
17686 + os = _os_merge
17687 + showMessage = self._display_merge
17688 + root = self.settings["ROOT"]
17689 +
17690 + # Copy contents entries from the old package to the new one.
17691 + new_contents = self.getcontents().copy()
17692 + old_contents = self._installed_instance.getcontents()
17693 + for f in sorted(preserve_paths):
17694 + f = _unicode_decode(f, encoding=_encodings["content"], errors="strict")
17695 + f_abs = os.path.join(root, f.lstrip(os.sep))
17696 + contents_entry = old_contents.get(f_abs)
17697 + if contents_entry is None:
17698 + # This will probably never happen, but it might if one of the
17699 + # paths returned from findConsumers() refers to one of the libs
17700 + # that should be preserved yet the path is not listed in the
17701 + # contents. Such a path might belong to some other package, so
17702 + # it shouldn't be preserved here.
17703 + showMessage(
17704 + _(
17705 + "!!! File '%s' will not be preserved "
17706 + "due to missing contents entry\n"
17707 + )
17708 + % (f_abs,),
17709 + level=logging.ERROR,
17710 + noiselevel=-1,
17711 + )
17712 + preserve_paths.remove(f)
17713 + continue
17714 + new_contents[f_abs] = contents_entry
17715 + obj_type = contents_entry[0]
17716 + showMessage(_(">>> needed %s %s\n") % (obj_type, f_abs), noiselevel=-1)
17717 + # Add parent directories to contents if necessary.
17718 + parent_dir = os.path.dirname(f_abs)
17719 + while len(parent_dir) > len(root):
17720 + new_contents[parent_dir] = ["dir"]
17721 + prev = parent_dir
17722 + parent_dir = os.path.dirname(parent_dir)
17723 + if prev == parent_dir:
17724 + break
17725 + outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS"))
17726 + write_contents(new_contents, root, outfile)
17727 + outfile.close()
17728 + self._clear_contents_cache()
17729 +
17730 + def _find_unused_preserved_libs(self, unmerge_no_replacement):
17731 + """
17732 + Find preserved libraries that don't have any consumers left.
17733 + """
17734 +
17735 + if (
17736 + self._linkmap_broken
17737 + or self.vartree.dbapi._linkmap is None
17738 + or self.vartree.dbapi._plib_registry is None
17739 + or not self.vartree.dbapi._plib_registry.hasEntries()
17740 + ):
17741 + return {}
17742 +
17743 + # Since preserved libraries can be consumers of other preserved
17744 + # libraries, use a graph to track consumer relationships.
17745 + plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
17746 + linkmap = self.vartree.dbapi._linkmap
17747 + lib_graph = digraph()
17748 + preserved_nodes = set()
17749 + preserved_paths = set()
17750 + path_cpv_map = {}
17751 + path_node_map = {}
17752 + root = self.settings["ROOT"]
17753 +
17754 + def path_to_node(path):
17755 + node = path_node_map.get(path)
17756 + if node is None:
17757 - node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
17758 ++ chost = self.settings.get('CHOST', '')
17759 ++ if 'darwin' in chost:
17760 ++ node = LinkageMapMachO._LibGraphNode(linkmap._obj_key(path))
17761 ++ elif 'interix' in chost or 'winnt' in chost:
17762 ++ node = LinkageMapPeCoff._LibGraphNode(linkmap._obj_key(path))
17763 ++ elif 'aix' in chost:
17764 ++ node = LinkageMapXCoff._LibGraphNode(linkmap._obj_key(path))
17765 ++ else:
17766 ++ node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
17767 + alt_path_node = lib_graph.get(node)
17768 + if alt_path_node is not None:
17769 + node = alt_path_node
17770 + node.alt_paths.add(path)
17771 + path_node_map[path] = node
17772 + return node
17773 +
17774 + for cpv, plibs in plib_dict.items():
17775 + for f in plibs:
17776 + path_cpv_map[f] = cpv
17777 + preserved_node = path_to_node(f)
17778 + if not preserved_node.file_exists():
17779 + continue
17780 + lib_graph.add(preserved_node, None)
17781 + preserved_paths.add(f)
17782 + preserved_nodes.add(preserved_node)
17783 + for c in self.vartree.dbapi._linkmap.findConsumers(f):
17784 + consumer_node = path_to_node(c)
17785 + if not consumer_node.file_exists():
17786 + continue
17787 + # Note that consumers may also be providers.
17788 + lib_graph.add(preserved_node, consumer_node)
17789 +
17790 + # Eliminate consumers having providers with the same soname as an
17791 + # installed library that is not preserved. This eliminates
17792 + # libraries that are erroneously preserved due to a move from one
17793 + # directory to another.
17794 + # Also eliminate consumers that are going to be unmerged if
17795 + # unmerge_no_replacement is True.
17796 + provider_cache = {}
17797 + for preserved_node in preserved_nodes:
17798 + soname = linkmap.getSoname(preserved_node)
17799 + for consumer_node in lib_graph.parent_nodes(preserved_node):
17800 + if consumer_node in preserved_nodes:
17801 + continue
17802 + if unmerge_no_replacement:
17803 + will_be_unmerged = True
17804 + for path in consumer_node.alt_paths:
17805 + if not self.isowner(path):
17806 + will_be_unmerged = False
17807 + break
17808 + if will_be_unmerged:
17809 + # This consumer is not preserved and it is
17810 + # being unmerged, so drop this edge.
17811 + lib_graph.remove_edge(preserved_node, consumer_node)
17812 + continue
17813 +
17814 + providers = provider_cache.get(consumer_node)
17815 + if providers is None:
17816 + providers = linkmap.findProviders(consumer_node)
17817 + provider_cache[consumer_node] = providers
17818 + providers = providers.get(soname)
17819 + if providers is None:
17820 + continue
17821 + for provider in providers:
17822 + if provider in preserved_paths:
17823 + continue
17824 + provider_node = path_to_node(provider)
17825 + if not provider_node.file_exists():
17826 + continue
17827 + if provider_node in preserved_nodes:
17828 + continue
17829 + # An alternative provider seems to be
17830 + # installed, so drop this edge.
17831 + lib_graph.remove_edge(preserved_node, consumer_node)
17832 + break
17833 +
17834 + cpv_lib_map = {}
17835 + while lib_graph:
17836 + root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
17837 + if not root_nodes:
17838 + break
17839 + lib_graph.difference_update(root_nodes)
17840 + unlink_list = set()
17841 + for node in root_nodes:
17842 + unlink_list.update(node.alt_paths)
17843 + unlink_list = sorted(unlink_list)
17844 + for obj in unlink_list:
17845 + cpv = path_cpv_map.get(obj)
17846 + if cpv is None:
17847 + # This means that a symlink is in the preserved libs
17848 + # registry, but the actual lib it points to is not.
17849 + self._display_merge(
17850 + _(
17851 + "!!! symlink to lib is preserved, "
17852 + "but not the lib itself:\n!!! '%s'\n"
17853 + )
17854 + % (obj,),
17855 + level=logging.ERROR,
17856 + noiselevel=-1,
17857 + )
17858 + continue
17859 + removed = cpv_lib_map.get(cpv)
17860 + if removed is None:
17861 + removed = set()
17862 + cpv_lib_map[cpv] = removed
17863 + removed.add(obj)
17864 +
17865 + return cpv_lib_map
17866 +
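The while loop above peels the graph in layers: preserved libraries that
currently have no consumers are taken out, which may expose further unused
preserved libraries on the next pass. A rough fixed-point equivalent,
assuming a node -> set-of-consumers mapping instead of portage's digraph:

    def unused_preserved(consumers_of, preserved):
        unused = set()
        changed = True
        while changed:
            changed = False
            for node in preserved:
                if node in unused:
                    continue
                # live consumers are those not already marked unused
                if not (consumers_of.get(node, set()) - unused):
                    unused.add(node)
                    changed = True
        return unused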
17867 + def _remove_preserved_libs(self, cpv_lib_map):
17868 + """
17869 + Remove files returned from _find_unused_preserved_libs().
17870 + """
17871 +
17872 + os = _os_merge
17873 +
17874 + files_to_remove = set()
17875 + for files in cpv_lib_map.values():
17876 + files_to_remove.update(files)
17877 + files_to_remove = sorted(files_to_remove)
17878 + showMessage = self._display_merge
17879 + root = self.settings["ROOT"]
17880 +
17881 + parent_dirs = set()
17882 + for obj in files_to_remove:
17883 + obj = os.path.join(root, obj.lstrip(os.sep))
17884 + parent_dirs.add(os.path.dirname(obj))
17885 + if os.path.islink(obj):
17886 + obj_type = _("sym")
17887 + else:
17888 + obj_type = _("obj")
17889 + try:
17890 + os.unlink(obj)
17891 + except OSError as e:
17892 + if e.errno != errno.ENOENT:
17893 + raise
17894 + del e
17895 + else:
17896 + showMessage(_("<<< !needed %s %s\n") % (obj_type, obj), noiselevel=-1)
17897 +
17898 + # Remove empty parent directories if possible.
17899 + while parent_dirs:
17900 + x = parent_dirs.pop()
17901 + while True:
17902 + try:
17903 + os.rmdir(x)
17904 + except OSError:
17905 + break
17906 + prev = x
17907 + x = os.path.dirname(x)
17908 + if x == prev:
17909 + break
17910 +
17911 + self.vartree.dbapi._plib_registry.pruneNonExisting()
17912 +
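The directory cleanup above relies on rmdir() refusing to remove non-empty
directories, so walking upward until it fails prunes exactly the empty
ancestors. The same idiom in isolation (hypothetical helper, standard os
module only):

    import os

    def prune_empty_parents(path):
        d = os.path.dirname(path)
        while True:
            try:
                os.rmdir(d)  # raises OSError if non-empty or protected
            except OSError:
                break
            prev = d
            d = os.path.dirname(d)
            if d == prev:  # reached the filesystem root
                break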
17913 + def _collision_protect(self, srcroot, destroot, mypkglist, file_list, symlink_list):
17914 +
17915 + os = _os_merge
17916 +
17917 + real_relative_paths = {}
17918 +
17919 + collision_ignore = []
17920 + for x in portage.util.shlex_split(self.settings.get("COLLISION_IGNORE", "")):
17921 + if os.path.isdir(os.path.join(self._eroot, x.lstrip(os.sep))):
17922 + x = normalize_path(x)
17923 + x += "/*"
17924 + collision_ignore.append(x)
17925 +
17926 + # For collisions with preserved libraries, the current package
17927 + # will assume ownership and the libraries will be unregistered.
17928 + if self.vartree.dbapi._plib_registry is None:
17929 + # preserve-libs is entirely disabled
17930 + plib_cpv_map = None
17931 + plib_paths = None
17932 + plib_inodes = {}
17933 + else:
17934 + plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
17935 + plib_cpv_map = {}
17936 + plib_paths = set()
17937 + for cpv, paths in plib_dict.items():
17938 + plib_paths.update(paths)
17939 + for f in paths:
17940 + plib_cpv_map[f] = cpv
17941 + plib_inodes = self._lstat_inode_map(plib_paths)
17942 +
17943 + plib_collisions = {}
17944 +
17945 + showMessage = self._display_merge
17946 + stopmerge = False
17947 + collisions = []
17948 + dirs = set()
17949 + dirs_ro = set()
17950 + symlink_collisions = []
17951 + destroot = self.settings["ROOT"]
17952 + totfiles = len(file_list) + len(symlink_list)
17953 + previous = time.monotonic()
17954 + progress_shown = False
17955 + report_interval = 1.7 # seconds
17956 + falign = len("%d" % totfiles)
17957 + showMessage(
17958 + _(" %s checking %d files for package collisions\n")
17959 + % (colorize("GOOD", "*"), totfiles)
17960 + )
17961 + for i, (f, f_type) in enumerate(
17962 + chain(((f, "reg") for f in file_list), ((f, "sym") for f in symlink_list))
17963 + ):
17964 + current = time.monotonic()
17965 + if current - previous > report_interval:
17966 + showMessage(
17967 + _("%3d%% done, %*d files remaining ...\n")
17968 + % (i * 100 / totfiles, falign, totfiles - i)
17969 + )
17970 + previous = current
17971 + progress_shown = True
17972 +
17973 + dest_path = normalize_path(os.path.join(destroot, f.lstrip(os.path.sep)))
17974 +
17975 + # Relative path with symbolic links resolved only in parent directories
17976 + real_relative_path = os.path.join(
17977 + os.path.realpath(os.path.dirname(dest_path)),
17978 + os.path.basename(dest_path),
17979 + )[len(destroot) :]
17980 +
17981 + real_relative_paths.setdefault(real_relative_path, []).append(
17982 + f.lstrip(os.path.sep)
17983 + )
17984 +
17985 + parent = os.path.dirname(dest_path)
17986 + if parent not in dirs:
17987 + for x in iter_parents(parent):
17988 + if x in dirs:
17989 + break
17990 + dirs.add(x)
17991 + if os.path.isdir(x):
17992 + if not os.access(x, os.W_OK):
17993 + dirs_ro.add(x)
17994 + break
17995 +
17996 + try:
17997 + dest_lstat = os.lstat(dest_path)
17998 + except EnvironmentError as e:
17999 + if e.errno == errno.ENOENT:
18000 + del e
18001 + continue
18002 + elif e.errno == errno.ENOTDIR:
18003 + del e
18004 + # A non-directory is in a location where this package
18005 + # expects to have a directory.
18006 + dest_lstat = None
18007 + parent_path = dest_path
18008 + while len(parent_path) > len(destroot):
18009 + parent_path = os.path.dirname(parent_path)
18010 + try:
18011 + dest_lstat = os.lstat(parent_path)
18012 + break
18013 + except EnvironmentError as e:
18014 + if e.errno != errno.ENOTDIR:
18015 + raise
18016 + del e
18017 + if not dest_lstat:
18018 + raise AssertionError(
18019 + "unable to find non-directory "
18020 + + "parent for '%s'" % dest_path
18021 + )
18022 + dest_path = parent_path
18023 + f = os.path.sep + dest_path[len(destroot) :]
18024 + if f in collisions:
18025 + continue
18026 + else:
18027 + raise
18028 + if f[0] != "/":
18029 + f = "/" + f
18030 +
18031 + if stat.S_ISDIR(dest_lstat.st_mode):
18032 + if f_type == "sym":
18033 + # This case is explicitly banned
18034 + # by PMS (see bug #326685).
18035 + symlink_collisions.append(f)
18036 + collisions.append(f)
18037 + continue
18038 +
18039 + plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino))
18040 + if plibs:
18041 + for path in plibs:
18042 + cpv = plib_cpv_map[path]
18043 + paths = plib_collisions.get(cpv)
18044 + if paths is None:
18045 + paths = set()
18046 + plib_collisions[cpv] = paths
18047 + paths.add(path)
18048 + # The current package will assume ownership and the
18049 + # libraries will be unregistered, so exclude this
18050 + # path from the normal collisions.
18051 + continue
18052 +
18053 + isowned = False
18054 + full_path = os.path.join(destroot, f.lstrip(os.path.sep))
18055 + for ver in mypkglist:
18056 + if ver.isowner(f):
18057 + isowned = True
18058 + break
18059 + if not isowned and self.isprotected(full_path):
18060 + isowned = True
18061 + if not isowned:
18062 + f_match = full_path[len(self._eroot) - 1 :]
18063 + stopmerge = True
18064 + for pattern in collision_ignore:
18065 + if fnmatch.fnmatch(f_match, pattern):
18066 + stopmerge = False
18067 + break
18068 + if stopmerge:
18069 + collisions.append(f)
18070 +
18071 + internal_collisions = {}
18072 + for real_relative_path, files in real_relative_paths.items():
18073 + # Detect internal collisions between non-identical files.
18074 + if len(files) >= 2:
18075 + files.sort()
18076 + for i in range(len(files) - 1):
18077 + file1 = normalize_path(os.path.join(srcroot, files[i]))
18078 + file2 = normalize_path(os.path.join(srcroot, files[i + 1]))
18079 + # Compare files, ignoring differences in times.
18080 + differences = compare_files(
18081 + file1, file2, skipped_types=("atime", "mtime", "ctime")
18082 + )
18083 + if differences:
18084 + internal_collisions.setdefault(real_relative_path, {})[
18085 + (files[i], files[i + 1])
18086 + ] = differences
18087 +
18088 + if progress_shown:
18089 + showMessage(_("100% done\n"))
18090 +
18091 + return (
18092 + collisions,
18093 + internal_collisions,
18094 + dirs_ro,
18095 + symlink_collisions,
18096 + plib_collisions,
18097 + )
18098 +
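COLLISION_IGNORE entries that name directories are expanded to 'dir/*'
globs before matching, so both exact entries and anything below an ignored
directory are skipped. The matching step itself reduces to fnmatch over the
path relative to EROOT (illustrative helper, not portage API):

    import fnmatch

    def is_ignored(rel_path, collision_ignore):
        # collision_ignore: patterns with directory entries already
        # expanded to 'dir/*' globs, as done above
        return any(fnmatch.fnmatch(rel_path, pat) for pat in collision_ignore)

    # e.g. is_ignored("/usr/share/doc/pkg/x", ["/usr/share/doc/*"]) -> True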
18099 + def _lstat_inode_map(self, path_iter):
18100 + """
18101 + Use lstat to create a map of the form:
18102 + {(st_dev, st_ino) : set([path1, path2, ...])}
18103 + Multiple paths may reference the same inode due to hardlinks.
18104 + All lstat() calls are relative to self.myroot.
18105 + """
18106 +
18107 + os = _os_merge
18108 +
18109 + root = self.settings["ROOT"]
18110 + inode_map = {}
18111 + for f in path_iter:
18112 + path = os.path.join(root, f.lstrip(os.sep))
18113 + try:
18114 + st = os.lstat(path)
18115 + except OSError as e:
18116 + if e.errno not in (errno.ENOENT, errno.ENOTDIR):
18117 + raise
18118 + del e
18119 + continue
18120 + key = (st.st_dev, st.st_ino)
18121 + paths = inode_map.get(key)
18122 + if paths is None:
18123 + paths = set()
18124 + inode_map[key] = paths
18125 + paths.add(f)
18126 + return inode_map
18127 +
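Since every hardlink to a file shares its (st_dev, st_ino) pair, bucketing
lstat() results by that key groups all names of one inode together. A
self-contained version of the same mapping (sketch, without the
merge-encoding handling):

    import os

    def inode_map(paths):
        out = {}
        for p in paths:
            try:
                st = os.lstat(p)
            except OSError:
                continue  # vanished or unreachable paths are skipped
            out.setdefault((st.st_dev, st.st_ino), set()).add(p)
        return out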
18128 + def _security_check(self, installed_instances):
18129 + if not installed_instances:
18130 + return 0
18131 +
18132 + os = _os_merge
18133 +
18134 + showMessage = self._display_merge
18135 +
18136 + file_paths = set()
18137 + for dblnk in installed_instances:
18138 + file_paths.update(dblnk.getcontents())
18139 + inode_map = {}
18140 + real_paths = set()
18141 + for i, path in enumerate(file_paths):
18142 +
18143 + if os is _os_merge:
18144 + try:
18145 + _unicode_encode(path, encoding=_encodings["merge"], errors="strict")
18146 + except UnicodeEncodeError:
18147 + # The package appears to have been merged with a
18148 + # different value of sys.getfilesystemencoding(),
18149 + # so fall back to utf_8 if appropriate.
18150 + try:
18151 + _unicode_encode(
18152 + path, encoding=_encodings["fs"], errors="strict"
18153 + )
18154 + except UnicodeEncodeError:
18155 + pass
18156 + else:
18157 + os = portage.os
18158 +
18159 + try:
18160 + s = os.lstat(path)
18161 + except OSError as e:
18162 + if e.errno not in (errno.ENOENT, errno.ENOTDIR):
18163 + raise
18164 + del e
18165 + continue
18166 + if not stat.S_ISREG(s.st_mode):
18167 + continue
18168 + path = os.path.realpath(path)
18169 + if path in real_paths:
18170 + continue
18171 + real_paths.add(path)
18172 + if s.st_nlink > 1 and s.st_mode & (stat.S_ISUID | stat.S_ISGID):
18173 + k = (s.st_dev, s.st_ino)
18174 + inode_map.setdefault(k, []).append((path, s))
18175 + suspicious_hardlinks = []
18176 + for path_list in inode_map.values():
18177 + path, s = path_list[0]
18178 + if len(path_list) == s.st_nlink:
18179 + # All hardlinks seem to be owned by this package.
18180 + continue
18181 + suspicious_hardlinks.append(path_list)
18182 + if not suspicious_hardlinks:
18183 + return 0
18184 +
18185 + msg = []
18186 + msg.append(_("suid/sgid file(s) " "with suspicious hardlink(s):"))
18187 + msg.append("")
18188 + for path_list in suspicious_hardlinks:
18189 + for path, s in path_list:
18190 + msg.append("\t%s" % path)
18191 + msg.append("")
18192 + msg.append(
18193 + _("See the Gentoo Security Handbook " "guide for advice on how to proceed.")
18194 + )
18195 +
18196 + self._eerror("preinst", msg)
18197 +
18198 + return 1
18199 +
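The heuristic above: for a suid/sgid regular file with st_nlink > 1, the
package should account for every name of that inode; if fewer owned paths
were collected than the link count, an unknown hardlink exists somewhere
else and the file is flagged. The core test, extracted for clarity:

    def is_suspicious(owned):
        # owned: list of (path, stat_result) pairs for one inode,
        # as gathered in the inode map above
        path, st = owned[0]
        return len(owned) != st.st_nlink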
18200 + def _eqawarn(self, phase, lines):
18201 + self._elog("eqawarn", phase, lines)
18202 +
18203 + def _eerror(self, phase, lines):
18204 + self._elog("eerror", phase, lines)
18205 +
18206 + def _elog(self, funcname, phase, lines):
18207 + func = getattr(portage.elog.messages, funcname)
18208 + if self._scheduler is None:
18209 + for l in lines:
18210 + func(l, phase=phase, key=self.mycpv)
18211 + else:
18212 + background = self.settings.get("PORTAGE_BACKGROUND") == "1"
18213 + log_path = None
18214 + if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
18215 + log_path = self.settings.get("PORTAGE_LOG_FILE")
18216 + out = io.StringIO()
18217 + for line in lines:
18218 + func(line, phase=phase, key=self.mycpv, out=out)
18219 + msg = out.getvalue()
18220 + self._scheduler.output(msg, background=background, log_path=log_path)
18221 +
18222 + def _elog_process(self, phasefilter=None):
18223 + cpv = self.mycpv
18224 + if self._pipe is None:
18225 + elog_process(cpv, self.settings, phasefilter=phasefilter)
18226 + else:
18227 + logdir = os.path.join(self.settings["T"], "logging")
18228 + ebuild_logentries = collect_ebuild_messages(logdir)
18229 + # phasefilter is irrelevant for the above collect_ebuild_messages
18230 + # call, since this package instance has a private logdir. However,
18231 + # it may be relevant for the following collect_messages call.
18232 + py_logentries = collect_messages(key=cpv, phasefilter=phasefilter).get(
18233 + cpv, {}
18234 + )
18235 + logentries = _merge_logentries(py_logentries, ebuild_logentries)
18236 + funcnames = {
18237 + "INFO": "einfo",
18238 + "LOG": "elog",
18239 + "WARN": "ewarn",
18240 + "QA": "eqawarn",
18241 + "ERROR": "eerror",
18242 + }
18243 + str_buffer = []
18244 + for phase, messages in logentries.items():
18245 + for key, lines in messages:
18246 + funcname = funcnames[key]
18247 + if isinstance(lines, str):
18248 + lines = [lines]
18249 + for line in lines:
18250 + for subline in line.split("\n"):
18251 + fields = (funcname, phase, cpv, subline)
18252 + str_buffer.append(" ".join(fields))
18253 + str_buffer.append("\n")
18254 + if str_buffer:
18255 + str_buffer = _unicode_encode("".join(str_buffer))
18256 + while str_buffer:
18257 + str_buffer = str_buffer[os.write(self._pipe, str_buffer) :]
18258 +
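When a pipe is in use, each record is serialized as one line of four
space-separated fields: funcname, phase, cpv, message. A consumer on the
other end could recover them like this (hypothetical reader, not part of
portage's API; maxsplit keeps spaces inside the message intact):

    def parse_elog_record(line):
        funcname, phase, cpv, message = line.split(" ", 3)
        return funcname, phase, cpv, message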
18259 + def _emerge_log(self, msg):
18260 + emergelog(False, msg)
18261 +
18262 + def treewalk(
18263 + self,
18264 + srcroot,
18265 + destroot,
18266 + inforoot,
18267 + myebuild,
18268 + cleanup=0,
18269 + mydbapi=None,
18270 + prev_mtimes=None,
18271 + counter=None,
18272 + ):
18273 + """
18274 +
18275 + This function does the following:
18276 +
18277 + calls doebuild(mydo=instprep)
18278 + calls get_ro_checker to retrieve a function for checking whether Portage
18279 + will write to a read-only filesystem, then runs it against the directory list
18280 + calls self._preserve_libs if FEATURES=preserve-libs
18281 + calls self._collision_protect if FEATURES=collision-protect
18282 + calls doebuild(mydo=pkg_preinst)
18283 + merges the package to the livefs
18284 + unmerges the old version (if required)
18285 + calls doebuild(mydo=pkg_postinst)
18286 + calls env_update
18287 +
18288 + @param srcroot: Typically this is ${D}
18289 + @type srcroot: String (Path)
18290 + @param destroot: ignored, self.settings['ROOT'] is used instead
18291 + @type destroot: String (Path)
18292 + @param inforoot: root of the vardb entry (the build-info directory)
18293 + @type inforoot: String (Path)
18294 + @param myebuild: path to the ebuild that we are processing
18295 + @type myebuild: String (Path)
18296 + @param mydbapi: dbapi which is handed to doebuild.
18297 + @type mydbapi: portdbapi instance
18298 + @param prev_mtimes: { Filename:mtime } mapping for env_update
18299 + @type prev_mtimes: Dictionary
18300 + @rtype: Boolean
18301 + @return:
18302 + 1. 0 on success
18303 + 2. 1 on failure
18304 +
18305 + secondhand is a list of symlinks that have been skipped due to their target
18306 + not existing; we will merge these symlinks at a later time.
18307 + """
18308 +
18309 + os = _os_merge
18310 +
18311 + srcroot = _unicode_decode(
18312 + srcroot, encoding=_encodings["content"], errors="strict"
18313 + )
18314 + destroot = self.settings["ROOT"]
18315 + inforoot = _unicode_decode(
18316 + inforoot, encoding=_encodings["content"], errors="strict"
18317 + )
18318 + myebuild = _unicode_decode(
18319 + myebuild, encoding=_encodings["content"], errors="strict"
18320 + )
18321 +
18322 + showMessage = self._display_merge
18323 + srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
18324 +
18325 + if not os.path.isdir(srcroot):
18326 + showMessage(
18327 + _("!!! Directory Not Found: D='%s'\n") % srcroot,
18328 + level=logging.ERROR,
18329 + noiselevel=-1,
18330 + )
18331 + return 1
18332 +
18333 + # run instprep internal phase
18334 + doebuild_environment(myebuild, "instprep", settings=self.settings, db=mydbapi)
18335 + phase = EbuildPhase(
18336 + background=False,
18337 + phase="instprep",
18338 + scheduler=self._scheduler,
18339 + settings=self.settings,
18340 + )
18341 + phase.start()
18342 + if phase.wait() != os.EX_OK:
18343 + showMessage(_("!!! instprep failed\n"), level=logging.ERROR, noiselevel=-1)
18344 + return 1
18345 +
18346 + is_binpkg = self.settings.get("EMERGE_FROM") == "binary"
18347 + slot = ""
18348 + for var_name in ("CHOST", "SLOT"):
18349 + try:
18350 + with io.open(
18351 + _unicode_encode(
18352 + os.path.join(inforoot, var_name),
18353 + encoding=_encodings["fs"],
18354 + errors="strict",
18355 + ),
18356 + mode="r",
18357 + encoding=_encodings["repo.content"],
18358 + errors="replace",
18359 + ) as f:
18360 + val = f.readline().strip()
18361 + except EnvironmentError as e:
18362 + if e.errno != errno.ENOENT:
18363 + raise
18364 + del e
18365 + val = ""
18366 +
18367 + if var_name == "SLOT":
18368 + slot = val
18369 +
18370 + if not slot.strip():
18371 + slot = self.settings.get(var_name, "")
18372 + if not slot.strip():
18373 + showMessage(
18374 + _("!!! SLOT is undefined\n"),
18375 + level=logging.ERROR,
18376 + noiselevel=-1,
18377 + )
18378 + return 1
18379 + write_atomic(os.path.join(inforoot, var_name), slot + "\n")
18380 +
18381 + # This check only applies when built from source, since
18382 + # inforoot values are written just after src_install.
18383 + if not is_binpkg and val != self.settings.get(var_name, ""):
18384 + self._eqawarn(
18385 + "preinst",
18386 + [
18387 + _(
18388 + "QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n"
18389 + )
18390 + % {
18391 + "var_name": var_name,
18392 + "expected_value": self.settings.get(var_name, ""),
18393 + "actual_value": val,
18394 + }
18395 + ],
18396 + )
18397 +
18398 + def eerror(lines):
18399 + self._eerror("preinst", lines)
18400 +
18401 + if not os.path.exists(self.dbcatdir):
18402 + ensure_dirs(self.dbcatdir)
18403 +
18404 + # NOTE: We use SLOT obtained from the inforoot
18405 + # directory, in order to support USE=multislot.
18406 + # Use _pkg_str to discard the sub-slot part if necessary.
18407 + slot = _pkg_str(self.mycpv, slot=slot).slot
18408 + cp = self.mysplit[0]
18409 + slot_atom = "%s:%s" % (cp, slot)
18410 +
18411 + self.lockdb()
18412 + try:
18413 + # filter any old-style virtual matches
18414 + slot_matches = [
18415 + cpv
18416 + for cpv in self.vartree.dbapi.match(slot_atom)
18417 + if cpv_getkey(cpv) == cp
18418 + ]
18419 +
18420 + if self.mycpv not in slot_matches and self.vartree.dbapi.cpv_exists(
18421 + self.mycpv
18422 + ):
18423 + # handle multislot or unapplied slotmove
18424 + slot_matches.append(self.mycpv)
18425 +
18426 + others_in_slot = []
18427 + for cur_cpv in slot_matches:
18428 + # Clone the config in case one of these has to be unmerged,
18429 + # since we need it to have private ${T} etc... for things
18430 + # like elog.
18431 + settings_clone = portage.config(clone=self.settings)
18432 + # This reset ensures that there is no unintended leakage
18433 + # of variables which should not be shared.
18434 + settings_clone.reset()
18435 + settings_clone.setcpv(cur_cpv, mydb=self.vartree.dbapi)
18436 + if (
18437 + self._preserve_libs
18438 + and "preserve-libs" in settings_clone["PORTAGE_RESTRICT"].split()
18439 + ):
18440 + self._preserve_libs = False
18441 + others_in_slot.append(
18442 + dblink(
18443 + self.cat,
18444 + catsplit(cur_cpv)[1],
18445 + settings=settings_clone,
18446 + vartree=self.vartree,
18447 + treetype="vartree",
18448 + scheduler=self._scheduler,
18449 + pipe=self._pipe,
18450 + )
18451 + )
18452 + finally:
18453 + self.unlockdb()
18454 +
18455 + # If any instance has RESTRICT=preserve-libs, then
18456 + # restrict it for all instances.
18457 + if not self._preserve_libs:
18458 + for dblnk in others_in_slot:
18459 + dblnk._preserve_libs = False
18460 +
18461 + retval = self._security_check(others_in_slot)
18462 + if retval:
18463 + return retval
18464 +
18465 + if slot_matches:
18466 + # Used by self.isprotected().
18467 + max_dblnk = None
18468 + max_counter = -1
18469 + for dblnk in others_in_slot:
18470 + cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
18471 + if cur_counter > max_counter:
18472 + max_counter = cur_counter
18473 + max_dblnk = dblnk
18474 + self._installed_instance = max_dblnk
18475 +
18476 + # Apply INSTALL_MASK before collision-protect, since it may
18477 + # be useful to avoid collisions in some scenarios.
18478 + # We cannot detect if this is needed or not here as INSTALL_MASK can be
18479 + # modified by bashrc files.
18480 + phase = MiscFunctionsProcess(
18481 + background=False,
18482 + commands=["preinst_mask"],
18483 + phase="preinst",
18484 + scheduler=self._scheduler,
18485 + settings=self.settings,
18486 + )
18487 + phase.start()
18488 + phase.wait()
18489 + try:
18490 + with io.open(
18491 + _unicode_encode(
18492 + os.path.join(inforoot, "INSTALL_MASK"),
18493 + encoding=_encodings["fs"],
18494 + errors="strict",
18495 + ),
18496 + mode="r",
18497 + encoding=_encodings["repo.content"],
18498 + errors="replace",
18499 + ) as f:
18500 + install_mask = InstallMask(f.read())
18501 + except EnvironmentError:
18502 + install_mask = None
18503 +
18504 + if install_mask:
18505 + install_mask_dir(self.settings["ED"], install_mask)
18506 + if any(x in self.settings.features for x in ("nodoc", "noman", "noinfo")):
18507 + try:
18508 + os.rmdir(os.path.join(self.settings["ED"], "usr", "share"))
18509 + except OSError:
18510 + pass
18511 +
18512 + # We check for unicode encoding issues after src_install. However,
18513 + # the check must be repeated here for binary packages (it's
18514 + # inexpensive since we call os.walk() here anyway).
18515 + unicode_errors = []
18516 + line_ending_re = re.compile("[\n\r]")
18517 + srcroot_len = len(srcroot)
18518 + ed_len = len(self.settings["ED"])
18519 + eprefix_len = len(self.settings["EPREFIX"])
18520 +
18521 + while True:
18522 +
18523 + unicode_error = False
18524 + eagain_error = False
18525 +
18526 + filelist = []
18527 + linklist = []
18528 + paths_with_newlines = []
18529 +
18530 + def onerror(e):
18531 + raise
18532 +
18533 + walk_iter = os.walk(srcroot, onerror=onerror)
18534 + while True:
18535 + try:
18536 + parent, dirs, files = next(walk_iter)
18537 + except StopIteration:
18538 + break
18539 + except OSError as e:
18540 + if e.errno != errno.EAGAIN:
18541 + raise
18542 + # Observed with PyPy 1.8.
18543 + eagain_error = True
18544 + break
18545 +
18546 + try:
18547 + parent = _unicode_decode(
18548 + parent, encoding=_encodings["merge"], errors="strict"
18549 + )
18550 + except UnicodeDecodeError:
18551 + new_parent = _unicode_decode(
18552 + parent, encoding=_encodings["merge"], errors="replace"
18553 + )
18554 + new_parent = _unicode_encode(
18555 + new_parent, encoding="ascii", errors="backslashreplace"
18556 + )
18557 + new_parent = _unicode_decode(
18558 + new_parent, encoding=_encodings["merge"], errors="replace"
18559 + )
18560 + os.rename(parent, new_parent)
18561 + unicode_error = True
18562 + unicode_errors.append(new_parent[ed_len:])
18563 + break
18564 +
18565 + for fname in files:
18566 + try:
18567 + fname = _unicode_decode(
18568 + fname, encoding=_encodings["merge"], errors="strict"
18569 + )
18570 + except UnicodeDecodeError:
18571 + fpath = portage._os.path.join(
18572 + parent.encode(_encodings["merge"]), fname
18573 + )
18574 + new_fname = _unicode_decode(
18575 + fname, encoding=_encodings["merge"], errors="replace"
18576 + )
18577 + new_fname = _unicode_encode(
18578 + new_fname, encoding="ascii", errors="backslashreplace"
18579 + )
18580 + new_fname = _unicode_decode(
18581 + new_fname, encoding=_encodings["merge"], errors="replace"
18582 + )
18583 + new_fpath = os.path.join(parent, new_fname)
18584 + os.rename(fpath, new_fpath)
18585 + unicode_error = True
18586 + unicode_errors.append(new_fpath[ed_len:])
18587 + fname = new_fname
18588 + fpath = new_fpath
18589 + else:
18590 + fpath = os.path.join(parent, fname)
18591 +
18592 + relative_path = fpath[srcroot_len:]
18593 +
18594 + if line_ending_re.search(relative_path) is not None:
18595 + paths_with_newlines.append(relative_path)
18596 +
18597 + file_mode = os.lstat(fpath).st_mode
18598 + if stat.S_ISREG(file_mode):
18599 + filelist.append(relative_path)
18600 + elif stat.S_ISLNK(file_mode):
18601 + # Note: os.walk puts symlinks to directories in the "dirs"
18602 + # list and it does not traverse them since that could lead
18603 + # to an infinite recursion loop.
18604 + linklist.append(relative_path)
18605 +
18606 + myto = _unicode_decode(
18607 + _os.readlink(
18608 + _unicode_encode(
18609 + fpath, encoding=_encodings["merge"], errors="strict"
18610 + )
18611 + ),
18612 + encoding=_encodings["merge"],
18613 + errors="replace",
18614 + )
18615 + if line_ending_re.search(myto) is not None:
18616 + paths_with_newlines.append(relative_path)
18617 +
18618 + if unicode_error:
18619 + break
18620 +
18621 + if not (unicode_error or eagain_error):
18622 + break
18623 +
18624 + if unicode_errors:
18625 + self._elog("eqawarn", "preinst", _merge_unicode_error(unicode_errors))
18626 +
18627 + if paths_with_newlines:
18628 + msg = []
18629 + msg.append(
18630 + _(
18631 + "This package installs one or more files containing line ending characters:"
18632 + )
18633 + )
18634 + msg.append("")
18635 + paths_with_newlines.sort()
18636 + for f in paths_with_newlines:
18637 + msg.append("\t/%s" % (f.replace("\n", "\\n").replace("\r", "\\r")))
18638 + msg.append("")
18639 + msg.append(_("package %s NOT merged") % self.mycpv)
18640 + msg.append("")
18641 + eerror(msg)
18642 + return 1
18643 +
18644 + # If there are no files to merge, and an installed package in the same
18645 + # slot has files, it probably means that something went wrong.
18646 + if (
18647 + self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1"
18648 + and not filelist
18649 + and not linklist
18650 + and others_in_slot
18651 + ):
18652 + installed_files = None
18653 + for other_dblink in others_in_slot:
18654 + installed_files = other_dblink.getcontents()
18655 + if not installed_files:
18656 + continue
18657 + from textwrap import wrap
18658 +
18659 + wrap_width = 72
18660 + msg = []
18661 + d = {"new_cpv": self.mycpv, "old_cpv": other_dblink.mycpv}
18662 + msg.extend(
18663 + wrap(
18664 + _(
18665 + "The '%(new_cpv)s' package will not install "
18666 + "any files, but the currently installed '%(old_cpv)s'"
18667 + " package has the following files: "
18668 + )
18669 + % d,
18670 + wrap_width,
18671 + )
18672 + )
18673 + msg.append("")
18674 + msg.extend(sorted(installed_files))
18675 + msg.append("")
18676 + msg.append(_("package %s NOT merged") % self.mycpv)
18677 + msg.append("")
18678 + msg.extend(
18679 + wrap(
18680 + _(
18681 + "Manually run `emerge --unmerge =%s` if you "
18682 + "really want to remove the above files. Set "
18683 + 'PORTAGE_PACKAGE_EMPTY_ABORT="0" in '
18684 + "/etc/portage/make.conf if you do not want to "
18685 + "abort in cases like this."
18686 + )
18687 + % other_dblink.mycpv,
18688 + wrap_width,
18689 + )
18690 + )
18691 + eerror(msg)
18692 + if installed_files:
18693 + return 1
18694 +
18695 + # Make sure the ebuild environment is initialized and that ${T}/elog
18696 + # exists for logging of collision-protect eerror messages.
18697 + if myebuild is None:
18698 + myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
18699 + doebuild_environment(myebuild, "preinst", settings=self.settings, db=mydbapi)
18700 + self.settings["REPLACING_VERSIONS"] = " ".join(
18701 + [portage.versions.cpv_getversion(other.mycpv) for other in others_in_slot]
18702 + )
18703 + prepare_build_dirs(settings=self.settings, cleanup=cleanup)
18704 +
18705 + # check for package collisions
18706 + blockers = []
18707 + for blocker in self._blockers or []:
18708 + blocker = self.vartree.dbapi._dblink(blocker.cpv)
18709 + # It may have been unmerged before lock(s)
18710 + # were acquired.
18711 + if blocker.exists():
18712 + blockers.append(blocker)
18713 +
18714 + (
18715 + collisions,
18716 + internal_collisions,
18717 + dirs_ro,
18718 + symlink_collisions,
18719 + plib_collisions,
18720 + ) = self._collision_protect(
18721 + srcroot, destroot, others_in_slot + blockers, filelist, linklist
18722 + )
18723 +
18724 + # Check for read-only filesystems.
18725 + ro_checker = get_ro_checker()
18726 + rofilesystems = ro_checker(dirs_ro)
18727 +
18728 + if rofilesystems:
18729 + msg = _(
18730 + "One or more files installed to this package are "
18731 + "set to be installed to read-only filesystems. "
18732 + "Please mount the following filesystems as read-write "
18733 + "and retry."
18734 + )
18735 + msg = textwrap.wrap(msg, 70)
18736 + msg.append("")
18737 + for f in rofilesystems:
18738 + msg.append("\t%s" % f)
18739 + msg.append("")
18740 + self._elog("eerror", "preinst", msg)
18741 +
18742 + msg = (
18743 + _("Package '%s' NOT merged due to read-only file systems.")
18744 + % self.settings.mycpv
18745 + )
18746 + msg += _(
18747 + " If necessary, refer to your elog "
18748 + "messages for the whole content of the above message."
18749 + )
18750 + msg = textwrap.wrap(msg, 70)
18751 + eerror(msg)
18752 + return 1
18753 +
18754 + if internal_collisions:
18755 + msg = (
18756 + _(
18757 + "Package '%s' has internal collisions between non-identical files "
18758 + "(located in separate directories in the installation image (${D}) "
18759 + "corresponding to merged directories in the target "
18760 + "filesystem (${ROOT})):"
18761 + )
18762 + % self.settings.mycpv
18763 + )
18764 + msg = textwrap.wrap(msg, 70)
18765 + msg.append("")
18766 + for k, v in sorted(internal_collisions.items(), key=operator.itemgetter(0)):
18767 + msg.append("\t%s" % os.path.join(destroot, k.lstrip(os.path.sep)))
18768 + for (file1, file2), differences in sorted(v.items()):
18769 + msg.append(
18770 + "\t\t%s" % os.path.join(destroot, file1.lstrip(os.path.sep))
18771 + )
18772 + msg.append(
18773 + "\t\t%s" % os.path.join(destroot, file2.lstrip(os.path.sep))
18774 + )
18775 + msg.append("\t\t\tDifferences: %s" % ", ".join(differences))
18776 + msg.append("")
18777 + self._elog("eerror", "preinst", msg)
18778 +
18779 + msg = (
18780 + _(
18781 + "Package '%s' NOT merged due to internal collisions "
18782 + "between non-identical files."
18783 + )
18784 + % self.settings.mycpv
18785 + )
18786 + msg += _(
18787 + " If necessary, refer to your elog messages for the whole "
18788 + "content of the above message."
18789 + )
18790 + eerror(textwrap.wrap(msg, 70))
18791 + return 1
18792 +
18793 + if symlink_collisions:
18794 + # Symlink collisions need to be distinguished from other types
18795 + # of collisions, in order to avoid confusion (see bug #409359).
18796 + msg = _(
18797 + "Package '%s' has one or more collisions "
18798 + "between symlinks and directories, which is explicitly "
18799 + "forbidden by PMS section 13.4 (see bug #326685):"
18800 + ) % (self.settings.mycpv,)
18801 + msg = textwrap.wrap(msg, 70)
18802 + msg.append("")
18803 + for f in symlink_collisions:
18804 + msg.append("\t%s" % os.path.join(destroot, f.lstrip(os.path.sep)))
18805 + msg.append("")
18806 + self._elog("eerror", "preinst", msg)
18807 +
18808 + if collisions:
18809 + collision_protect = "collision-protect" in self.settings.features
18810 + protect_owned = "protect-owned" in self.settings.features
18811 + msg = _(
18812 + "This package will overwrite one or more files that"
18813 + " may belong to other packages (see list below)."
18814 + )
18815 + if not (collision_protect or protect_owned):
18816 + msg += _(
18817 + ' Add either "collision-protect" or'
18818 + ' "protect-owned" to FEATURES in'
18819 + " make.conf if you would like the merge to abort"
18820 + " in cases like this. See the make.conf man page for"
18821 + " more information about these features."
18822 + )
18823 + if self.settings.get("PORTAGE_QUIET") != "1":
18824 + msg += _(
18825 + " You can use a command such as"
18826 + " `portageq owners / <filename>` to identify the"
18827 + " installed package that owns a file. If portageq"
18828 + " reports that only one package owns a file then do NOT"
18829 + " file a bug report. A bug report is only useful if it"
18830 + " identifies at least two or more packages that are known"
18831 + " to install the same file(s)."
18832 + " If a collision occurs and you"
18833 + " can not explain where the file came from then you"
18834 + " should simply ignore the collision since there is not"
18835 + " enough information to determine if a real problem"
18836 + " exists. Please do NOT file a bug report at"
18837 + " https://bugs.gentoo.org/ unless you report exactly which"
18838 + " two packages install the same file(s). See"
18839 + " https://wiki.gentoo.org/wiki/Knowledge_Base:Blockers"
18840 + " for tips on how to solve the problem. And once again,"
18841 + " please do NOT file a bug report unless you have"
18842 + " completely understood the above message."
18843 + )
18844 +
18845 + self.settings["EBUILD_PHASE"] = "preinst"
18846 + from textwrap import wrap
18847 +
18848 + msg = wrap(msg, 70)
18849 + if collision_protect:
18850 + msg.append("")
18851 + msg.append(_("package %s NOT merged") % self.settings.mycpv)
18852 + msg.append("")
18853 + msg.append(_("Detected file collision(s):"))
18854 + msg.append("")
18855 +
18856 + for f in collisions:
18857 + msg.append("\t%s" % os.path.join(destroot, f.lstrip(os.path.sep)))
18858 +
18859 + eerror(msg)
18860 +
18861 + owners = None
18862 + if collision_protect or protect_owned or symlink_collisions:
18863 + msg = []
18864 + msg.append("")
18865 + msg.append(
18866 + _("Searching all installed" " packages for file collisions...")
18867 + )
18868 + msg.append("")
18869 + msg.append(_("Press Ctrl-C to Stop"))
18870 + msg.append("")
18871 + eerror(msg)
18872 +
18873 + if len(collisions) > 20:
18874 + # get_owners is slow for large numbers of files, so
18875 + # don't look them all up.
18876 + collisions = collisions[:20]
18877 +
18878 + pkg_info_strs = {}
18879 + self.lockdb()
18880 + try:
18881 + owners = self.vartree.dbapi._owners.get_owners(collisions)
18882 + self.vartree.dbapi.flush_cache()
18883 +
18884 + for pkg in owners:
18885 + pkg = self.vartree.dbapi._pkg_str(pkg.mycpv, None)
18886 + pkg_info_str = "%s%s%s" % (pkg, _slot_separator, pkg.slot)
18887 + if pkg.repo != _unknown_repo:
18888 + pkg_info_str += "%s%s" % (_repo_separator, pkg.repo)
18889 + pkg_info_strs[pkg] = pkg_info_str
18890 +
18891 + finally:
18892 + self.unlockdb()
18893 +
18894 + for pkg, owned_files in owners.items():
18895 + msg = []
18896 + msg.append(pkg_info_strs[pkg.mycpv])
18897 + for f in sorted(owned_files):
18898 + msg.append(
18899 + "\t%s" % os.path.join(destroot, f.lstrip(os.path.sep))
18900 + )
18901 + msg.append("")
18902 + eerror(msg)
18903 +
18904 + if not owners:
18905 + eerror(
18906 + [_("None of the installed" " packages claim the file(s)."), ""]
18907 + )
18908 +
18909 + symlink_abort_msg = _(
18910 + "Package '%s' NOT merged since it has "
18911 + "one or more collisions between symlinks and directories, "
18912 + "which is explicitly forbidden by PMS section 13.4 "
18913 + "(see bug #326685)."
18914 + )
18915 +
18916 + # The explanation about the collision and how to solve
18917 + # it may not be visible via a scrollback buffer, especially
18918 + # if the number of file collisions is large. Therefore,
18919 + # show a summary at the end.
18920 + abort = False
18921 + if symlink_collisions:
18922 + abort = True
18923 + msg = symlink_abort_msg % (self.settings.mycpv,)
18924 + elif collision_protect:
18925 + abort = True
18926 + msg = (
18927 + _("Package '%s' NOT merged due to file collisions.")
18928 + % self.settings.mycpv
18929 + )
18930 + elif protect_owned and owners:
18931 + abort = True
18932 + msg = (
18933 + _("Package '%s' NOT merged due to file collisions.")
18934 + % self.settings.mycpv
18935 + )
18936 + else:
18937 + msg = (
18938 + _("Package '%s' merged despite file collisions.")
18939 + % self.settings.mycpv
18940 + )
18941 + msg += _(
18942 + " If necessary, refer to your elog "
18943 + "messages for the whole content of the above message."
18944 + )
18945 + eerror(wrap(msg, 70))
18946 +
18947 + if abort:
18948 + return 1
18949 +
18950 + # The merge process may move files out of the image directory,
18951 + # which causes invalidation of the .installed flag.
18952 + try:
18953 + os.unlink(
18954 + os.path.join(os.path.dirname(normalize_path(srcroot)), ".installed")
18955 + )
18956 + except OSError as e:
18957 + if e.errno != errno.ENOENT:
18958 + raise
18959 + del e
18960 +
18961 + self.dbdir = self.dbtmpdir
18962 + self.delete()
18963 + ensure_dirs(self.dbtmpdir)
18964 +
18965 + downgrade = False
18966 + if (
18967 + self._installed_instance is not None
18968 + and vercmp(self.mycpv.version, self._installed_instance.mycpv.version) < 0
18969 + ):
18970 + downgrade = True
18971 +
18972 + if self._installed_instance is not None:
18973 + rval = self._pre_merge_backup(self._installed_instance, downgrade)
18974 + if rval != os.EX_OK:
18975 + showMessage(
18976 + _("!!! FAILED preinst: ") + "quickpkg: %s\n" % rval,
18977 + level=logging.ERROR,
18978 + noiselevel=-1,
18979 + )
18980 + return rval
18981 +
18982 + # run preinst script
18983 + showMessage(
18984 + _(">>> Merging %(cpv)s to %(destroot)s\n")
18985 + % {"cpv": self.mycpv, "destroot": destroot}
18986 + )
18987 + phase = EbuildPhase(
18988 + background=False,
18989 + phase="preinst",
18990 + scheduler=self._scheduler,
18991 + settings=self.settings,
18992 + )
18993 + phase.start()
18994 + a = phase.wait()
18995 +
18996 + # XXX: Decide how to handle failures here.
18997 + if a != os.EX_OK:
18998 + showMessage(
18999 + _("!!! FAILED preinst: ") + str(a) + "\n",
19000 + level=logging.ERROR,
19001 + noiselevel=-1,
19002 + )
19003 + return a
19004 +
19005 + # copy "info" files (like SLOT, CFLAGS, etc.) into the database
19006 + for x in os.listdir(inforoot):
19007 + self.copyfile(inforoot + "/" + x)
19008 +
19009 + # write local package counter for recording
19010 + if counter is None:
19011 + counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
19012 + with io.open(
19013 + _unicode_encode(
19014 + os.path.join(self.dbtmpdir, "COUNTER"),
19015 + encoding=_encodings["fs"],
19016 + errors="strict",
19017 + ),
19018 + mode="w",
19019 + encoding=_encodings["repo.content"],
19020 + errors="backslashreplace",
19021 + ) as f:
19022 + f.write("%s" % counter)
19023 +
19024 + self.updateprotect()
19025 +
19026 + # if we have a file containing previously-merged config file md5sums, grab it.
19027 + self.vartree.dbapi._fs_lock()
19028 + try:
19029 + # This prunes any libraries from the registry that no longer
19030 + # exist on disk, in case they have been manually removed.
19031 + # This has to be done prior to merge, since after merge it
19032 + # is non-trivial to distinguish these files from files
19033 + # that have just been merged.
19034 + plib_registry = self.vartree.dbapi._plib_registry
19035 + if plib_registry:
19036 + plib_registry.lock()
19037 + try:
19038 + plib_registry.load()
19039 + plib_registry.store()
19040 + finally:
19041 + plib_registry.unlock()
19042 +
19043 + # Always behave like --noconfmem is enabled for downgrades
19044 + # so that people who don't know about this option are less
19045 + # likely to get confused when doing upgrade/downgrade cycles.
19046 + cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
19047 + if "NOCONFMEM" in self.settings or downgrade:
19048 + cfgfiledict["IGNORE"] = 1
19049 + else:
19050 + cfgfiledict["IGNORE"] = 0
19051 +
19052 + rval = self._merge_contents(srcroot, destroot, cfgfiledict)
19053 + if rval != os.EX_OK:
19054 + return rval
19055 + finally:
19056 + self.vartree.dbapi._fs_unlock()
19057 +
19058 + # These caches are populated during collision-protect and the data
19059 + # they contain is now invalid. It's very important to invalidate
19060 + # the contents_inodes cache so that FEATURES=unmerge-orphans
19061 + # doesn't unmerge anything that belongs to this package that has
19062 + # just been merged.
19063 + for dblnk in others_in_slot:
19064 + dblnk._clear_contents_cache()
19065 + self._clear_contents_cache()
19066 +
19067 + linkmap = self.vartree.dbapi._linkmap
19068 + plib_registry = self.vartree.dbapi._plib_registry
19069 + # We initialize preserve_paths to an empty set rather
19070 + # than None here because it plays an important role
19071 + # in prune_plib_registry logic by serving to indicate
19072 + # that we have a replacement for a package that's
19073 + # being unmerged.
19074 +
19075 + preserve_paths = set()
19076 + needed = None
19077 + if not (self._linkmap_broken or linkmap is None or plib_registry is None):
19078 + self.vartree.dbapi._fs_lock()
19079 + plib_registry.lock()
19080 + try:
19081 + plib_registry.load()
19082 + needed = os.path.join(inforoot, linkmap._needed_aux_key)
19083 + self._linkmap_rebuild(include_file=needed)
19084 +
19085 + # Preserve old libs if they are still in use
19086 + # TODO: Handle cases where the previous instance
19087 + # has already been uninstalled but it still has some
19088 + # preserved libraries in the registry that we may
19089 + # want to preserve here.
19090 + preserve_paths = self._find_libs_to_preserve()
19091 + finally:
19092 + plib_registry.unlock()
19093 + self.vartree.dbapi._fs_unlock()
19094 +
19095 + if preserve_paths:
19096 + self._add_preserve_libs_to_contents(preserve_paths)
19097 +
19098 + # If portage is reinstalling itself, remove the old
19099 + # version now since we want to use the temporary
19100 + # PORTAGE_BIN_PATH that will be removed when we return.
19101 + reinstall_self = False
19102 + if self.myroot == "/" and match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
19103 + reinstall_self = True
19104 +
19105 + emerge_log = self._emerge_log
19106 +
19107 + # If we have any preserved libraries then autoclean
19108 + # is forced so that preserve-libs logic doesn't have
19109 + # to account for the additional complexity of the
19110 + # AUTOCLEAN=no mode.
19111 + autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes" or preserve_paths
19112 +
19113 + if autoclean:
19114 + emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,))
19115 +
19116 + others_in_slot.append(self) # self has just been merged
19117 + for dblnk in list(others_in_slot):
19118 + if dblnk is self:
19119 + continue
19120 + if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
19121 + continue
19122 + showMessage(_(">>> Safely unmerging already-installed instance...\n"))
19123 + emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,))
19124 + others_in_slot.remove(dblnk) # dblnk will unmerge itself now
19125 + dblnk._linkmap_broken = self._linkmap_broken
19126 + dblnk.settings["REPLACED_BY_VERSION"] = portage.versions.cpv_getversion(
19127 + self.mycpv
19128 + )
19129 + dblnk.settings.backup_changes("REPLACED_BY_VERSION")
19130 + unmerge_rval = dblnk.unmerge(
19131 + ldpath_mtimes=prev_mtimes,
19132 + others_in_slot=others_in_slot,
19133 + needed=needed,
19134 + preserve_paths=preserve_paths,
19135 + )
19136 + dblnk.settings.pop("REPLACED_BY_VERSION", None)
19137 +
19138 + if unmerge_rval == os.EX_OK:
19139 + emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,))
19140 + else:
19141 + emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))
19142 +
19143 + self.lockdb()
19144 + try:
19145 + # TODO: Check status and abort if necessary.
19146 + dblnk.delete()
19147 + finally:
19148 + self.unlockdb()
19149 + showMessage(_(">>> Original instance of package unmerged safely.\n"))
19150 +
19151 + if len(others_in_slot) > 1:
19152 + showMessage(
19153 + colorize("WARN", _("WARNING:"))
19154 + + _(
19155 + " AUTOCLEAN is disabled. This can cause serious"
19156 + " problems due to overlapping packages.\n"
19157 + ),
19158 + level=logging.WARN,
19159 + noiselevel=-1,
19160 + )
19161 +
19162 + # We hold both directory locks.
19163 + self.dbdir = self.dbpkgdir
19164 + self.lockdb()
19165 + try:
19166 + self.delete()
19167 + _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
19168 + self._merged_path(self.dbpkgdir, os.lstat(self.dbpkgdir))
19169 + self.vartree.dbapi._cache_delta.recordEvent(
19170 + "add", self.mycpv, slot, counter
19171 + )
19172 + finally:
19173 + self.unlockdb()
19174 +
19175 + # Check for file collisions with blocking packages
19176 + # and remove any colliding files from their CONTENTS
19177 + # since they now belong to this package.
19178 + self._clear_contents_cache()
19179 + contents = self.getcontents()
19180 + destroot_len = len(destroot) - 1
19181 + self.lockdb()
19182 + try:
19183 + for blocker in blockers:
19184 + self.vartree.dbapi.removeFromContents(
19185 + blocker, iter(contents), relative_paths=False
19186 + )
19187 + finally:
19188 + self.unlockdb()
19189 +
19190 + plib_registry = self.vartree.dbapi._plib_registry
19191 + if plib_registry:
19192 + self.vartree.dbapi._fs_lock()
19193 + plib_registry.lock()
19194 + try:
19195 + plib_registry.load()
19196 +
19197 + if preserve_paths:
19198 + # keep track of the libs we preserved
19199 + plib_registry.register(
19200 + self.mycpv, slot, counter, sorted(preserve_paths)
19201 + )
19202 +
19203 + # Unregister any preserved libs that this package has overwritten
19204 + # and update the contents of the packages that owned them.
19205 + plib_dict = plib_registry.getPreservedLibs()
19206 + for cpv, paths in plib_collisions.items():
19207 + if cpv not in plib_dict:
19208 + continue
19209 + has_vdb_entry = False
19210 + if cpv != self.mycpv:
19211 + # If we've replaced another instance with the
19212 + # same cpv then the vdb entry no longer belongs
19213 + # to it, so we'll have to get the slot and counter
19214 + # from plib_registry._data instead.
19215 + self.vartree.dbapi.lock()
19216 + try:
19217 + try:
19218 + slot = self.vartree.dbapi._pkg_str(cpv, None).slot
19219 + counter = self.vartree.dbapi.cpv_counter(cpv)
19220 + except (KeyError, InvalidData):
19221 + pass
19222 + else:
19223 + has_vdb_entry = True
19224 + self.vartree.dbapi.removeFromContents(cpv, paths)
19225 + finally:
19226 + self.vartree.dbapi.unlock()
19227 +
19228 + if not has_vdb_entry:
19229 + # It's possible for previously unmerged packages
19230 + # to have preserved libs in the registry, so try
19231 + # to retrieve the slot and counter from there.
19232 + has_registry_entry = False
19233 + for plib_cps, (
19234 + plib_cpv,
19235 + plib_counter,
19236 + plib_paths,
19237 + ) in plib_registry._data.items():
19238 + if plib_cpv != cpv:
19239 + continue
19240 + try:
19241 + cp, slot = plib_cps.split(":", 1)
19242 + except ValueError:
19243 + continue
19244 + counter = plib_counter
19245 + has_registry_entry = True
19246 + break
19247 +
19248 + if not has_registry_entry:
19249 + continue
19250 +
19251 + remaining = [f for f in plib_dict[cpv] if f not in paths]
19252 + plib_registry.register(cpv, slot, counter, remaining)
19253 +
19254 + plib_registry.store()
19255 + finally:
19256 + plib_registry.unlock()
19257 + self.vartree.dbapi._fs_unlock()
19258 +
19259 + self.vartree.dbapi._add(self)
19260 + contents = self.getcontents()
19261 +
19262 + # do postinst script
19263 + self.settings["PORTAGE_UPDATE_ENV"] = os.path.join(
19264 + self.dbpkgdir, "environment.bz2"
19265 + )
19266 + self.settings.backup_changes("PORTAGE_UPDATE_ENV")
19267 + try:
19268 + phase = EbuildPhase(
19269 + background=False,
19270 + phase="postinst",
19271 + scheduler=self._scheduler,
19272 + settings=self.settings,
19273 + )
19274 + phase.start()
19275 + a = phase.wait()
19276 + if a == os.EX_OK:
19277 + showMessage(_(">>> %s merged.\n") % self.mycpv)
19278 + finally:
19279 + self.settings.pop("PORTAGE_UPDATE_ENV", None)
19280 +
19281 + if a != os.EX_OK:
19282 + # It's stupid to bail out here, so keep going regardless of
19283 + # phase return code.
19284 + self._postinst_failure = True
19285 + self._elog(
19286 + "eerror",
19287 + "postinst",
19288 + [
19289 + _("FAILED postinst: %s") % (a,),
19290 + ],
19291 + )
19292 +
19293 + # update environment settings, library paths. DO NOT change symlinks.
19294 + env_update(
19295 + target_root=self.settings["ROOT"],
19296 + prev_mtimes=prev_mtimes,
19297 + contents=contents,
19298 + env=self.settings,
19299 + writemsg_level=self._display_merge,
19300 + vardbapi=self.vartree.dbapi,
19301 + )
19302 +
19303 + # For gcc upgrades, preserved libs have to be removed after
19304 + # the library path has been updated.
19305 + self._prune_plib_registry()
19306 + self._post_merge_sync()
19307 +
19308 + return os.EX_OK
19309 +
19310 + def _new_backup_path(self, p):
19311 + """
19312 + This works for any type of path, such as a regular file, symlink,
19313 + or directory. The parent directory is assumed to exist.
19314 + The returned filename is of the form p + '.backup.' + x, where
19315 + x guarantees that the returned path does not exist yet.
19316 + """
19317 + os = _os_merge
19318 +
19319 + x = -1
19320 + while True:
19321 + x += 1
19322 + backup_p = "%s.backup.%04d" % (p, x)
19323 + try:
19324 + os.lstat(backup_p)
19325 + except OSError:
19326 + break
19327 +
19328 + return backup_p
19329 +
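For example, assuming '/etc/foo' plus a stale '/etc/foo.backup.0000'
already exist, the probe loop returns the first free suffix:

    # self._new_backup_path("/etc/foo") -> "/etc/foo.backup.0001"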
19330 + def _merge_contents(self, srcroot, destroot, cfgfiledict):
19331 +
19332 + cfgfiledict_orig = cfgfiledict.copy()
19333 +
19334 + # open CONTENTS file (possibly overwriting old one) for recording
19335 + # Use atomic_ofstream for automatic coercion of raw bytes to
19336 + # unicode, in order to prevent TypeError when writing raw bytes
19337 + # to TextIOWrapper with python2.
19338 + outfile = atomic_ofstream(
19339 + _unicode_encode(
19340 + os.path.join(self.dbtmpdir, "CONTENTS"),
19341 + encoding=_encodings["fs"],
19342 + errors="strict",
19343 + ),
19344 + mode="w",
19345 + encoding=_encodings["repo.content"],
19346 + errors="backslashreplace",
19347 + )
19348 +
19349 + # Don't bump mtimes on merge since some applications require
19350 + # preservation of timestamps. This means that the unmerge phase must
19351 + # check to see if a file belongs to an installed instance in the same
19352 + # slot.
19353 + mymtime = None
19354 +
19355 + # set umask to 0 for merging; save the old umask in prevmask, since this is a global change
19356 + prevmask = os.umask(0)
19357 + secondhand = []
19358 +
19359 + # we do a first merge; this will recurse through all files in our srcroot but also build up a
19360 + # "second hand" of symlinks to merge later
19361 + if self.mergeme(
19362 + srcroot,
19363 + destroot,
19364 + outfile,
19365 + secondhand,
19366 + self.settings["EPREFIX"].lstrip(os.sep),
19367 + cfgfiledict,
19368 + mymtime,
19369 + ):
19370 + return 1
19371 +
19372 + # now it's time to deal with our second hand; we'll loop until we can't merge anymore. The rest are
19373 + # broken symlinks. We'll merge them too.
19374 + lastlen = 0
19375 + while len(secondhand) and len(secondhand) != lastlen:
19376 + # clear the thirdhand. Anything from our second hand that
19377 + # couldn't get merged will be added to thirdhand.
19378 +
19379 + thirdhand = []
19380 + if self.mergeme(
19381 + srcroot, destroot, outfile, thirdhand, secondhand, cfgfiledict, mymtime
19382 + ):
19383 + return 1
19384 +
19385 + # swap hands
19386 + lastlen = len(secondhand)
19387 +
19388 + # our thirdhand now becomes our secondhand. It's ok to throw
19389 + # away secondhand since thirdhand contains all the stuff that
19390 + # couldn't be merged.
19391 + secondhand = thirdhand
19392 +
19393 + if len(secondhand):
19394 + # force merge of remaining symlinks (broken or circular; oh well)
19395 + if self.mergeme(
19396 + srcroot, destroot, outfile, None, secondhand, cfgfiledict, mymtime
19397 + ):
19398 + return 1
19399 +
19400 + # restore umask
19401 + os.umask(prevmask)
19402 +
19403 + # if we opened it, close it
19404 + outfile.flush()
19405 + outfile.close()
19406 +
19407 + # write out our collection of md5sums
19408 + if cfgfiledict != cfgfiledict_orig:
19409 + cfgfiledict.pop("IGNORE", None)
19410 + try:
19411 + writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
19412 + except InvalidLocation:
19413 + self.settings._init_dirs()
19414 + writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
19415 +
19416 + return os.EX_OK
19417 +
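The pass structure in _merge_contents() above amounts to a fixed-point
loop: retry the deferred symlinks until a pass makes no progress, then
force-merge whatever is left. A minimal sketch of that pattern, where
try_merge and force_merge are hypothetical callbacks standing in for
the mergeme() calls:

    def merge_with_retries(items, try_merge, force_merge):
        deferred = list(items)
        lastlen = 0
        while deferred and len(deferred) != lastlen:
            lastlen = len(deferred)
            # Anything that cannot be merged this pass is retried later.
            deferred = [x for x in deferred if not try_merge(x)]
        for x in deferred:
            # Whatever remains is a broken or circular symlink; merge it
            # anyway, as the code above does with secondhand=None.
            force_merge(x)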
19418 + def mergeme(
19419 + self,
19420 + srcroot,
19421 + destroot,
19422 + outfile,
19423 + secondhand,
19424 + stufftomerge,
19425 + cfgfiledict,
19426 + thismtime,
19427 + ):
19428 + """
19429 +
19430 + This function handles actual merging of the package contents to the livefs.
19431 + It also handles config protection.
19432 +
19433 + @param srcroot: Where are we copying files from (usually ${D})
19434 + @type srcroot: String (Path)
19435 + @param destroot: Typically ${ROOT}
19436 + @type destroot: String (Path)
19437 + @param outfile: File to log operations to
19438 + @type outfile: File Object
19439 + @param secondhand: A set of items to merge in pass two (usually
19440 + symlinks that point to non-existing files that may get merged later)
19441 + @type secondhand: List
19442 + @param stufftomerge: Either a directory to merge, or a list of items.
19443 + @type stufftomerge: String or List
19444 + @param cfgfiledict: { File:mtime } mapping for config_protected files
19445 + @type cfgfiledict: Dictionary
19446 + @param thismtime: None or new mtime for merged files (expressed in
19447 + nanoseconds)
19448 + @type thismtime: None or Int
19449 + @rtype: None or Boolean
19450 + @return:
19451 + 1. True on failure
19452 + 2. None otherwise
19453 +
19454 + """
19455 +
19456 + showMessage = self._display_merge
19457 + writemsg = self._display_merge
19458 +
19459 + os = _os_merge
19460 + sep = os.sep
19461 + join = os.path.join
19462 + srcroot = normalize_path(srcroot).rstrip(sep) + sep
19463 + destroot = normalize_path(destroot).rstrip(sep) + sep
19464 + calc_prelink = "prelink-checksums" in self.settings.features
19465 +
19466 + protect_if_modified = (
19467 + "config-protect-if-modified" in self.settings.features
19468 + and self._installed_instance is not None
19469 + )
19470 +
19471 + # this merges a list of files. The argument can be passed in two forms.
19472 + if isinstance(stufftomerge, str):
19473 + # A directory is specified. Figure out protection paths, listdir() it and process it.
19474 + mergelist = [
19475 + join(stufftomerge, child)
19476 + for child in os.listdir(join(srcroot, stufftomerge))
19477 + ]
19478 + else:
19479 + mergelist = stufftomerge[:]
19480 +
19481 + while mergelist:
19482 +
19483 + relative_path = mergelist.pop()
19484 + mysrc = join(srcroot, relative_path)
19485 + mydest = join(destroot, relative_path)
19486 + # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
19487 + myrealdest = join(sep, relative_path)
19488 + # stat file once, test using S_* macros many times (faster that way)
19489 + mystat = os.lstat(mysrc)
19490 + mymode = mystat[stat.ST_MODE]
19491 + mymd5 = None
19492 + myto = None
19493 +
19494 + mymtime = mystat.st_mtime_ns
19495 +
19496 + if stat.S_ISREG(mymode):
19497 + mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
19498 + elif stat.S_ISLNK(mymode):
19499 + # The file name of mysrc and the actual file that it points to
19500 + # will have earlier been forcefully converted to the 'merge'
19501 + # encoding if necessary, but the content of the symbolic link
19502 + # may need to be forcefully converted here.
19503 + myto = _os.readlink(
19504 + _unicode_encode(
19505 + mysrc, encoding=_encodings["merge"], errors="strict"
19506 + )
19507 + )
19508 + try:
19509 + myto = _unicode_decode(
19510 + myto, encoding=_encodings["merge"], errors="strict"
19511 + )
19512 + except UnicodeDecodeError:
19513 + myto = _unicode_decode(
19514 + myto, encoding=_encodings["merge"], errors="replace"
19515 + )
19516 + myto = _unicode_encode(
19517 + myto, encoding="ascii", errors="backslashreplace"
19518 + )
19519 + myto = _unicode_decode(
19520 + myto, encoding=_encodings["merge"], errors="replace"
19521 + )
19522 + os.unlink(mysrc)
19523 + os.symlink(myto, mysrc)
19524 +
19525 + mymd5 = md5(_unicode_encode(myto)).hexdigest()
19526 +
19527 + protected = False
19528 + if stat.S_ISLNK(mymode) or stat.S_ISREG(mymode):
19529 + protected = self.isprotected(mydest)
19530 +
19531 + if (
19532 + stat.S_ISREG(mymode)
19533 + and mystat.st_size == 0
19534 + and os.path.basename(mydest).startswith(".keep")
19535 + ):
19536 + protected = False
19537 +
19538 + destmd5 = None
19539 + mydest_link = None
19540 + # handy variables; mydest is the target object on the live filesystem;
19541 + # mysrc is the source object in the temporary install dir
19542 + try:
19543 + mydstat = os.lstat(mydest)
19544 + mydmode = mydstat.st_mode
19545 + if protected:
19546 + if stat.S_ISLNK(mydmode):
19547 + # Read symlink target as bytes, in case the
19548 + # target path has a bad encoding.
19549 + mydest_link = _os.readlink(
19550 + _unicode_encode(
19551 + mydest, encoding=_encodings["merge"], errors="strict"
19552 + )
19553 + )
19554 + mydest_link = _unicode_decode(
19555 + mydest_link, encoding=_encodings["merge"], errors="replace"
19556 + )
19557 +
19558 + # For protection of symlinks, the md5
19559 + # of the link target path string is used
19560 + # for cfgfiledict (symlinks are
19561 + # protected since bug #485598).
19562 + destmd5 = md5(_unicode_encode(mydest_link)).hexdigest()
19563 +
19564 + elif stat.S_ISREG(mydmode):
19565 + destmd5 = perform_md5(mydest, calc_prelink=calc_prelink)
19566 + except (FileNotFound, OSError) as e:
19567 + if isinstance(e, OSError) and e.errno != errno.ENOENT:
19568 + raise
19569 + # dest file doesn't exist
19570 + mydstat = None
19571 + mydmode = None
19572 + mydest_link = None
19573 + destmd5 = None
19574 +
19575 + moveme = True
19576 + if protected:
19577 + mydest, protected, moveme = self._protect(
19578 + cfgfiledict,
19579 + protect_if_modified,
19580 + mymd5,
19581 + myto,
19582 + mydest,
19583 + myrealdest,
19584 + mydmode,
19585 + destmd5,
19586 + mydest_link,
19587 + )
19588 +
19589 + zing = "!!!"
19590 + if not moveme:
19591 + # confmem rejected this update
19592 + zing = "---"
19593 +
19594 + if stat.S_ISLNK(mymode):
19595 + # we are merging a symbolic link
19596 + # Pass in the symlink target in order to bypass the
19597 + # os.readlink() call inside abssymlink(), since that
19598 + # call is unsafe if the merge encoding is not ascii
19599 + # or utf_8 (see bug #382021).
19600 + myabsto = abssymlink(mysrc, target=myto)
19601 +
19602 + if myabsto.startswith(srcroot):
19603 + myabsto = myabsto[len(srcroot) :]
19604 + myabsto = myabsto.lstrip(sep)
19605 + if self.settings and self.settings["D"]:
19606 + if myto.startswith(self.settings["D"]):
19607 + myto = myto[len(self.settings["D"]) - 1 :]
19608 + # myrealto contains the path of the real file to which this symlink points.
19609 + # we can simply test for existence of this file to see if the target has been merged yet
19610 + myrealto = normalize_path(os.path.join(destroot, myabsto))
19611 + if mydmode is not None and stat.S_ISDIR(mydmode):
19612 + if not protected:
19613 + # we can't merge a symlink over a directory
19614 + newdest = self._new_backup_path(mydest)
19615 + msg = []
19616 + msg.append("")
19617 + msg.append(
19618 + _("Installation of a symlink is blocked by a directory:")
19619 + )
19620 + msg.append(" '%s'" % mydest)
19621 + msg.append(
19622 + _("This symlink will be merged with a different name:")
19623 + )
19624 + msg.append(" '%s'" % newdest)
19625 + msg.append("")
19626 + self._eerror("preinst", msg)
19627 + mydest = newdest
19628 +
19629 + # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
19630 + if (secondhand != None) and (not os.path.exists(myrealto)):
19631 + # either the target directory doesn't exist yet or the target file doesn't exist -- or
19632 + # the target is a broken symlink. We will add this file to our "second hand" and merge
19633 + # it later.
19634 + secondhand.append(mysrc[len(srcroot) :])
19635 + continue
19636 + # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
19637 + if moveme:
19638 + zing = ">>>"
19639 + mymtime = movefile(
19640 + mysrc,
19641 + mydest,
19642 + newmtime=thismtime,
19643 + sstat=mystat,
19644 + mysettings=self.settings,
19645 + encoding=_encodings["merge"],
19646 + )
19647 +
19648 + try:
19649 + self._merged_path(mydest, os.lstat(mydest))
19650 + except OSError:
19651 + pass
19652 +
19653 + if mymtime != None:
19654 + # Use lexists, since if the target happens to be a broken
19655 + # symlink then that should trigger an independent warning.
19656 + if not (
19657 + os.path.lexists(myrealto)
19658 + or os.path.lexists(join(srcroot, myabsto))
19659 + ):
19660 + self._eqawarn(
19661 + "preinst",
19662 + [
19663 + _(
19664 + "QA Notice: Symbolic link /%s points to /%s which does not exist."
19665 + )
19666 + % (relative_path, myabsto)
19667 + ],
19668 + )
19669 +
19670 + showMessage("%s %s -> %s\n" % (zing, mydest, myto))
19671 + outfile.write(
19672 + self._format_contents_line(
19673 + node_type="sym",
19674 + abs_path=myrealdest,
19675 + symlink_target=myto,
19676 + mtime_ns=mymtime,
19677 + )
19678 + )
19679 + else:
19680 + showMessage(
19681 + _("!!! Failed to move file.\n"),
19682 + level=logging.ERROR,
19683 + noiselevel=-1,
19684 + )
19685 + showMessage(
19686 + "!!! %s -> %s\n" % (mydest, myto),
19687 + level=logging.ERROR,
19688 + noiselevel=-1,
19689 + )
19690 + return 1
19691 + elif stat.S_ISDIR(mymode):
19692 + # we are merging a directory
19693 + if mydmode != None:
19694 + # destination exists
19695 +
19696 + if bsd_chflags:
19697 + # Save then clear flags on dest.
19698 + dflags = mydstat.st_flags
19699 + if dflags != 0:
19700 + bsd_chflags.lchflags(mydest, 0)
19701 +
19702 + if not stat.S_ISLNK(mydmode) and not os.access(mydest, os.W_OK):
19703 + pkgstuff = pkgsplit(self.pkg)
19704 + writemsg(
19705 + _("\n!!! Cannot write to '%s'.\n") % mydest, noiselevel=-1
19706 + )
19707 + writemsg(
19708 + _(
19709 + "!!! Please check permissions and directories for broken symlinks.\n"
19710 + )
19711 + )
19712 + writemsg(
19713 + _(
19714 + "!!! You may start the merge process again by using ebuild:\n"
19715 + )
19716 + )
19717 + writemsg(
19718 + "!!! ebuild "
19719 + + self.settings["PORTDIR"]
19720 + + "/"
19721 + + self.cat
19722 + + "/"
19723 + + pkgstuff[0]
19724 + + "/"
19725 + + self.pkg
19726 + + ".ebuild merge\n"
19727 + )
19728 + writemsg(_("!!! And finish by running this: env-update\n\n"))
19729 + return 1
19730 +
19731 + if stat.S_ISDIR(mydmode) or (
19732 + stat.S_ISLNK(mydmode) and os.path.isdir(mydest)
19733 + ):
19734 + # a symlink to an existing directory will work for us; keep it:
19735 + showMessage("--- %s/\n" % mydest)
19736 + if bsd_chflags:
19737 + bsd_chflags.lchflags(mydest, dflags)
19738 + else:
19739 + # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
19740 + backup_dest = self._new_backup_path(mydest)
19741 + msg = []
19742 + msg.append("")
19743 + msg.append(
19744 + _("Installation of a directory is blocked by a file:")
19745 + )
19746 + msg.append(" '%s'" % mydest)
19747 + msg.append(_("This file will be renamed to a different name:"))
19748 + msg.append(" '%s'" % backup_dest)
19749 + msg.append("")
19750 + self._eerror("preinst", msg)
19751 + if (
19752 + movefile(
19753 + mydest,
19754 + backup_dest,
19755 + mysettings=self.settings,
19756 + encoding=_encodings["merge"],
19757 + )
19758 + is None
19759 + ):
19760 + return 1
19761 + showMessage(
19762 + _("bak %s %s.backup\n") % (mydest, mydest),
19763 + level=logging.ERROR,
19764 + noiselevel=-1,
19765 + )
19766 + # now create our directory
19767 + try:
19768 + if self.settings.selinux_enabled():
19769 + _selinux_merge.mkdir(mydest, mysrc)
19770 + else:
19771 + os.mkdir(mydest)
19772 + except OSError as e:
19773 + # Error handling should be equivalent to
19774 + # portage.util.ensure_dirs() for cases
19775 + # like bug #187518.
19776 + if e.errno in (errno.EEXIST,):
19777 + pass
19778 + elif os.path.isdir(mydest):
19779 + pass
19780 + else:
19781 + raise
19782 + del e
19783 +
19784 + if bsd_chflags:
19785 + bsd_chflags.lchflags(mydest, dflags)
19786 + os.chmod(mydest, mystat[0])
19787 + os.chown(mydest, mystat[4], mystat[5])
19788 + showMessage(">>> %s/\n" % mydest)
19789 + else:
19790 + try:
19791 + # destination doesn't exist
19792 + if self.settings.selinux_enabled():
19793 + _selinux_merge.mkdir(mydest, mysrc)
19794 + else:
19795 + os.mkdir(mydest)
19796 + except OSError as e:
19797 + # Error handling should be equivalent to
19798 + # portage.util.ensure_dirs() for cases
19799 + # like bug #187518.
19800 + if e.errno in (errno.EEXIST,):
19801 + pass
19802 + elif os.path.isdir(mydest):
19803 + pass
19804 + else:
19805 + raise
19806 + del e
19807 + os.chmod(mydest, mystat[0])
19808 + os.chown(mydest, mystat[4], mystat[5])
19809 + showMessage(">>> %s/\n" % mydest)
19810 +
19811 + try:
19812 + self._merged_path(mydest, os.lstat(mydest))
19813 + except OSError:
19814 + pass
19815 +
19816 + outfile.write(
19817 + self._format_contents_line(node_type="dir", abs_path=myrealdest)
19818 + )
19819 + # recurse and merge this directory
19820 + mergelist.extend(
19821 + join(relative_path, child)
19822 + for child in os.listdir(join(srcroot, relative_path))
19823 + )
19824 +
19825 + elif stat.S_ISREG(mymode):
19826 + # we are merging a regular file
19827 + if not protected and mydmode is not None and stat.S_ISDIR(mydmode):
19828 + # install of destination is blocked by an existing directory with the same name
19829 + newdest = self._new_backup_path(mydest)
19830 + msg = []
19831 + msg.append("")
19832 + msg.append(
19833 + _("Installation of a regular file is blocked by a directory:")
19834 + )
19835 + msg.append(" '%s'" % mydest)
19836 + msg.append(_("This file will be merged with a different name:"))
19837 + msg.append(" '%s'" % newdest)
19838 + msg.append("")
19839 + self._eerror("preinst", msg)
19840 + mydest = newdest
19841 +
19842 + # whether config protected or not, we merge the new file the
19843 + # same way, unless moveme=0 (blocking directory)
19844 + if moveme:
19845 + # Create hardlinks only for source files that already exist
19846 + # as hardlinks (having identical st_dev and st_ino).
19847 + hardlink_key = (mystat.st_dev, mystat.st_ino)
19848 +
19849 + hardlink_candidates = self._hardlink_merge_map.get(hardlink_key)
19850 + if hardlink_candidates is None:
19851 + hardlink_candidates = []
19852 + self._hardlink_merge_map[hardlink_key] = hardlink_candidates
19853 +
19854 + mymtime = movefile(
19855 + mysrc,
19856 + mydest,
19857 + newmtime=thismtime,
19858 + sstat=mystat,
19859 + mysettings=self.settings,
19860 + hardlink_candidates=hardlink_candidates,
19861 + encoding=_encodings["merge"],
19862 + )
19863 + if mymtime is None:
19864 + return 1
19865 + hardlink_candidates.append(mydest)
19866 + zing = ">>>"
19867 +
19868 + try:
19869 + self._merged_path(mydest, os.lstat(mydest))
19870 + except OSError:
19871 + pass
19872 +
19873 + if mymtime != None:
19874 + outfile.write(
19875 + self._format_contents_line(
19876 + node_type="obj",
19877 + abs_path=myrealdest,
19878 + md5_digest=mymd5,
19879 + mtime_ns=mymtime,
19880 + )
19881 + )
19882 + showMessage("%s %s\n" % (zing, mydest))
19883 + else:
19884 + # we are merging a fifo or device node
19885 + zing = "!!!"
19886 + if mydmode is None:
19887 + # destination doesn't exist
19888 + if (
19889 + movefile(
19890 + mysrc,
19891 + mydest,
19892 + newmtime=thismtime,
19893 + sstat=mystat,
19894 + mysettings=self.settings,
19895 + encoding=_encodings["merge"],
19896 + )
19897 + is not None
19898 + ):
19899 + zing = ">>>"
19900 +
19901 + try:
19902 + self._merged_path(mydest, os.lstat(mydest))
19903 + except OSError:
19904 + pass
19905 +
19906 + else:
19907 + return 1
19908 + if stat.S_ISFIFO(mymode):
19909 + outfile.write(
19910 + self._format_contents_line(node_type="fif", abs_path=myrealdest)
19911 + )
19912 + else:
19913 + outfile.write(
19914 + self._format_contents_line(node_type="dev", abs_path=myrealdest)
19915 + )
19916 + showMessage(zing + " " + mydest + "\n")
19917 +
19918 + def _protect(
19919 + self,
19920 + cfgfiledict,
19921 + protect_if_modified,
19922 + src_md5,
19923 + src_link,
19924 + dest,
19925 + dest_real,
19926 + dest_mode,
19927 + dest_md5,
19928 + dest_link,
19929 + ):
19930 +
19931 + move_me = True
19932 + protected = True
19933 + force = False
19934 + k = False
19935 + if self._installed_instance is not None:
19936 + k = self._installed_instance._match_contents(dest_real)
19937 + if k is not False:
19938 + if dest_mode is None:
19939 + # If the file doesn't exist, then it may
19940 + # have been deleted or renamed by the
19941 + # admin. Therefore, force the file to be
19942 + # merged with a ._cfg name, so that the
19943 + # admin will be prompted for this update
19944 + # (see bug #523684).
19945 + force = True
19946 +
19947 + elif protect_if_modified:
19948 + data = self._installed_instance.getcontents()[k]
19949 + if data[0] == "obj" and data[2] == dest_md5:
19950 + protected = False
19951 + elif data[0] == "sym" and data[2] == dest_link:
19952 + protected = False
19953 +
19954 + if protected and dest_mode is not None:
19955 + # we have a protection path; enable config file management.
19956 + if src_md5 == dest_md5:
19957 + protected = False
19958 +
19959 + elif src_md5 == cfgfiledict.get(dest_real, [None])[0]:
19960 + # An identical update has previously been
19961 + # merged. Skip it unless the user has chosen
19962 + # --noconfmem.
19963 + move_me = protected = bool(cfgfiledict["IGNORE"])
19964 +
19965 + if (
19966 + protected
19967 + and (dest_link is not None or src_link is not None)
19968 + and dest_link != src_link
19969 + ):
19970 + # If either one is a symlink, and they are not
19971 + # identical symlinks, then force config protection.
19972 + force = True
19973 +
19974 + if move_me:
19975 + # Merging a new file, so update confmem.
19976 + cfgfiledict[dest_real] = [src_md5]
19977 + elif dest_md5 == cfgfiledict.get(dest_real, [None])[0]:
19978 + # A previously remembered update has been
19979 + # accepted, so it is removed from confmem.
19980 + del cfgfiledict[dest_real]
19981 +
19982 + if protected and move_me:
19983 + dest = new_protect_filename(
19984 + dest, newmd5=(dest_link or src_md5), force=force
19985 + )
19986 +
19987 + return dest, protected, move_me
19988 +
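For context: when _protect() decides a protected update must be merged,
new_protect_filename() moves it aside under the standard CONFIG_PROTECT
naming so the admin can review it with dispatch-conf or etc-update. A
rough sketch of that naming convention only (the real helper also
reuses a slot whose md5 matches, which the force flag above bypasses):

    import os

    def protect_filename(dest):
        # Probe ._cfg0000_<name>, ._cfg0001_<name>, ... next to dest
        # until a free slot is found.
        pdir, pname = os.path.split(dest)
        for x in range(10000):
            candidate = os.path.join(pdir, "._cfg%04d_%s" % (x, pname))
            if not os.path.lexists(candidate):
                return candidate
        raise RuntimeError("no free ._cfg slot for %s" % dest)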
19989 + def _format_contents_line(
19990 + self, node_type, abs_path, md5_digest=None, symlink_target=None, mtime_ns=None
19991 + ):
19992 + fields = [node_type, abs_path]
19993 + if md5_digest is not None:
19994 + fields.append(md5_digest)
19995 + elif symlink_target is not None:
19996 + fields.append("-> {}".format(symlink_target))
19997 + if mtime_ns is not None:
19998 + fields.append(str(mtime_ns // 1000000000))
19999 + return "{}\n".format(" ".join(fields))
20000 +
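_format_contents_line() emits the classic CONTENTS format, one entry
per line, with mtimes stored in seconds (hence the // 1000000000 on the
nanosecond value). Illustrative output, with made-up paths and digest:

    dir /usr/bin
    obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1642156321
    sym /usr/lib/libfoo.so -> libfoo.so.1 1642156321
    fif /run/foo.fifo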
20001 + def _merged_path(self, path, lstatobj, exists=True):
20002 + previous_path = self._device_path_map.get(lstatobj.st_dev)
20003 + if (
20004 + previous_path is None
20005 + or previous_path is False
20006 + or (exists and len(path) < len(previous_path))
20007 + ):
20008 + if exists:
20009 + self._device_path_map[lstatobj.st_dev] = path
20010 + else:
20011 + # This entry is used to indicate that we've unmerged
20012 + # a file from this device, and later, this entry is
20013 + # replaced by a parent directory.
20014 + self._device_path_map[lstatobj.st_dev] = False
20015 +
20016 + def _post_merge_sync(self):
20017 + """
20018 + Call this after merge or unmerge, in order to sync relevant files to
20019 + disk and avoid data loss in the event of a power failure. This method
20020 + does nothing if FEATURES=merge-sync is disabled.
20021 + """
20022 + if not self._device_path_map or "merge-sync" not in self.settings.features:
20023 + return
20024 +
20025 + returncode = None
20026 + if platform.system() == "Linux":
20027 +
20028 + paths = []
20029 + for path in self._device_path_map.values():
20030 + if path is not False:
20031 + paths.append(path)
20032 + paths = tuple(paths)
20033 +
20034 + proc = SyncfsProcess(
20035 + paths=paths, scheduler=(self._scheduler or asyncio._safe_loop())
20036 + )
20037 + proc.start()
20038 + returncode = proc.wait()
20039 +
20040 + if returncode is None or returncode != os.EX_OK:
20041 + try:
20042 + proc = subprocess.Popen(["sync"])
20043 + except EnvironmentError:
20044 + pass
20045 + else:
20046 + proc.wait()
20047 +
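On Linux the method prefers a per-filesystem syncfs() for each touched
device (via the SyncfsProcess helper) and only falls back to a global
sync(1) when that fails. A minimal sketch of the same idea using
ctypes, assuming a glibc that exposes syncfs(2):

    import ctypes
    import os
    import subprocess

    def sync_paths(paths):
        libc = ctypes.CDLL(None, use_errno=True)
        for path in paths:
            fd = os.open(path, os.O_RDONLY)
            try:
                if libc.syncfs(fd) != 0:
                    # syncfs failed; fall back to syncing everything.
                    subprocess.call(["sync"])
                    return
            finally:
                os.close(fd)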
20048 + @_slot_locked
20049 + def merge(
20050 + self,
20051 + mergeroot,
20052 + inforoot,
20053 + myroot=None,
20054 + myebuild=None,
20055 + cleanup=0,
20056 + mydbapi=None,
20057 + prev_mtimes=None,
20058 + counter=None,
20059 + ):
20060 + """
20061 + @param myroot: ignored, self._eroot is used instead
20062 + """
20063 + myroot = None
20064 + retval = -1
20065 + parallel_install = "parallel-install" in self.settings.features
20066 + if not parallel_install:
20067 + self.lockdb()
20068 + self.vartree.dbapi._bump_mtime(self.mycpv)
20069 + if self._scheduler is None:
20070 + self._scheduler = SchedulerInterface(asyncio._safe_loop())
20071 + try:
20072 + retval = self.treewalk(
20073 + mergeroot,
20074 + myroot,
20075 + inforoot,
20076 + myebuild,
20077 + cleanup=cleanup,
20078 + mydbapi=mydbapi,
20079 + prev_mtimes=prev_mtimes,
20080 + counter=counter,
20081 + )
20082 +
20083 + # If PORTAGE_BUILDDIR doesn't exist, then it probably means
20084 + # fail-clean is enabled, and the success/die hooks have
20085 + # already been called by EbuildPhase.
20086 + if os.path.isdir(self.settings["PORTAGE_BUILDDIR"]):
20087 +
20088 + if retval == os.EX_OK:
20089 + phase = "success_hooks"
20090 + else:
20091 + phase = "die_hooks"
20092 +
20093 + ebuild_phase = MiscFunctionsProcess(
20094 + background=False,
20095 + commands=[phase],
20096 + scheduler=self._scheduler,
20097 + settings=self.settings,
20098 + )
20099 + ebuild_phase.start()
20100 + ebuild_phase.wait()
20101 + self._elog_process()
20102 +
20103 + if "noclean" not in self.settings.features and (
20104 + retval == os.EX_OK or "fail-clean" in self.settings.features
20105 + ):
20106 + if myebuild is None:
20107 + myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
20108 +
20109 + doebuild_environment(
20110 + myebuild, "clean", settings=self.settings, db=mydbapi
20111 + )
20112 + phase = EbuildPhase(
20113 + background=False,
20114 + phase="clean",
20115 + scheduler=self._scheduler,
20116 + settings=self.settings,
20117 + )
20118 + phase.start()
20119 + phase.wait()
20120 + finally:
20121 + self.settings.pop("REPLACING_VERSIONS", None)
20122 + if self.vartree.dbapi._linkmap is None:
20123 + # preserve-libs is entirely disabled
20124 + pass
20125 + else:
20126 + self.vartree.dbapi._linkmap._clear_cache()
20127 + self.vartree.dbapi._bump_mtime(self.mycpv)
20128 + if not parallel_install:
20129 + self.unlockdb()
20130 +
20131 + if retval == os.EX_OK and self._postinst_failure:
20132 + retval = portage.const.RETURNCODE_POSTINST_FAILURE
20133 +
20134 + return retval
20135 +
20136 + def getstring(self, name):
20137 + "returns contents of a file with whitespace converted to spaces"
20138 + if not os.path.exists(self.dbdir + "/" + name):
20139 + return ""
20140 + with io.open(
20141 + _unicode_encode(
20142 + os.path.join(self.dbdir, name),
20143 + encoding=_encodings["fs"],
20144 + errors="strict",
20145 + ),
20146 + mode="r",
20147 + encoding=_encodings["repo.content"],
20148 + errors="replace",
20149 + ) as f:
20150 + mydata = f.read().split()
20151 + return " ".join(mydata)
20152 +
20153 + def copyfile(self, fname):
20154 + shutil.copyfile(fname, self.dbdir + "/" + os.path.basename(fname))
20155 +
20156 + def getfile(self, fname):
20157 + if not os.path.exists(self.dbdir + "/" + fname):
20158 + return ""
20159 + with io.open(
20160 + _unicode_encode(
20161 + os.path.join(self.dbdir, fname),
20162 + encoding=_encodings["fs"],
20163 + errors="strict",
20164 + ),
20165 + mode="r",
20166 + encoding=_encodings["repo.content"],
20167 + errors="replace",
20168 + ) as f:
20169 + return f.read()
20170 +
20171 + def setfile(self, fname, data):
20172 + kwargs = {}
20173 + if fname == "environment.bz2" or not isinstance(data, str):
20174 + kwargs["mode"] = "wb"
20175 + else:
20176 + kwargs["mode"] = "w"
20177 + kwargs["encoding"] = _encodings["repo.content"]
20178 + write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
20179 +
20180 + def getelements(self, ename):
20181 + if not os.path.exists(self.dbdir + "/" + ename):
20182 + return []
20183 + with io.open(
20184 + _unicode_encode(
20185 + os.path.join(self.dbdir, ename),
20186 + encoding=_encodings["fs"],
20187 + errors="strict",
20188 + ),
20189 + mode="r",
20190 + encoding=_encodings["repo.content"],
20191 + errors="replace",
20192 + ) as f:
20193 + mylines = f.readlines()
20194 + myreturn = []
20195 + for x in mylines:
20196 + for y in x[:-1].split():
20197 + myreturn.append(y)
20198 + return myreturn
20199 +
20200 + def setelements(self, mylist, ename):
20201 + with io.open(
20202 + _unicode_encode(
20203 + os.path.join(self.dbdir, ename),
20204 + encoding=_encodings["fs"],
20205 + errors="strict",
20206 + ),
20207 + mode="w",
20208 + encoding=_encodings["repo.content"],
20209 + errors="backslashreplace",
20210 + ) as f:
20211 + for x in mylist:
20212 + f.write("%s\n" % x)
20213 +
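getelements() flattens a whitespace-separated vardb file into a token
list, and setelements() writes one item per line, so a round trip
normalizes the layout. A hypothetical session against a dblink
instance (dbl and the USE file name are placeholders):

    dbl.setelements(["flag1", "flag2"], "USE")
    assert dbl.getelements("USE") == ["flag1", "flag2"]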
20214 + def isregular(self):
20215 + "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
20216 + return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
20217 +
20218 + def _pre_merge_backup(self, backup_dblink, downgrade):
20219 +
20220 + if "unmerge-backup" in self.settings.features or (
20221 + downgrade and "downgrade-backup" in self.settings.features
20222 + ):
20223 + return self._quickpkg_dblink(backup_dblink, False, None)
20224 +
20225 + return os.EX_OK
20226 +
20227 + def _pre_unmerge_backup(self, background):
20228 +
20229 + if "unmerge-backup" in self.settings.features:
20230 + logfile = None
20231 + if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
20232 + logfile = self.settings.get("PORTAGE_LOG_FILE")
20233 + return self._quickpkg_dblink(self, background, logfile)
20234 +
20235 + return os.EX_OK
20236 +
20237 + def _quickpkg_dblink(self, backup_dblink, background, logfile):
20238 +
20239 + build_time = backup_dblink.getfile("BUILD_TIME")
20240 + try:
20241 + build_time = int(build_time.strip())
20242 + except ValueError:
20243 + build_time = 0
20244 +
20245 + trees = QueryCommand.get_db()[self.settings["EROOT"]]
20246 + bintree = trees["bintree"]
20247 +
20248 + for binpkg in reversed(bintree.dbapi.match("={}".format(backup_dblink.mycpv))):
20249 + if binpkg.build_time == build_time:
20250 + return os.EX_OK
20251 +
20252 + self.lockdb()
20253 + try:
20254 +
20255 + if not backup_dblink.exists():
20256 + # It got unmerged by a concurrent process.
20257 + return os.EX_OK
20258 +
20259 + # Call quickpkg for support of QUICKPKG_DEFAULT_OPTS and related settings.
20260 + quickpkg_binary = os.path.join(
20261 + self.settings["PORTAGE_BIN_PATH"], "quickpkg"
20262 + )
20263 +
20264 + if not os.access(quickpkg_binary, os.X_OK):
20265 + # If not running from the source tree, use PATH.
20266 + quickpkg_binary = find_binary("quickpkg")
20267 + if quickpkg_binary is None:
20268 + self._display_merge(
20269 + _("%s: command not found") % "quickpkg",
20270 + level=logging.ERROR,
20271 + noiselevel=-1,
20272 + )
20273 + return 127
20274 +
20275 + # Let quickpkg inherit the global vartree config's env.
20276 + env = dict(self.vartree.settings.items())
20277 + env["__PORTAGE_INHERIT_VARDB_LOCK"] = "1"
20278 +
20279 + pythonpath = [x for x in env.get("PYTHONPATH", "").split(":") if x]
20280 + if not pythonpath or not os.path.samefile(pythonpath[0], portage._pym_path):
20281 + pythonpath.insert(0, portage._pym_path)
20282 + env["PYTHONPATH"] = ":".join(pythonpath)
20283 +
20284 + quickpkg_proc = SpawnProcess(
20285 + args=[
20286 + portage._python_interpreter,
20287 + quickpkg_binary,
20288 + "=%s" % (backup_dblink.mycpv,),
20289 + ],
20290 + background=background,
20291 + env=env,
20292 + scheduler=self._scheduler,
20293 + logfile=logfile,
20294 + )
20295 + quickpkg_proc.start()
20296 +
20297 + return quickpkg_proc.wait()
20298 +
20299 + finally:
20300 + self.unlockdb()
20301 +
20302 +
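The PYTHONPATH handling in _quickpkg_dblink() is the usual "prepend
ourselves unless already first" idiom, which keeps the spawned quickpkg
importing the same portage modules as the caller. A standalone sketch,
with pym_path standing in for portage._pym_path:

    import os

    def prepend_pythonpath(env, pym_path):
        parts = [x for x in env.get("PYTHONPATH", "").split(":") if x]
        # samefile() assumes the first entry exists, as the code above does.
        if not parts or not os.path.samefile(parts[0], pym_path):
            parts.insert(0, pym_path)
        env["PYTHONPATH"] = ":".join(parts)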
20303 + def merge(
20304 + mycat,
20305 + mypkg,
20306 + pkgloc,
20307 + infloc,
20308 + myroot=None,
20309 + settings=None,
20310 + myebuild=None,
20311 + mytree=None,
20312 + mydbapi=None,
20313 + vartree=None,
20314 + prev_mtimes=None,
20315 + blockers=None,
20316 + scheduler=None,
20317 + fd_pipes=None,
20318 + ):
20319 + """
20320 + @param myroot: ignored, settings['EROOT'] is used instead
20321 + """
20322 + myroot = None
20323 + if settings is None:
20324 + raise TypeError("settings argument is required")
20325 + if not os.access(settings["EROOT"], os.W_OK):
20326 + writemsg(
20327 + _("Permission denied: access('%s', W_OK)\n") % settings["EROOT"],
20328 + noiselevel=-1,
20329 + )
20330 + return errno.EACCES
20331 + background = settings.get("PORTAGE_BACKGROUND") == "1"
20332 + merge_task = MergeProcess(
20333 + mycat=mycat,
20334 + mypkg=mypkg,
20335 + settings=settings,
20336 + treetype=mytree,
20337 + vartree=vartree,
20338 + scheduler=(scheduler or asyncio._safe_loop()),
20339 + background=background,
20340 + blockers=blockers,
20341 + pkgloc=pkgloc,
20342 + infloc=infloc,
20343 + myebuild=myebuild,
20344 + mydbapi=mydbapi,
20345 + prev_mtimes=prev_mtimes,
20346 + logfile=settings.get("PORTAGE_LOG_FILE"),
20347 + fd_pipes=fd_pipes,
20348 + )
20349 + merge_task.start()
20350 + retcode = merge_task.wait()
20351 + return retcode
20352 +
20353 +
20354 + def unmerge(
20355 + cat,
20356 + pkg,
20357 + myroot=None,
20358 + settings=None,
20359 + mytrimworld=None,
20360 + vartree=None,
20361 + ldpath_mtimes=None,
20362 + scheduler=None,
20363 + ):
20364 + """
20365 + @param myroot: ignored, settings['EROOT'] is used instead
20366 + @param mytrimworld: ignored
20367 + """
20368 + myroot = None
20369 + if settings is None:
20370 + raise TypeError("settings argument is required")
20371 + mylink = dblink(
20372 + cat,
20373 + pkg,
20374 + settings=settings,
20375 + treetype="vartree",
20376 + vartree=vartree,
20377 + scheduler=scheduler,
20378 + )
20379 + vartree = mylink.vartree
20380 + parallel_install = "parallel-install" in settings.features
20381 + if not parallel_install:
20382 + mylink.lockdb()
20383 + try:
20384 + if mylink.exists():
20385 + retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
20386 + if retval == os.EX_OK:
20387 + mylink.lockdb()
20388 + try:
20389 + mylink.delete()
20390 + finally:
20391 + mylink.unlockdb()
20392 + return retval
20393 + return os.EX_OK
20394 + finally:
20395 + if vartree.dbapi._linkmap is None:
20396 + # preserve-libs is entirely disabled
20397 + pass
20398 + else:
20399 + vartree.dbapi._linkmap._clear_cache()
20400 + if not parallel_install:
20401 + mylink.unlockdb()
20402 +
20403
20404 def write_contents(contents, root, f):
20405 - """
20406 - Write contents to any file like object. The file will be left open.
20407 - """
20408 - root_len = len(root) - 1
20409 - for filename in sorted(contents):
20410 - entry_data = contents[filename]
20411 - entry_type = entry_data[0]
20412 - relative_filename = filename[root_len:]
20413 - if entry_type == "obj":
20414 - entry_type, mtime, md5sum = entry_data
20415 - line = "%s %s %s %s\n" % \
20416 - (entry_type, relative_filename, md5sum, mtime)
20417 - elif entry_type == "sym":
20418 - entry_type, mtime, link = entry_data
20419 - line = "%s %s -> %s %s\n" % \
20420 - (entry_type, relative_filename, link, mtime)
20421 - else: # dir, dev, fif
20422 - line = "%s %s\n" % (entry_type, relative_filename)
20423 - f.write(line)
20424 -
20425 - def tar_contents(contents, root, tar, protect=None, onProgress=None,
20426 - xattrs=False):
20427 - os = _os_merge
20428 - encoding = _encodings['merge']
20429 -
20430 - try:
20431 - for x in contents:
20432 - _unicode_encode(x,
20433 - encoding=_encodings['merge'],
20434 - errors='strict')
20435 - except UnicodeEncodeError:
20436 - # The package appears to have been merged with a
20437 - # different value of sys.getfilesystemencoding(),
20438 - # so fall back to utf_8 if appropriate.
20439 - try:
20440 - for x in contents:
20441 - _unicode_encode(x,
20442 - encoding=_encodings['fs'],
20443 - errors='strict')
20444 - except UnicodeEncodeError:
20445 - pass
20446 - else:
20447 - os = portage.os
20448 - encoding = _encodings['fs']
20449 -
20450 - tar.encoding = encoding
20451 - root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
20452 - id_strings = {}
20453 - maxval = len(contents)
20454 - curval = 0
20455 - if onProgress:
20456 - onProgress(maxval, 0)
20457 - paths = list(contents)
20458 - paths.sort()
20459 - for path in paths:
20460 - curval += 1
20461 - try:
20462 - lst = os.lstat(path)
20463 - except OSError as e:
20464 - if e.errno != errno.ENOENT:
20465 - raise
20466 - del e
20467 - if onProgress:
20468 - onProgress(maxval, curval)
20469 - continue
20470 - contents_type = contents[path][0]
20471 - if path.startswith(root):
20472 - arcname = "./" + path[len(root):]
20473 - else:
20474 - raise ValueError("invalid root argument: '%s'" % root)
20475 - live_path = path
20476 - if 'dir' == contents_type and \
20477 - not stat.S_ISDIR(lst.st_mode) and \
20478 - os.path.isdir(live_path):
20479 - # Even though this was a directory in the original ${D}, it exists
20480 - # as a symlink to a directory in the live filesystem. It must be
20481 - # recorded as a real directory in the tar file to ensure that tar
20482 - # can properly extract its children.
20483 - live_path = os.path.realpath(live_path)
20484 - lst = os.lstat(live_path)
20485 -
20486 - # Since os.lstat() inside TarFile.gettarinfo() can trigger a
20487 - # UnicodeEncodeError when python has something other than utf_8
20488 - # return from sys.getfilesystemencoding() (as in bug #388773),
20489 - # we implement the needed functionality here, using the result
20490 - # of our successful lstat call. An alternative to this would be
20491 - # to pass in the fileobj argument to TarFile.gettarinfo(), so
20492 - # that it could use fstat instead of lstat. However, that would
20493 - # have the unwanted effect of dereferencing symlinks.
20494 -
20495 - tarinfo = tar.tarinfo()
20496 - tarinfo.name = arcname
20497 - tarinfo.mode = lst.st_mode
20498 - tarinfo.uid = lst.st_uid
20499 - tarinfo.gid = lst.st_gid
20500 - tarinfo.size = 0
20501 - tarinfo.mtime = lst.st_mtime
20502 - tarinfo.linkname = ""
20503 - if stat.S_ISREG(lst.st_mode):
20504 - inode = (lst.st_ino, lst.st_dev)
20505 - if (lst.st_nlink > 1 and
20506 - inode in tar.inodes and
20507 - arcname != tar.inodes[inode]):
20508 - tarinfo.type = tarfile.LNKTYPE
20509 - tarinfo.linkname = tar.inodes[inode]
20510 - else:
20511 - tar.inodes[inode] = arcname
20512 - tarinfo.type = tarfile.REGTYPE
20513 - tarinfo.size = lst.st_size
20514 - elif stat.S_ISDIR(lst.st_mode):
20515 - tarinfo.type = tarfile.DIRTYPE
20516 - elif stat.S_ISLNK(lst.st_mode):
20517 - tarinfo.type = tarfile.SYMTYPE
20518 - tarinfo.linkname = os.readlink(live_path)
20519 - else:
20520 - continue
20521 - try:
20522 - tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
20523 - except KeyError:
20524 - pass
20525 - try:
20526 - tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
20527 - except KeyError:
20528 - pass
20529 -
20530 - if stat.S_ISREG(lst.st_mode):
20531 - if protect and protect(path):
20532 - # Create an empty file as a place holder in order to avoid
20533 - # potential collision-protect issues.
20534 - f = tempfile.TemporaryFile()
20535 - f.write(_unicode_encode(
20536 - "# empty file because --include-config=n " + \
20537 - "when `quickpkg` was used\n"))
20538 - f.flush()
20539 - f.seek(0)
20540 - tarinfo.size = os.fstat(f.fileno()).st_size
20541 - tar.addfile(tarinfo, f)
20542 - f.close()
20543 - else:
20544 - path_bytes = _unicode_encode(path,
20545 - encoding=encoding,
20546 - errors='strict')
20547 -
20548 - if xattrs:
20549 - # Compatible with GNU tar, which saves the xattrs
20550 - # under the SCHILY.xattr namespace.
20551 - for k in xattr.list(path_bytes):
20552 - tarinfo.pax_headers['SCHILY.xattr.' +
20553 - _unicode_decode(k)] = _unicode_decode(
20554 - xattr.get(path_bytes, _unicode_encode(k)))
20555 -
20556 - with open(path_bytes, 'rb') as f:
20557 - tar.addfile(tarinfo, f)
20558 -
20559 - else:
20560 - tar.addfile(tarinfo)
20561 - if onProgress:
20562 - onProgress(maxval, curval)
20563 + """
20564 + Write contents to any file-like object. The file will be left open.
20565 + """
20566 + root_len = len(root) - 1
20567 + for filename in sorted(contents):
20568 + entry_data = contents[filename]
20569 + entry_type = entry_data[0]
20570 + relative_filename = filename[root_len:]
20571 + if entry_type == "obj":
20572 + entry_type, mtime, md5sum = entry_data
20573 + line = "%s %s %s %s\n" % (entry_type, relative_filename, md5sum, mtime)
20574 + elif entry_type == "sym":
20575 + entry_type, mtime, link = entry_data
20576 + line = "%s %s -> %s %s\n" % (entry_type, relative_filename, link, mtime)
20577 + else: # dir, dev, fif
20578 + line = "%s %s\n" % (entry_type, relative_filename)
20579 + f.write(line)
20580 +
20581 +
20582 + def tar_contents(contents, root, tar, protect=None, onProgress=None, xattrs=False):
20583 + os = _os_merge
20584 + encoding = _encodings["merge"]
20585 +
20586 + try:
20587 + for x in contents:
20588 + _unicode_encode(x, encoding=_encodings["merge"], errors="strict")
20589 + except UnicodeEncodeError:
20590 + # The package appears to have been merged with a
20591 + # different value of sys.getfilesystemencoding(),
20592 + # so fall back to utf_8 if appropriate.
20593 + try:
20594 + for x in contents:
20595 + _unicode_encode(x, encoding=_encodings["fs"], errors="strict")
20596 + except UnicodeEncodeError:
20597 + pass
20598 + else:
20599 + os = portage.os
20600 + encoding = _encodings["fs"]
20601 +
20602 + tar.encoding = encoding
20603 + root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
20604 + id_strings = {}
20605 + maxval = len(contents)
20606 + curval = 0
20607 + if onProgress:
20608 + onProgress(maxval, 0)
20609 + paths = list(contents)
20610 + paths.sort()
20611 + for path in paths:
20612 + curval += 1
20613 + try:
20614 + lst = os.lstat(path)
20615 + except OSError as e:
20616 + if e.errno != errno.ENOENT:
20617 + raise
20618 + del e
20619 + if onProgress:
20620 + onProgress(maxval, curval)
20621 + continue
20622 + contents_type = contents[path][0]
20623 + if path.startswith(root):
20624 + arcname = "./" + path[len(root) :]
20625 + else:
20626 + raise ValueError("invalid root argument: '%s'" % root)
20627 + live_path = path
20628 + if (
20629 + "dir" == contents_type
20630 + and not stat.S_ISDIR(lst.st_mode)
20631 + and os.path.isdir(live_path)
20632 + ):
20633 + # Even though this was a directory in the original ${D}, it exists
20634 + # as a symlink to a directory in the live filesystem. It must be
20635 + # recorded as a real directory in the tar file to ensure that tar
20636 + # can properly extract its children.
20637 + live_path = os.path.realpath(live_path)
20638 + lst = os.lstat(live_path)
20639 +
20640 + # Since os.lstat() inside TarFile.gettarinfo() can trigger a
20641 + # UnicodeEncodeError when python has something other than utf_8
20642 + # return from sys.getfilesystemencoding() (as in bug #388773),
20643 + # we implement the needed functionality here, using the result
20644 + # of our successful lstat call. An alternative to this would be
20645 + # to pass in the fileobj argument to TarFile.gettarinfo(), so
20646 + # that it could use fstat instead of lstat. However, that would
20647 + # have the unwanted effect of dereferencing symlinks.
20648 +
20649 + tarinfo = tar.tarinfo()
20650 + tarinfo.name = arcname
20651 + tarinfo.mode = lst.st_mode
20652 + tarinfo.uid = lst.st_uid
20653 + tarinfo.gid = lst.st_gid
20654 + tarinfo.size = 0
20655 + tarinfo.mtime = lst.st_mtime
20656 + tarinfo.linkname = ""
20657 + if stat.S_ISREG(lst.st_mode):
20658 + inode = (lst.st_ino, lst.st_dev)
20659 + if (
20660 + lst.st_nlink > 1
20661 + and inode in tar.inodes
20662 + and arcname != tar.inodes[inode]
20663 + ):
20664 + tarinfo.type = tarfile.LNKTYPE
20665 + tarinfo.linkname = tar.inodes[inode]
20666 + else:
20667 + tar.inodes[inode] = arcname
20668 + tarinfo.type = tarfile.REGTYPE
20669 + tarinfo.size = lst.st_size
20670 + elif stat.S_ISDIR(lst.st_mode):
20671 + tarinfo.type = tarfile.DIRTYPE
20672 + elif stat.S_ISLNK(lst.st_mode):
20673 + tarinfo.type = tarfile.SYMTYPE
20674 + tarinfo.linkname = os.readlink(live_path)
20675 + else:
20676 + continue
20677 + try:
20678 + tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
20679 + except KeyError:
20680 + pass
20681 + try:
20682 + tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
20683 + except KeyError:
20684 + pass
20685 +
20686 + if stat.S_ISREG(lst.st_mode):
20687 + if protect and protect(path):
20688 + # Create an empty file as a place holder in order to avoid
20689 + # potential collision-protect issues.
20690 + f = tempfile.TemporaryFile()
20691 + f.write(
20692 + _unicode_encode(
20693 + "# empty file because --include-config=n "
20694 + + "when `quickpkg` was used\n"
20695 + )
20696 + )
20697 + f.flush()
20698 + f.seek(0)
20699 + tarinfo.size = os.fstat(f.fileno()).st_size
20700 + tar.addfile(tarinfo, f)
20701 + f.close()
20702 + else:
20703 + path_bytes = _unicode_encode(path, encoding=encoding, errors="strict")
20704 +
20705 + if xattrs:
20706 + # Compatible with GNU tar, which saves the xattrs
20707 + # under the SCHILY.xattr namespace.
20708 + for k in xattr.list(path_bytes):
20709 + tarinfo.pax_headers[
20710 + "SCHILY.xattr." + _unicode_decode(k)
20711 + ] = _unicode_decode(xattr.get(path_bytes, _unicode_encode(k)))
20712 +
20713 + with open(path_bytes, "rb") as f:
20714 + tar.addfile(tarinfo, f)
20715 +
20716 + else:
20717 + tar.addfile(tarinfo)
20718 + if onProgress:
20719 + onProgress(maxval, curval)
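The inode bookkeeping in tar_contents() is the standard way hardlinks
are represented in a tar stream: the first path seen for a given
(st_ino, st_dev) pair is archived as a regular member, and later paths
become LNKTYPE members pointing back at it. A minimal sketch of the
same dedup for regular files, with a local dict in place of tar.inodes:

    import os
    import stat
    import tarfile

    def add_regular_files(tar, paths):
        inodes = {}  # (st_ino, st_dev) -> first archived name
        for path in paths:
            st = os.lstat(path)
            if not stat.S_ISREG(st.st_mode):
                continue
            info = tar.gettarinfo(path)
            key = (st.st_ino, st.st_dev)
            if st.st_nlink > 1 and key in inodes:
                # Subsequent links become hardlink members.
                info.type = tarfile.LNKTYPE
                info.linkname = inodes[key]
                info.size = 0
                tar.addfile(info)
            else:
                inodes[key] = info.name
                with open(path, "rb") as f:
                    tar.addfile(info, f)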
20720 diff --cc lib/portage/getbinpkg.py
20721 index 3eb9479f2,6aa8f1de1..aaf0bcf81
20722 --- a/lib/portage/getbinpkg.py
20723 +++ b/lib/portage/getbinpkg.py
20724 @@@ -19,7 -19,6 +19,8 @@@ import socke
20725 import time
20726 import tempfile
20727 import base64
20728 ++# PREFIX LOCAL
20729 +from portage.const import CACHE_PATH
20730 import warnings
20731
20732 _all_errors = [NotImplementedError, ValueError, socket.error]
20733 @@@ -348,561 -386,616 +388,618 @@@ def match_in_array(array, prefix="", su
20734
20735
20736 def dir_get_list(baseurl, conn=None):
20737 - """Takes a base url to connect to and read from.
20738 - URI should be in the form <proto>://<site>[:port]<path>
20739 - Connection is used for persistent connection instances."""
20740 -
20741 - warnings.warn("portage.getbinpkg.dir_get_list() is deprecated",
20742 - DeprecationWarning, stacklevel=2)
20743 -
20744 - if not conn:
20745 - keepconnection = 0
20746 - else:
20747 - keepconnection = 1
20748 -
20749 - conn, protocol, address, params, headers = create_conn(baseurl, conn)
20750 -
20751 - listing = None
20752 - if protocol in ["http","https"]:
20753 - if not address.endswith("/"):
20754 - # http servers can return a 400 error here
20755 - # if the address doesn't end with a slash.
20756 - address += "/"
20757 - page, rc, msg = make_http_request(conn, address, params, headers)
20758 -
20759 - if page:
20760 - parser = ParseLinks()
20761 - parser.feed(_unicode_decode(page))
20762 - del page
20763 - listing = parser.get_anchors()
20764 - else:
20765 - import portage.exception
20766 - raise portage.exception.PortageException(
20767 - _("Unable to get listing: %s %s") % (rc,msg))
20768 - elif protocol in ["ftp"]:
20769 - if address[-1] == '/':
20770 - olddir = conn.pwd()
20771 - conn.cwd(address)
20772 - listing = conn.nlst()
20773 - conn.cwd(olddir)
20774 - del olddir
20775 - else:
20776 - listing = conn.nlst(address)
20777 - elif protocol == "sftp":
20778 - listing = conn.listdir(address)
20779 - else:
20780 - raise TypeError(_("Unknown protocol. '%s'") % protocol)
20781 -
20782 - if not keepconnection:
20783 - conn.close()
20784 -
20785 - return listing
20786 + """Takes a base url to connect to and read from.
20787 + URI should be in the form <proto>://<site>[:port]<path>
20788 + Connection is used for persistent connection instances."""
20789 +
20790 + warnings.warn(
20791 + "portage.getbinpkg.dir_get_list() is deprecated",
20792 + DeprecationWarning,
20793 + stacklevel=2,
20794 + )
20795 +
20796 + if not conn:
20797 + keepconnection = 0
20798 + else:
20799 + keepconnection = 1
20800 +
20801 + conn, protocol, address, params, headers = create_conn(baseurl, conn)
20802 +
20803 + listing = None
20804 + if protocol in ["http", "https"]:
20805 + if not address.endswith("/"):
20806 + # http servers can return a 400 error here
20807 + # if the address doesn't end with a slash.
20808 + address += "/"
20809 + page, rc, msg = make_http_request(conn, address, params, headers)
20810 +
20811 + if page:
20812 + parser = ParseLinks()
20813 + parser.feed(_unicode_decode(page))
20814 + del page
20815 + listing = parser.get_anchors()
20816 + else:
20817 + import portage.exception
20818 +
20819 + raise portage.exception.PortageException(
20820 + _("Unable to get listing: %s %s") % (rc, msg)
20821 + )
20822 + elif protocol in ["ftp"]:
20823 + if address[-1] == "/":
20824 + olddir = conn.pwd()
20825 + conn.cwd(address)
20826 + listing = conn.nlst()
20827 + conn.cwd(olddir)
20828 + del olddir
20829 + else:
20830 + listing = conn.nlst(address)
20831 + elif protocol == "sftp":
20832 + listing = conn.listdir(address)
20833 + else:
20834 + raise TypeError(_("Unknown protocol. '%s'") % protocol)
20835 +
20836 + if not keepconnection:
20837 + conn.close()
20838 +
20839 + return listing
20840 +
20841
20842 def file_get_metadata(baseurl, conn=None, chunk_size=3000):
20843 - """Takes a base url to connect to and read from.
20844 - URI should be in the form <proto>://<site>[:port]<path>
20845 - Connection is used for persistent connection instances."""
20846 -
20847 - warnings.warn("portage.getbinpkg.file_get_metadata() is deprecated",
20848 - DeprecationWarning, stacklevel=2)
20849 -
20850 - if not conn:
20851 - keepconnection = 0
20852 - else:
20853 - keepconnection = 1
20854 -
20855 - conn, protocol, address, params, headers = create_conn(baseurl, conn)
20856 -
20857 - if protocol in ["http","https"]:
20858 - headers["Range"] = "bytes=-%s" % str(chunk_size)
20859 - data, _x, _x = make_http_request(conn, address, params, headers)
20860 - elif protocol in ["ftp"]:
20861 - data, _x, _x = make_ftp_request(conn, address, -chunk_size)
20862 - elif protocol == "sftp":
20863 - f = conn.open(address)
20864 - try:
20865 - f.seek(-chunk_size, 2)
20866 - data = f.read()
20867 - finally:
20868 - f.close()
20869 - else:
20870 - raise TypeError(_("Unknown protocol. '%s'") % protocol)
20871 -
20872 - if data:
20873 - xpaksize = portage.xpak.decodeint(data[-8:-4])
20874 - if (xpaksize + 8) > chunk_size:
20875 - myid = file_get_metadata(baseurl, conn, xpaksize + 8)
20876 - if not keepconnection:
20877 - conn.close()
20878 - return myid
20879 - xpak_data = data[len(data) - (xpaksize + 8):-8]
20880 - del data
20881 -
20882 - myid = portage.xpak.xsplit_mem(xpak_data)
20883 - if not myid:
20884 - myid = None, None
20885 - del xpak_data
20886 - else:
20887 - myid = None, None
20888 -
20889 - if not keepconnection:
20890 - conn.close()
20891 -
20892 - return myid
20893 -
20894 -
20895 - def file_get(baseurl=None, dest=None, conn=None, fcmd=None, filename=None,
20896 - fcmd_vars=None):
20897 - """Takes a base url to connect to and read from.
20898 - URI should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""
20899 -
20900 - if not fcmd:
20901 -
20902 - warnings.warn("Use of portage.getbinpkg.file_get() without the fcmd "
20903 - "parameter is deprecated", DeprecationWarning, stacklevel=2)
20904 -
20905 - return file_get_lib(baseurl, dest, conn)
20906 -
20907 - variables = {}
20908 -
20909 - if fcmd_vars is not None:
20910 - variables.update(fcmd_vars)
20911 -
20912 - if "DISTDIR" not in variables:
20913 - if dest is None:
20914 - raise portage.exception.MissingParameter(
20915 - _("%s is missing required '%s' key") %
20916 - ("fcmd_vars", "DISTDIR"))
20917 - variables["DISTDIR"] = dest
20918 -
20919 - if "URI" not in variables:
20920 - if baseurl is None:
20921 - raise portage.exception.MissingParameter(
20922 - _("%s is missing required '%s' key") %
20923 - ("fcmd_vars", "URI"))
20924 - variables["URI"] = baseurl
20925 -
20926 - if "FILE" not in variables:
20927 - if filename is None:
20928 - filename = os.path.basename(variables["URI"])
20929 - variables["FILE"] = filename
20930 -
20931 - from portage.util import varexpand
20932 - from portage.process import spawn
20933 - myfetch = portage.util.shlex_split(fcmd)
20934 - myfetch = [varexpand(x, mydict=variables) for x in myfetch]
20935 - fd_pipes = {
20936 - 0: portage._get_stdin().fileno(),
20937 - 1: sys.__stdout__.fileno(),
20938 - 2: sys.__stdout__.fileno()
20939 - }
20940 - sys.__stdout__.flush()
20941 - sys.__stderr__.flush()
20942 - retval = spawn(myfetch, env=os.environ.copy(), fd_pipes=fd_pipes)
20943 - if retval != os.EX_OK:
20944 - sys.stderr.write(_("Fetcher exited with a failure condition.\n"))
20945 - return 0
20946 - return 1
20947 + """Takes a base url to connect to and read from.
20948 + URI should be in the form <proto>://<site>[:port]<path>
20949 + Connection is used for persistent connection instances."""
20950 +
20951 + warnings.warn(
20952 + "portage.getbinpkg.file_get_metadata() is deprecated",
20953 + DeprecationWarning,
20954 + stacklevel=2,
20955 + )
20956 +
20957 + if not conn:
20958 + keepconnection = 0
20959 + else:
20960 + keepconnection = 1
20961 +
20962 + conn, protocol, address, params, headers = create_conn(baseurl, conn)
20963 +
20964 + if protocol in ["http", "https"]:
20965 + headers["Range"] = "bytes=-%s" % str(chunk_size)
20966 + data, _x, _x = make_http_request(conn, address, params, headers)
20967 + elif protocol in ["ftp"]:
20968 + data, _x, _x = make_ftp_request(conn, address, -chunk_size)
20969 + elif protocol == "sftp":
20970 + f = conn.open(address)
20971 + try:
20972 + f.seek(-chunk_size, 2)
20973 + data = f.read()
20974 + finally:
20975 + f.close()
20976 + else:
20977 + raise TypeError(_("Unknown protocol. '%s'") % protocol)
20978 +
20979 + if data:
20980 + xpaksize = portage.xpak.decodeint(data[-8:-4])
20981 + if (xpaksize + 8) > chunk_size:
20982 + myid = file_get_metadata(baseurl, conn, xpaksize + 8)
20983 + if not keepconnection:
20984 + conn.close()
20985 + return myid
20986 + xpak_data = data[len(data) - (xpaksize + 8) : -8]
20987 + del data
20988 +
20989 + myid = portage.xpak.xsplit_mem(xpak_data)
20990 + if not myid:
20991 + myid = None, None
20992 + del xpak_data
20993 + else:
20994 + myid = None, None
20995 +
20996 + if not keepconnection:
20997 + conn.close()
20998 +
20999 + return myid
21000 +
21001 +
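The tail-reading logic above works because the xpak metadata blob sits
at the end of a tbz2, followed by an 8-byte trailer: a 4-byte
big-endian length (what portage.xpak.decodeint() reads) plus the
literal marker "STOP". A hypothetical helper mirroring the slicing
above on an in-memory tail:

    import struct

    def extract_xpak_tail(data):
        # data is the last chunk of a tbz2. Returns the xpak segment,
        # or None if the chunk is too short or has no trailer.
        if len(data) < 8 or data[-4:] != b"STOP":
            return None
        (xpaksize,) = struct.unpack(">I", data[-8:-4])
        if xpaksize + 8 > len(data):
            return None
        return data[-(xpaksize + 8):-8]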
21002 + def file_get(
21003 + baseurl=None, dest=None, conn=None, fcmd=None, filename=None, fcmd_vars=None
21004 + ):
21005 + """Takes a base url to connect to and read from.
21006 + URI should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""
21007 +
21008 + if not fcmd:
21009 +
21010 + warnings.warn(
21011 + "Use of portage.getbinpkg.file_get() without the fcmd "
21012 + "parameter is deprecated",
21013 + DeprecationWarning,
21014 + stacklevel=2,
21015 + )
21016 +
21017 + return file_get_lib(baseurl, dest, conn)
21018 +
21019 + variables = {}
21020 +
21021 + if fcmd_vars is not None:
21022 + variables.update(fcmd_vars)
21023 +
21024 + if "DISTDIR" not in variables:
21025 + if dest is None:
21026 + raise portage.exception.MissingParameter(
21027 + _("%s is missing required '%s' key") % ("fcmd_vars", "DISTDIR")
21028 + )
21029 + variables["DISTDIR"] = dest
21030 +
21031 + if "URI" not in variables:
21032 + if baseurl is None:
21033 + raise portage.exception.MissingParameter(
21034 + _("%s is missing required '%s' key") % ("fcmd_vars", "URI")
21035 + )
21036 + variables["URI"] = baseurl
21037 +
21038 + if "FILE" not in variables:
21039 + if filename is None:
21040 + filename = os.path.basename(variables["URI"])
21041 + variables["FILE"] = filename
21042 +
21043 + from portage.util import varexpand
21044 + from portage.process import spawn
21045 +
21046 + myfetch = portage.util.shlex_split(fcmd)
21047 + myfetch = [varexpand(x, mydict=variables) for x in myfetch]
21048 + fd_pipes = {
21049 + 0: portage._get_stdin().fileno(),
21050 + 1: sys.__stdout__.fileno(),
21051 + 2: sys.__stdout__.fileno(),
21052 + }
21053 + sys.__stdout__.flush()
21054 + sys.__stderr__.flush()
21055 + retval = spawn(myfetch, env=os.environ.copy(), fd_pipes=fd_pipes)
21056 + if retval != os.EX_OK:
21057 + sys.stderr.write(_("Fetcher exited with a failure condition.\n"))
21058 + return 0
21059 + return 1
21060 +
21061
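For context, fcmd is a FETCHCOMMAND-style template whose ${DISTDIR},
${URI} and ${FILE} placeholders are expanded per token by varexpand()
before spawning. With a made-up wget template:

    fcmd = 'wget -O "${DISTDIR}/${FILE}" "${URI}"'
    variables = {
        "DISTDIR": "/var/cache/distfiles",
        "URI": "https://example.org/pkgs/foo-1.0.tbz2",
        "FILE": "foo-1.0.tbz2",
    }
    # After shlex_split() and varexpand(), myfetch becomes:
    # ['wget', '-O', '/var/cache/distfiles/foo-1.0.tbz2',
    #  'https://example.org/pkgs/foo-1.0.tbz2']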
21062 def file_get_lib(baseurl, dest, conn=None):
21063 - """Takes a base url to connect to and read from.
21064 - URI should be in the form <proto>://<site>[:port]<path>
21065 - Connection is used for persistent connection instances."""
21066 -
21067 - warnings.warn("portage.getbinpkg.file_get_lib() is deprecated",
21068 - DeprecationWarning, stacklevel=2)
21069 -
21070 - if not conn:
21071 - keepconnection = 0
21072 - else:
21073 - keepconnection = 1
21074 -
21075 - conn, protocol, address, params, headers = create_conn(baseurl, conn)
21076 -
21077 - sys.stderr.write("Fetching '" + str(os.path.basename(address)) + "'\n")
21078 - if protocol in ["http", "https"]:
21079 - data, rc, _msg = make_http_request(conn, address, params, headers, dest=dest)
21080 - elif protocol in ["ftp"]:
21081 - data, rc, _msg = make_ftp_request(conn, address, dest=dest)
21082 - elif protocol == "sftp":
21083 - rc = 0
21084 - try:
21085 - f = conn.open(address)
21086 - except SystemExit:
21087 - raise
21088 - except Exception:
21089 - rc = 1
21090 - else:
21091 - try:
21092 - if dest:
21093 - bufsize = 8192
21094 - while True:
21095 - data = f.read(bufsize)
21096 - if not data:
21097 - break
21098 - dest.write(data)
21099 - finally:
21100 - f.close()
21101 - else:
21102 - raise TypeError(_("Unknown protocol. '%s'") % protocol)
21103 -
21104 - if not keepconnection:
21105 - conn.close()
21106 -
21107 - return rc
21108 -
21109 -
21110 - def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=1, makepickle=None):
21111 -
21112 - warnings.warn("portage.getbinpkg.dir_get_metadata() is deprecated",
21113 - DeprecationWarning, stacklevel=2)
21114 -
21115 - if not conn:
21116 - keepconnection = 0
21117 - else:
21118 - keepconnection = 1
21119 -
21120 - cache_path = CACHE_PATH
21121 - metadatafilename = os.path.join(cache_path, 'remote_metadata.pickle')
21122 -
21123 - if makepickle is None:
21124 - makepickle = CACHE_PATH+"/metadata.idx.most_recent"
21125 -
21126 - try:
21127 - conn = create_conn(baseurl, conn)[0]
21128 - except _all_errors as e:
21129 - # ftplib.FTP(host) can raise errors like this:
21130 - # socket.error: (111, 'Connection refused')
21131 - sys.stderr.write("!!! %s\n" % (e,))
21132 - return {}
21133 -
21134 - out = sys.stdout
21135 - try:
21136 - metadatafile = open(_unicode_encode(metadatafilename,
21137 - encoding=_encodings['fs'], errors='strict'), 'rb')
21138 - mypickle = pickle.Unpickler(metadatafile)
21139 - try:
21140 - mypickle.find_global = None
21141 - except AttributeError:
21142 - # TODO: If py3k, override Unpickler.find_class().
21143 - pass
21144 - metadata = mypickle.load()
21145 - out.write(_("Loaded metadata pickle.\n"))
21146 - out.flush()
21147 - metadatafile.close()
21148 - except (SystemExit, KeyboardInterrupt):
21149 - raise
21150 - except Exception:
21151 - metadata = {}
21152 - if baseurl not in metadata:
21153 - metadata[baseurl] = {}
21154 - if "indexname" not in metadata[baseurl]:
21155 - metadata[baseurl]["indexname"] = ""
21156 - if "timestamp" not in metadata[baseurl]:
21157 - metadata[baseurl]["timestamp"] = 0
21158 - if "unmodified" not in metadata[baseurl]:
21159 - metadata[baseurl]["unmodified"] = 0
21160 - if "data" not in metadata[baseurl]:
21161 - metadata[baseurl]["data"] = {}
21162 -
21163 - if not os.access(cache_path, os.W_OK):
21164 - sys.stderr.write(_("!!! Unable to write binary metadata to disk!\n"))
21165 - sys.stderr.write(_("!!! Permission denied: '%s'\n") % cache_path)
21166 - return metadata[baseurl]["data"]
21167 -
21168 - import portage.exception
21169 - try:
21170 - filelist = dir_get_list(baseurl, conn)
21171 - except portage.exception.PortageException as e:
21172 - sys.stderr.write(_("!!! Error connecting to '%s'.\n") %
21173 - _hide_url_passwd(baseurl))
21174 - sys.stderr.write("!!! %s\n" % str(e))
21175 - del e
21176 - return metadata[baseurl]["data"]
21177 - tbz2list = match_in_array(filelist, suffix=".tbz2")
21178 - metalist = match_in_array(filelist, prefix="metadata.idx")
21179 - del filelist
21180 -
21181 - # Determine if our metadata file is current.
21182 - metalist.sort()
21183 - metalist.reverse() # makes the order new-to-old.
21184 - for mfile in metalist:
21185 - if usingcache and \
21186 - ((metadata[baseurl]["indexname"] != mfile) or \
21187 - (metadata[baseurl]["timestamp"] < int(time.time() - (60 * 60 * 24)))):
21188 - # Try to download new cache until we succeed on one.
21189 - data = ""
21190 - for trynum in [1, 2, 3]:
21191 - mytempfile = tempfile.TemporaryFile()
21192 - try:
21193 - file_get(baseurl + "/" + mfile, mytempfile, conn)
21194 - if mytempfile.tell() > len(data):
21195 - mytempfile.seek(0)
21196 - data = mytempfile.read()
21197 - except ValueError as e:
21198 - sys.stderr.write("--- %s\n" % str(e))
21199 - if trynum < 3:
21200 - sys.stderr.write(_("Retrying...\n"))
21201 - sys.stderr.flush()
21202 - mytempfile.close()
21203 - continue
21204 - if match_in_array([mfile], suffix=".gz"):
21205 - out.write("gzip'd\n")
21206 - out.flush()
21207 - try:
21208 - import gzip
21209 - mytempfile.seek(0)
21210 - gzindex = gzip.GzipFile(mfile[:-3], 'rb', 9, mytempfile)
21211 - data = gzindex.read()
21212 - except SystemExit as e:
21213 - raise
21214 - except Exception as e:
21215 - mytempfile.close()
21216 - sys.stderr.write(_("!!! Failed to use gzip: ") + str(e) + "\n")
21217 - sys.stderr.flush()
21218 - mytempfile.close()
21219 - try:
21220 - metadata[baseurl]["data"] = pickle.loads(data)
21221 - del data
21222 - metadata[baseurl]["indexname"] = mfile
21223 - metadata[baseurl]["timestamp"] = int(time.time())
21224 - metadata[baseurl]["modified"] = 0 # Not modified right after a fresh download.
21225 - out.write(_("Pickle loaded.\n"))
21226 - out.flush()
21227 - break
21228 - except SystemExit as e:
21229 - raise
21230 - except Exception as e:
21231 - sys.stderr.write(_("!!! Failed to read data from index: ") + str(mfile) + "\n")
21232 - sys.stderr.write("!!! %s" % str(e))
21233 - sys.stderr.flush()
21234 - try:
21235 - metadatafile = open(_unicode_encode(metadatafilename,
21236 - encoding=_encodings['fs'], errors='strict'), 'wb')
21237 - pickle.dump(metadata, metadatafile, protocol=2)
21238 - metadatafile.close()
21239 - except SystemExit as e:
21240 - raise
21241 - except Exception as e:
21242 - sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
21243 - sys.stderr.write("!!! %s\n" % str(e))
21244 - sys.stderr.flush()
21245 - break
21246 - # We may have metadata... now we run through the tbz2 list and check.
21247 -
21248 - class CacheStats:
21249 - from time import time
21250 - def __init__(self, out):
21251 - self.misses = 0
21252 - self.hits = 0
21253 - self.last_update = 0
21254 - self.out = out
21255 - self.min_display_latency = 0.2
21256 - def update(self):
21257 - cur_time = self.time()
21258 - if cur_time - self.last_update >= self.min_display_latency:
21259 - self.last_update = cur_time
21260 - self.display()
21261 - def display(self):
21262 - self.out.write("\r"+colorize("WARN",
21263 - _("cache miss: '") + str(self.misses) + "'") + \
21264 - " --- " + colorize("GOOD", _("cache hit: '") + str(self.hits) + "'"))
21265 - self.out.flush()
21266 -
21267 - cache_stats = CacheStats(out)
21268 - have_tty = os.environ.get('TERM') != 'dumb' and out.isatty()
21269 - if have_tty:
21270 - cache_stats.display()
21271 - binpkg_filenames = set()
21272 - for x in tbz2list:
21273 - x = os.path.basename(x)
21274 - binpkg_filenames.add(x)
21275 - if x not in metadata[baseurl]["data"]:
21276 - cache_stats.misses += 1
21277 - if have_tty:
21278 - cache_stats.update()
21279 - metadata[baseurl]["modified"] = 1
21280 - myid = None
21281 - for _x in range(3):
21282 - try:
21283 - myid = file_get_metadata(
21284 - "/".join((baseurl.rstrip("/"), x.lstrip("/"))),
21285 - conn, chunk_size)
21286 - break
21287 - except http_client_BadStatusLine:
21288 - # Sometimes this error is thrown from conn.getresponse() in
21289 - # make_http_request(). The docstring for this error in
21290 - # httplib.py says "Presumably, the server closed the
21291 - # connection before sending a valid response".
21292 - conn = create_conn(baseurl)[0]
21293 - except http_client_ResponseNotReady:
21294 - # With some http servers this error is known to be thrown
21295 - # from conn.getresponse() in make_http_request() when the
21296 - # remote file does not have appropriate read permissions.
21297 - # Maybe it's possible to recover from this exception in
21298 - # some cases though, so retry.
21299 - conn = create_conn(baseurl)[0]
21300 -
21301 - if myid and myid[0]:
21302 - metadata[baseurl]["data"][x] = make_metadata_dict(myid)
21303 - elif verbose:
21304 - sys.stderr.write(colorize("BAD",
21305 - _("!!! Failed to retrieve metadata on: ")) + str(x) + "\n")
21306 - sys.stderr.flush()
21307 - else:
21308 - cache_stats.hits += 1
21309 - if have_tty:
21310 - cache_stats.update()
21311 - cache_stats.display()
21312 - # Cleanse stale cache for files that don't exist on the server anymore.
21313 - stale_cache = set(metadata[baseurl]["data"]).difference(binpkg_filenames)
21314 - if stale_cache:
21315 - for x in stale_cache:
21316 - del metadata[baseurl]["data"][x]
21317 - metadata[baseurl]["modified"] = 1
21318 - del stale_cache
21319 - del binpkg_filenames
21320 - out.write("\n")
21321 - out.flush()
21322 -
21323 - try:
21324 - if "modified" in metadata[baseurl] and metadata[baseurl]["modified"]:
21325 - metadata[baseurl]["timestamp"] = int(time.time())
21326 - metadatafile = open(_unicode_encode(metadatafilename,
21327 - encoding=_encodings['fs'], errors='strict'), 'wb')
21328 - pickle.dump(metadata, metadatafile, protocol=2)
21329 - metadatafile.close()
21330 - if makepickle:
21331 - metadatafile = open(_unicode_encode(makepickle,
21332 - encoding=_encodings['fs'], errors='strict'), 'wb')
21333 - pickle.dump(metadata[baseurl]["data"], metadatafile, protocol=2)
21334 - metadatafile.close()
21335 - except SystemExit as e:
21336 - raise
21337 - except Exception as e:
21338 - sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
21339 - sys.stderr.write("!!! "+str(e)+"\n")
21340 - sys.stderr.flush()
21341 -
21342 - if not keepconnection:
21343 - conn.close()
21344 -
21345 - return metadata[baseurl]["data"]
21346 + """Takes a base url to connect to and read from.
21347 + URI should be in the form <proto>://<site>[:port]<path>
21348 + Connection is used for persistent connection instances."""
21349 +
21350 + warnings.warn(
21351 + "portage.getbinpkg.file_get_lib() is deprecated",
21352 + DeprecationWarning,
21353 + stacklevel=2,
21354 + )
21355 +
21356 + if not conn:
21357 + keepconnection = 0
21358 + else:
21359 + keepconnection = 1
21360 +
21361 + conn, protocol, address, params, headers = create_conn(baseurl, conn)
21362 +
21363 + sys.stderr.write("Fetching '" + str(os.path.basename(address)) + "'\n")
21364 + if protocol in ["http", "https"]:
21365 + data, rc, _msg = make_http_request(conn, address, params, headers, dest=dest)
21366 + elif protocol in ["ftp"]:
21367 + data, rc, _msg = make_ftp_request(conn, address, dest=dest)
21368 + elif protocol == "sftp":
21369 + rc = 0
21370 + try:
21371 + f = conn.open(address)
21372 + except SystemExit:
21373 + raise
21374 + except Exception:
21375 + rc = 1
21376 + else:
21377 + try:
21378 + if dest:
21379 + bufsize = 8192
21380 + while True:
21381 + data = f.read(bufsize)
21382 + if not data:
21383 + break
21384 + dest.write(data)
21385 + finally:
21386 + f.close()
21387 + else:
21388 + raise TypeError(_("Unknown protocol. '%s'") % protocol)
21389 +
21390 + if not keepconnection:
21391 + conn.close()
21392 +
21393 + return rc
21394 +
21395 +
21396 + def dir_get_metadata(
21397 + baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=1, makepickle=None
21398 + ):
21399 +
21400 + warnings.warn(
21401 + "portage.getbinpkg.dir_get_metadata() is deprecated",
21402 + DeprecationWarning,
21403 + stacklevel=2,
21404 + )
21405 +
21406 + if not conn:
21407 + keepconnection = 0
21408 + else:
21409 + keepconnection = 1
21410 +
21411 - cache_path = "/var/cache/edb"
21412 ++ # PREFIX LOCAL
21413 ++ cache_path = CACHE_PATH
21414 + metadatafilename = os.path.join(cache_path, "remote_metadata.pickle")
21415 +
21416 + if makepickle is None:
21417 - makepickle = "/var/cache/edb/metadata.idx.most_recent"
21418 ++ # PREFIX LOCAL
21419 ++ makepickle = CACHE_PATH + "/metadata.idx.most_recent"
21420 +
21421 + try:
21422 + conn = create_conn(baseurl, conn)[0]
21423 + except _all_errors as e:
21424 + # ftplib.FTP(host) can raise errors like this:
21425 + # socket.error: (111, 'Connection refused')
21426 + sys.stderr.write("!!! %s\n" % (e,))
21427 + return {}
21428 +
21429 + out = sys.stdout
21430 + try:
21431 + metadatafile = open(
21432 + _unicode_encode(
21433 + metadatafilename, encoding=_encodings["fs"], errors="strict"
21434 + ),
21435 + "rb",
21436 + )
21437 + mypickle = pickle.Unpickler(metadatafile)
21438 + try:
21439 + mypickle.find_global = None
21440 + except AttributeError:
21441 + # TODO: If py3k, override Unpickler.find_class().
21442 + pass
21443 + metadata = mypickle.load()
21444 + out.write(_("Loaded metadata pickle.\n"))
21445 + out.flush()
21446 + metadatafile.close()
21447 + except (SystemExit, KeyboardInterrupt):
21448 + raise
21449 + except Exception:
21450 + metadata = {}
21451 + if baseurl not in metadata:
21452 + metadata[baseurl] = {}
21453 + if "indexname" not in metadata[baseurl]:
21454 + metadata[baseurl]["indexname"] = ""
21455 + if "timestamp" not in metadata[baseurl]:
21456 + metadata[baseurl]["timestamp"] = 0
21457 + if "unmodified" not in metadata[baseurl]:
21458 + metadata[baseurl]["unmodified"] = 0
21459 + if "data" not in metadata[baseurl]:
21460 + metadata[baseurl]["data"] = {}
21461 +
21462 + if not os.access(cache_path, os.W_OK):
21463 + sys.stderr.write(_("!!! Unable to write binary metadata to disk!\n"))
21464 + sys.stderr.write(_("!!! Permission denied: '%s'\n") % cache_path)
21465 + return metadata[baseurl]["data"]
21466 +
21467 + import portage.exception
21468 +
21469 + try:
21470 + filelist = dir_get_list(baseurl, conn)
21471 + except portage.exception.PortageException as e:
21472 + sys.stderr.write(
21473 + _("!!! Error connecting to '%s'.\n") % _hide_url_passwd(baseurl)
21474 + )
21475 + sys.stderr.write("!!! %s\n" % str(e))
21476 + del e
21477 + return metadata[baseurl]["data"]
21478 + tbz2list = match_in_array(filelist, suffix=".tbz2")
21479 + metalist = match_in_array(filelist, prefix="metadata.idx")
21480 + del filelist
21481 +
21482 + # Determine if our metadata file is current.
21483 + metalist.sort()
21484 + metalist.reverse() # makes the order new-to-old.
21485 + for mfile in metalist:
21486 + if usingcache and (
21487 + (metadata[baseurl]["indexname"] != mfile)
21488 + or (metadata[baseurl]["timestamp"] < int(time.time() - (60 * 60 * 24)))
21489 + ):
21490 + # Try to download new cache until we succeed on one.
21491 + data = ""
21492 + for trynum in [1, 2, 3]:
21493 + mytempfile = tempfile.TemporaryFile()
21494 + try:
21495 + file_get(baseurl + "/" + mfile, mytempfile, conn)
21496 + if mytempfile.tell() > len(data):
21497 + mytempfile.seek(0)
21498 + data = mytempfile.read()
21499 + except ValueError as e:
21500 + sys.stderr.write("--- %s\n" % str(e))
21501 + if trynum < 3:
21502 + sys.stderr.write(_("Retrying...\n"))
21503 + sys.stderr.flush()
21504 + mytempfile.close()
21505 + continue
21506 + if match_in_array([mfile], suffix=".gz"):
21507 + out.write("gzip'd\n")
21508 + out.flush()
21509 + try:
21510 + import gzip
21511 +
21512 + mytempfile.seek(0)
21513 + gzindex = gzip.GzipFile(mfile[:-3], "rb", 9, mytempfile)
21514 + data = gzindex.read()
21515 + except SystemExit as e:
21516 + raise
21517 + except Exception as e:
21518 + mytempfile.close()
21519 + sys.stderr.write(_("!!! Failed to use gzip: ") + str(e) + "\n")
21520 + sys.stderr.flush()
21521 + mytempfile.close()
21522 + try:
21523 + metadata[baseurl]["data"] = pickle.loads(data)
21524 + del data
21525 + metadata[baseurl]["indexname"] = mfile
21526 + metadata[baseurl]["timestamp"] = int(time.time())
21527 + metadata[baseurl]["modified"] = 0 # Not modified right after a fresh download.
21528 + out.write(_("Pickle loaded.\n"))
21529 + out.flush()
21530 + break
21531 + except SystemExit as e:
21532 + raise
21533 + except Exception as e:
21534 + sys.stderr.write(
21535 + _("!!! Failed to read data from index: ") + str(mfile) + "\n"
21536 + )
21537 + sys.stderr.write("!!! %s" % str(e))
21538 + sys.stderr.flush()
21539 + try:
21540 + metadatafile = open(
21541 + _unicode_encode(
21542 + metadatafilename, encoding=_encodings["fs"], errors="strict"
21543 + ),
21544 + "wb",
21545 + )
21546 + pickle.dump(metadata, metadatafile, protocol=2)
21547 + metadatafile.close()
21548 + except SystemExit as e:
21549 + raise
21550 + except Exception as e:
21551 + sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
21552 + sys.stderr.write("!!! %s\n" % str(e))
21553 + sys.stderr.flush()
21554 + break
21555 + # We may have metadata... now we run through the tbz2 list and check.
21556 +
21557 + class CacheStats:
21558 + from time import time
21559 +
21560 + def __init__(self, out):
21561 + self.misses = 0
21562 + self.hits = 0
21563 + self.last_update = 0
21564 + self.out = out
21565 + self.min_display_latency = 0.2
21566 +
21567 + def update(self):
21568 + cur_time = self.time()
21569 + if cur_time - self.last_update >= self.min_display_latency:
21570 + self.last_update = cur_time
21571 + self.display()
21572 +
21573 + def display(self):
21574 + self.out.write(
21575 + "\r"
21576 + + colorize("WARN", _("cache miss: '") + str(self.misses) + "'")
21577 + + " --- "
21578 + + colorize("GOOD", _("cache hit: '") + str(self.hits) + "'")
21579 + )
21580 + self.out.flush()
21581 +
21582 + cache_stats = CacheStats(out)
21583 + have_tty = os.environ.get("TERM") != "dumb" and out.isatty()
21584 + if have_tty:
21585 + cache_stats.display()
21586 + binpkg_filenames = set()
21587 + for x in tbz2list:
21588 + x = os.path.basename(x)
21589 + binpkg_filenames.add(x)
21590 + if x not in metadata[baseurl]["data"]:
21591 + cache_stats.misses += 1
21592 + if have_tty:
21593 + cache_stats.update()
21594 + metadata[baseurl]["modified"] = 1
21595 + myid = None
21596 + for _x in range(3):
21597 + try:
21598 + myid = file_get_metadata(
21599 + "/".join((baseurl.rstrip("/"), x.lstrip("/"))), conn, chunk_size
21600 + )
21601 + break
21602 + except http_client_BadStatusLine:
21603 + # Sometimes this error is thrown from conn.getresponse() in
21604 + # make_http_request(). The docstring for this error in
21605 + # httplib.py says "Presumably, the server closed the
21606 + # connection before sending a valid response".
21607 + conn = create_conn(baseurl)[0]
21608 + except http_client_ResponseNotReady:
21609 + # With some http servers this error is known to be thrown
21610 + # from conn.getresponse() in make_http_request() when the
21611 + # remote file does not have appropriate read permissions.
21612 + # Maybe it's possible to recover from this exception in
21613 + # some cases though, so retry.
21614 + conn = create_conn(baseurl)[0]
21615 +
21616 + if myid and myid[0]:
21617 + metadata[baseurl]["data"][x] = make_metadata_dict(myid)
21618 + elif verbose:
21619 + sys.stderr.write(
21620 + colorize("BAD", _("!!! Failed to retrieve metadata on: "))
21621 + + str(x)
21622 + + "\n"
21623 + )
21624 + sys.stderr.flush()
21625 + else:
21626 + cache_stats.hits += 1
21627 + if have_tty:
21628 + cache_stats.update()
21629 + cache_stats.display()
21630 + # Cleanse stale cache for files that don't exist on the server anymore.
21631 + stale_cache = set(metadata[baseurl]["data"]).difference(binpkg_filenames)
21632 + if stale_cache:
21633 + for x in stale_cache:
21634 + del metadata[baseurl]["data"][x]
21635 + metadata[baseurl]["modified"] = 1
21636 + del stale_cache
21637 + del binpkg_filenames
21638 + out.write("\n")
21639 + out.flush()
21640 +
21641 + try:
21642 + if "modified" in metadata[baseurl] and metadata[baseurl]["modified"]:
21643 + metadata[baseurl]["timestamp"] = int(time.time())
21644 + metadatafile = open(
21645 + _unicode_encode(
21646 + metadatafilename, encoding=_encodings["fs"], errors="strict"
21647 + ),
21648 + "wb",
21649 + )
21650 + pickle.dump(metadata, metadatafile, protocol=2)
21651 + metadatafile.close()
21652 + if makepickle:
21653 + metadatafile = open(
21654 + _unicode_encode(makepickle, encoding=_encodings["fs"], errors="strict"),
21655 + "wb",
21656 + )
21657 + pickle.dump(metadata[baseurl]["data"], metadatafile, protocol=2)
21658 + metadatafile.close()
21659 + except SystemExit as e:
21660 + raise
21661 + except Exception as e:
21662 + sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
21663 + sys.stderr.write("!!! " + str(e) + "\n")
21664 + sys.stderr.flush()
21665 +
21666 + if not keepconnection:
21667 + conn.close()
21668 +
21669 + return metadata[baseurl]["data"]
21670 +
21671
21672 def _cmp_cpv(d1, d2):
21673 - cpv1 = d1["CPV"]
21674 - cpv2 = d2["CPV"]
21675 - if cpv1 > cpv2:
21676 - return 1
21677 - if cpv1 == cpv2:
21678 - return 0
21679 - return -1
21680 + cpv1 = d1["CPV"]
21681 + cpv2 = d2["CPV"]
21682 + if cpv1 > cpv2:
21683 + return 1
21684 + if cpv1 == cpv2:
21685 + return 0
21686 + return -1
21687
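Since _cmp_cpv() is an old-style three-way comparator, callers wrap it with portage.util.cmp_sort_key() to obtain a sort key, as PackageIndex.write() does below. A small sketch with invented entries (note that the comparison is plain string ordering on the CPV field, not version-aware ordering):

    import portage.util  # assumes a portage environment

    def cmp_cpv(d1, d2):
        # Mirrors _cmp_cpv() above.
        if d1["CPV"] > d2["CPV"]:
            return 1
        if d1["CPV"] == d2["CPV"]:
            return 0
        return -1

    pkgs = [{"CPV": "app-misc/foo-10"}, {"CPV": "app-misc/foo-2"}]
    pkgs.sort(key=portage.util.cmp_sort_key(cmp_cpv))
    # String order places "app-misc/foo-10" before "app-misc/foo-2";
    # version-aware ordering would need portage.versions.vercmp instead.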
21688 - class PackageIndex:
21689
21690 - def __init__(self,
21691 - allowed_pkg_keys=None,
21692 - default_header_data=None,
21693 - default_pkg_data=None,
21694 - inherited_keys=None,
21695 - translated_keys=None):
21696 -
21697 - self._pkg_slot_dict = None
21698 - if allowed_pkg_keys is not None:
21699 - self._pkg_slot_dict = slot_dict_class(allowed_pkg_keys)
21700 -
21701 - self._default_header_data = default_header_data
21702 - self._default_pkg_data = default_pkg_data
21703 - self._inherited_keys = inherited_keys
21704 - self._write_translation_map = {}
21705 - self._read_translation_map = {}
21706 - if translated_keys:
21707 - self._write_translation_map.update(translated_keys)
21708 - self._read_translation_map.update(((y, x) for (x, y) in translated_keys))
21709 - self.header = {}
21710 - if self._default_header_data:
21711 - self.header.update(self._default_header_data)
21712 - self.packages = []
21713 - self.modified = True
21714 -
21715 - def _readpkgindex(self, pkgfile, pkg_entry=True):
21716 -
21717 - allowed_keys = None
21718 - if self._pkg_slot_dict is None or not pkg_entry:
21719 - d = {}
21720 - else:
21721 - d = self._pkg_slot_dict()
21722 - allowed_keys = d.allowed_keys
21723 -
21724 - for line in pkgfile:
21725 - line = line.rstrip("\n")
21726 - if not line:
21727 - break
21728 - line = line.split(":", 1)
21729 - if not len(line) == 2:
21730 - continue
21731 - k, v = line
21732 - if v:
21733 - v = v[1:]
21734 - k = self._read_translation_map.get(k, k)
21735 - if allowed_keys is not None and \
21736 - k not in allowed_keys:
21737 - continue
21738 - d[k] = v
21739 - return d
21740 -
21741 - def _writepkgindex(self, pkgfile, items):
21742 - for k, v in items:
21743 - pkgfile.write("%s: %s\n" % \
21744 - (self._write_translation_map.get(k, k), v))
21745 - pkgfile.write("\n")
21746 -
21747 - def read(self, pkgfile):
21748 - self.readHeader(pkgfile)
21749 - self.readBody(pkgfile)
21750 -
21751 - def readHeader(self, pkgfile):
21752 - self.header.update(self._readpkgindex(pkgfile, pkg_entry=False))
21753 -
21754 - def readBody(self, pkgfile):
21755 - while True:
21756 - d = self._readpkgindex(pkgfile)
21757 - if not d:
21758 - break
21759 - mycpv = d.get("CPV")
21760 - if not mycpv:
21761 - continue
21762 - if self._default_pkg_data:
21763 - for k, v in self._default_pkg_data.items():
21764 - d.setdefault(k, v)
21765 - if self._inherited_keys:
21766 - for k in self._inherited_keys:
21767 - v = self.header.get(k)
21768 - if v is not None:
21769 - d.setdefault(k, v)
21770 - self.packages.append(d)
21771 -
21772 - def write(self, pkgfile):
21773 - if self.modified:
21774 - self.header["TIMESTAMP"] = str(int(time.time()))
21775 - self.header["PACKAGES"] = str(len(self.packages))
21776 - keys = list(self.header)
21777 - keys.sort()
21778 - self._writepkgindex(pkgfile, [(k, self.header[k]) \
21779 - for k in keys if self.header[k]])
21780 - for metadata in sorted(self.packages,
21781 - key=portage.util.cmp_sort_key(_cmp_cpv)):
21782 - metadata = metadata.copy()
21783 - if self._inherited_keys:
21784 - for k in self._inherited_keys:
21785 - v = self.header.get(k)
21786 - if v is not None and v == metadata.get(k):
21787 - del metadata[k]
21788 - if self._default_pkg_data:
21789 - for k, v in self._default_pkg_data.items():
21790 - if metadata.get(k) == v:
21791 - metadata.pop(k, None)
21792 - keys = list(metadata)
21793 - keys.sort()
21794 - self._writepkgindex(pkgfile,
21795 - [(k, metadata[k]) for k in keys if metadata[k]])
21796 + class PackageIndex:
21797 + def __init__(
21798 + self,
21799 + allowed_pkg_keys=None,
21800 + default_header_data=None,
21801 + default_pkg_data=None,
21802 + inherited_keys=None,
21803 + translated_keys=None,
21804 + ):
21805 +
21806 + self._pkg_slot_dict = None
21807 + if allowed_pkg_keys is not None:
21808 + self._pkg_slot_dict = slot_dict_class(allowed_pkg_keys)
21809 +
21810 + self._default_header_data = default_header_data
21811 + self._default_pkg_data = default_pkg_data
21812 + self._inherited_keys = inherited_keys
21813 + self._write_translation_map = {}
21814 + self._read_translation_map = {}
21815 + if translated_keys:
21816 + self._write_translation_map.update(translated_keys)
21817 + self._read_translation_map.update(((y, x) for (x, y) in translated_keys))
21818 + self.header = {}
21819 + if self._default_header_data:
21820 + self.header.update(self._default_header_data)
21821 + self.packages = []
21822 + self.modified = True
21823 +
21824 + def _readpkgindex(self, pkgfile, pkg_entry=True):
21825 +
21826 + allowed_keys = None
21827 + if self._pkg_slot_dict is None or not pkg_entry:
21828 + d = {}
21829 + else:
21830 + d = self._pkg_slot_dict()
21831 + allowed_keys = d.allowed_keys
21832 +
21833 + for line in pkgfile:
21834 + line = line.rstrip("\n")
21835 + if not line:
21836 + break
21837 + line = line.split(":", 1)
21838 + if not len(line) == 2:
21839 + continue
21840 + k, v = line
21841 + if v:
21842 + v = v[1:]
21843 + k = self._read_translation_map.get(k, k)
21844 + if allowed_keys is not None and k not in allowed_keys:
21845 + continue
21846 + d[k] = v
21847 + return d
21848 +
21849 + def _writepkgindex(self, pkgfile, items):
21850 + for k, v in items:
21851 + pkgfile.write("%s: %s\n" % (self._write_translation_map.get(k, k), v))
21852 + pkgfile.write("\n")
21853 +
21854 + def read(self, pkgfile):
21855 + self.readHeader(pkgfile)
21856 + self.readBody(pkgfile)
21857 +
21858 + def readHeader(self, pkgfile):
21859 + self.header.update(self._readpkgindex(pkgfile, pkg_entry=False))
21860 +
21861 + def readBody(self, pkgfile):
21862 + while True:
21863 + d = self._readpkgindex(pkgfile)
21864 + if not d:
21865 + break
21866 + mycpv = d.get("CPV")
21867 + if not mycpv:
21868 + continue
21869 + if self._default_pkg_data:
21870 + for k, v in self._default_pkg_data.items():
21871 + d.setdefault(k, v)
21872 + if self._inherited_keys:
21873 + for k in self._inherited_keys:
21874 + v = self.header.get(k)
21875 + if v is not None:
21876 + d.setdefault(k, v)
21877 + self.packages.append(d)
21878 +
21879 + def write(self, pkgfile):
21880 + if self.modified:
21881 + self.header["TIMESTAMP"] = str(int(time.time()))
21882 + self.header["PACKAGES"] = str(len(self.packages))
21883 + keys = list(self.header)
21884 + keys.sort()
21885 + self._writepkgindex(
21886 + pkgfile, [(k, self.header[k]) for k in keys if self.header[k]]
21887 + )
21888 + for metadata in sorted(self.packages, key=portage.util.cmp_sort_key(_cmp_cpv)):
21889 + metadata = metadata.copy()
21890 + if self._inherited_keys:
21891 + for k in self._inherited_keys:
21892 + v = self.header.get(k)
21893 + if v is not None and v == metadata.get(k):
21894 + del metadata[k]
21895 + if self._default_pkg_data:
21896 + for k, v in self._default_pkg_data.items():
21897 + if metadata.get(k) == v:
21898 + metadata.pop(k, None)
21899 + keys = list(metadata)
21900 + keys.sort()
21901 + self._writepkgindex(
21902 + pkgfile, [(k, metadata[k]) for k in keys if metadata[k]]
21903 + )
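The PackageIndex class above reads and writes the binhost index as blank-line-separated stanzas of "Key: value" pairs: one header stanza, then one stanza per package. A stdlib-only sketch of reading a single stanza, equivalent in spirit to _readpkgindex() (the sample data is made up):

    import io

    sample = io.StringIO("CPV: app-misc/foo-1\nSIZE: 1024\n\n")

    def read_stanza(fobj):
        # Collect "Key: value" lines until a blank line ends the stanza.
        d = {}
        for line in fobj:
            line = line.rstrip("\n")
            if not line:
                break
            parts = line.split(":", 1)
            if len(parts) != 2:
                continue  # skip malformed lines, as _readpkgindex() does
            k, v = parts
            d[k] = v[1:] if v else v  # drop the single space after the colon
        return d

    print(read_stanza(sample))  # {'CPV': 'app-misc/foo-1', 'SIZE': '1024'}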
21904 diff --cc lib/portage/package/ebuild/_config/special_env_vars.py
21905 index 682bea62b,06ae3aa39..9331bf451
21906 --- a/lib/portage/package/ebuild/_config/special_env_vars.py
21907 +++ b/lib/portage/package/ebuild/_config/special_env_vars.py
21908 @@@ -40,49 -83,109 +83,114 @@@ environ_whitelist = [
21909 # environment in order to prevent sandbox from sourcing /etc/profile
21910 # in its bashrc (causing major leakage).
21911 environ_whitelist += [
21912 - "ACCEPT_LICENSE", "BASH_ENV", "BASH_FUNC____in_portage_iuse%%",
21913 - "BROOT", "BUILD_PREFIX", "COLUMNS", "D",
21914 - "DISTDIR", "DOC_SYMLINKS_DIR", "EAPI", "EBUILD",
21915 - "EBUILD_FORCE_TEST",
21916 - "EBUILD_PHASE", "EBUILD_PHASE_FUNC", "ECLASSDIR", "ECLASS_DEPTH", "ED",
21917 - "EMERGE_FROM", "ENV_UNSET", "EPREFIX", "EROOT", "ESYSROOT",
21918 - "FEATURES", "FILESDIR", "HOME", "MERGE_TYPE", "NOCOLOR", "PATH",
21919 - "PKGDIR",
21920 - "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
21921 - "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST", "PORTAGE_BASHRC_FILES",
21922 - "PORTAGE_BASHRC", "PM_EBUILD_HOOK_DIR",
21923 - "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
21924 - "PORTAGE_BINPKG_TMPFILE",
21925 - "PORTAGE_BIN_PATH",
21926 - "PORTAGE_BUILDDIR", "PORTAGE_BUILD_GROUP", "PORTAGE_BUILD_USER",
21927 - "PORTAGE_BUNZIP2_COMMAND", "PORTAGE_BZIP2_COMMAND",
21928 - "PORTAGE_COLORMAP", "PORTAGE_COMPRESS", "PORTAGE_COMPRESSION_COMMAND",
21929 - "PORTAGE_COMPRESS_EXCLUDE_SUFFIXES",
21930 - "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
21931 - "PORTAGE_DOHTML_UNWARNED_SKIPPED_EXTENSIONS",
21932 - "PORTAGE_DOHTML_UNWARNED_SKIPPED_FILES",
21933 - "PORTAGE_DOHTML_WARN_ON_SKIPPED_FILES",
21934 - "PORTAGE_EBUILD_EXIT_FILE", "PORTAGE_FEATURES",
21935 - "PORTAGE_GID", "PORTAGE_GRPNAME",
21936 - "PORTAGE_INTERNAL_CALLER",
21937 - "PORTAGE_INST_GID", "PORTAGE_INST_UID",
21938 - "PORTAGE_IPC_DAEMON", "PORTAGE_IUSE", "PORTAGE_ECLASS_LOCATIONS",
21939 - "PORTAGE_LOG_FILE", "PORTAGE_OVERRIDE_EPREFIX", "PORTAGE_PIPE_FD",
21940 - "PORTAGE_PROPERTIES",
21941 - "PORTAGE_PYM_PATH", "PORTAGE_PYTHON",
21942 - "PORTAGE_PYTHONPATH", "PORTAGE_QUIET",
21943 - "PORTAGE_REPO_NAME", "PORTAGE_REPOSITORIES", "PORTAGE_RESTRICT",
21944 - "PORTAGE_SIGPIPE_STATUS", "PORTAGE_SOCKS5_PROXY",
21945 - "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV", "PORTAGE_USERNAME",
21946 - "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE", "PORTAGE_XATTR_EXCLUDE",
21947 - "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PYTHONDONTWRITEBYTECODE",
21948 - "REPLACING_VERSIONS", "REPLACED_BY_VERSION",
21949 - "ROOT", "ROOTPATH", "SANDBOX_LOG", "SYSROOT", "T", "TMP", "TMPDIR",
21950 - "USE_EXPAND", "USE_ORDER", "WORKDIR",
21951 - "XARGS", "__PORTAGE_TEST_HARDLINK_LOCKS",
21952 - # PREFIX LOCAL
21953 - "EXTRA_PATH", "PORTAGE_GROUP", "PORTAGE_USER",
21954 - # END PREFIX LOCAL
21955 + "ACCEPT_LICENSE",
21956 + "BASH_ENV",
21957 + "BASH_FUNC____in_portage_iuse%%",
21958 + "BROOT",
21959 + "BUILD_PREFIX",
21960 + "COLUMNS",
21961 + "D",
21962 + "DISTDIR",
21963 + "DOC_SYMLINKS_DIR",
21964 + "EAPI",
21965 + "EBUILD",
21966 + "EBUILD_FORCE_TEST",
21967 + "EBUILD_PHASE",
21968 + "EBUILD_PHASE_FUNC",
21969 + "ECLASSDIR",
21970 + "ECLASS_DEPTH",
21971 + "ED",
21972 + "EMERGE_FROM",
21973 + "ENV_UNSET",
21974 + "EPREFIX",
21975 + "EROOT",
21976 + "ESYSROOT",
21977 + "FEATURES",
21978 + "FILESDIR",
21979 + "HOME",
21980 + "MERGE_TYPE",
21981 + "NOCOLOR",
21982 + "PATH",
21983 + "PKGDIR",
21984 + "PKGUSE",
21985 + "PKG_LOGDIR",
21986 + "PKG_TMPDIR",
21987 + "PORTAGE_ACTUAL_DISTDIR",
21988 + "PORTAGE_ARCHLIST",
21989 + "PORTAGE_BASHRC_FILES",
21990 + "PORTAGE_BASHRC",
21991 + "PM_EBUILD_HOOK_DIR",
21992 + "PORTAGE_BINPKG_FILE",
21993 + "PORTAGE_BINPKG_TAR_OPTS",
21994 + "PORTAGE_BINPKG_TMPFILE",
21995 + "PORTAGE_BIN_PATH",
21996 + "PORTAGE_BUILDDIR",
21997 + "PORTAGE_BUILD_GROUP",
21998 + "PORTAGE_BUILD_USER",
21999 + "PORTAGE_BUNZIP2_COMMAND",
22000 + "PORTAGE_BZIP2_COMMAND",
22001 + "PORTAGE_COLORMAP",
22002 + "PORTAGE_COMPRESS",
22003 + "PORTAGE_COMPRESSION_COMMAND",
22004 + "PORTAGE_COMPRESS_EXCLUDE_SUFFIXES",
22005 + "PORTAGE_CONFIGROOT",
22006 + "PORTAGE_DEBUG",
22007 + "PORTAGE_DEPCACHEDIR",
22008 + "PORTAGE_DOHTML_UNWARNED_SKIPPED_EXTENSIONS",
22009 + "PORTAGE_DOHTML_UNWARNED_SKIPPED_FILES",
22010 + "PORTAGE_DOHTML_WARN_ON_SKIPPED_FILES",
22011 + "PORTAGE_EBUILD_EXIT_FILE",
22012 + "PORTAGE_FEATURES",
22013 + "PORTAGE_GID",
22014 + "PORTAGE_GRPNAME",
22015 + "PORTAGE_INTERNAL_CALLER",
22016 + "PORTAGE_INST_GID",
22017 + "PORTAGE_INST_UID",
22018 + "PORTAGE_IPC_DAEMON",
22019 + "PORTAGE_IUSE",
22020 + "PORTAGE_ECLASS_LOCATIONS",
22021 + "PORTAGE_LOG_FILE",
22022 + "PORTAGE_OVERRIDE_EPREFIX",
22023 + "PORTAGE_PIPE_FD",
22024 + "PORTAGE_PROPERTIES",
22025 + "PORTAGE_PYM_PATH",
22026 + "PORTAGE_PYTHON",
22027 + "PORTAGE_PYTHONPATH",
22028 + "PORTAGE_QUIET",
22029 + "PORTAGE_REPO_NAME",
22030 + "PORTAGE_REPOSITORIES",
22031 + "PORTAGE_RESTRICT",
22032 + "PORTAGE_SIGPIPE_STATUS",
22033 + "PORTAGE_SOCKS5_PROXY",
22034 + "PORTAGE_TMPDIR",
22035 + "PORTAGE_UPDATE_ENV",
22036 + "PORTAGE_USERNAME",
22037 + "PORTAGE_VERBOSE",
22038 + "PORTAGE_WORKDIR_MODE",
22039 + "PORTAGE_XATTR_EXCLUDE",
22040 + "PORTDIR",
22041 + "PORTDIR_OVERLAY",
22042 + "PREROOTPATH",
22043 + "PYTHONDONTWRITEBYTECODE",
22044 + "REPLACING_VERSIONS",
22045 + "REPLACED_BY_VERSION",
22046 + "ROOT",
22047 + "ROOTPATH",
22048 + "SANDBOX_LOG",
22049 + "SYSROOT",
22050 + "T",
22051 + "TMP",
22052 + "TMPDIR",
22053 + "USE_EXPAND",
22054 + "USE_ORDER",
22055 + "WORKDIR",
22056 + "XARGS",
22057 + "__PORTAGE_TEST_HARDLINK_LOCKS",
22058 ++ # BEGIN PREFIX LOCAL
22059 ++ "EXTRA_PATH",
22060 ++ "PORTAGE_GROUP",
22061 ++ "PORTAGE_USER",
22062 ++ # END PREFIX LOCAL
22063 ]
22064
22065 # user config variables
22066 @@@ -115,13 -232,15 +237,18 @@@ environ_whitelist +=
22067 ]
22068
22069 # other variables inherited from the calling environment
22070 +# UNIXMODE is necessary for MiNT
22071 environ_whitelist += [
22072 - "CVS_RSH", "ECHANGELOG_USER",
22073 - "GPG_AGENT_INFO",
22074 - "SSH_AGENT_PID", "SSH_AUTH_SOCK",
22075 - "STY", "WINDOW", "XAUTHORITY",
22076 - "UNIXMODE",
22077 + "CVS_RSH",
22078 + "ECHANGELOG_USER",
22079 + "GPG_AGENT_INFO",
22080 + "SSH_AGENT_PID",
22081 + "SSH_AUTH_SOCK",
22082 + "STY",
22083 + "WINDOW",
22084 + "XAUTHORITY",
22085 ++ # PREFIX LOCAL
22086 ++ "UNIXMODE",
22087 ]
22088
22089 environ_whitelist = frozenset(environ_whitelist)
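These whitelists come into play when portage builds the environment an ebuild runs in: whitelisted names are carried over from the calling environment, while filtered names (below) are stripped. A simplified, stdlib-only sketch of the idea; the real logic in config.py also consults environ_whitelist_re and several other rules:

    import os

    # Toy stand-ins for the frozensets assembled in this module.
    environ_whitelist = frozenset({"PATH", "HOME", "EPREFIX"})
    environ_filter = frozenset({"MAIL", "TERMINFO"})

    def build_ebuild_env(environ=os.environ):
        # Carry over whitelisted variables and drop filtered ones.
        return {
            k: v
            for k, v in environ.items()
            if k in environ_whitelist and k not in environ_filter
        }

    env = build_ebuild_env()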
22090 @@@ -141,11 -265,9 +273,22 @@@ environ_filter +=
22091
22092 # misc variables inherited from the calling environment
22093 environ_filter += [
22094 - "INFOPATH", "MANPATH", "USER",
22095 - "HOST", "GROUP", "LOGNAME", "MAIL", "REMOTEHOST",
22096 - "SECURITYSESSIONID",
22097 - "TERMINFO", "TERM_PROGRAM", "TERM_PROGRAM_VERSION",
22098 - "VENDOR", "__CF_USER_TEXT_ENCODING",
22099 + "INFOPATH",
22100 + "MANPATH",
22101 + "USER",
22102 ++ # BEGIN PREFIX LOCAL
22103 ++ "HOST",
22104 ++ "GROUP",
22105 ++ "LOGNAME",
22106 ++ "MAIL",
22107 ++ "REMOTEHOST",
22108 ++ "SECURITYSESSIONID",
22109 ++ "TERMINFO",
22110 ++ "TERM_PROGRAM",
22111 ++ "TERM_PROGRAM_VERSION",
22112 ++ "VENDOR",
22113 ++ "__CF_USER_TEXT_ENCODING",
22114 ++ # END PREFIX LOCAL
22115 ]
22116
22117 # variables that break bash
22118 diff --cc lib/portage/package/ebuild/config.py
22119 index 59972af76,b4d6862a3..625a1be49
22120 --- a/lib/portage/package/ebuild/config.py
22121 +++ b/lib/portage/package/ebuild/config.py
22122 @@@ -41,15 -64,28 +64,28 @@@ from portage.env.loaders import KeyValu
22123 from portage.exception import InvalidDependString, PortageException
22124 from portage.localization import _
22125 from portage.output import colorize
22126 -from portage.process import fakeroot_capable, sandbox_capable
22127 +from portage.process import fakeroot_capable, sandbox_capable, macossandbox_capable
22128 from portage.repository.config import (
22129 - allow_profile_repo_deps,
22130 - load_repository_config,
22131 + allow_profile_repo_deps,
22132 + load_repository_config,
22133 + )
22134 + from portage.util import (
22135 + ensure_dirs,
22136 + getconfig,
22137 + grabdict,
22138 + grabdict_package,
22139 + grabfile,
22140 + grabfile_package,
22141 + LazyItemsDict,
22142 + normalize_path,
22143 + shlex_split,
22144 + stack_dictlist,
22145 + stack_dicts,
22146 + stack_lists,
22147 + writemsg,
22148 + writemsg_level,
22149 + _eapi_cache,
22150 )
22151 - from portage.util import ensure_dirs, getconfig, grabdict, \
22152 - grabdict_package, grabfile, grabfile_package, LazyItemsDict, \
22153 - normalize_path, shlex_split, stack_dictlist, stack_dicts, stack_lists, \
22154 - writemsg, writemsg_level, _eapi_cache
22155 from portage.util.install_mask import _raise_exc
22156 from portage.util.path import first_existing
22157 from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
22158 @@@ -70,2889 -111,3307 +111,3315 @@@ from portage.package.ebuild._config.unp
22159
22160 _feature_flags_cache = {}
22161
22162 +
22163 def _get_feature_flags(eapi_attrs):
22164 - cache_key = (eapi_attrs.feature_flag_test,)
22165 - flags = _feature_flags_cache.get(cache_key)
22166 - if flags is not None:
22167 - return flags
22168 + cache_key = (eapi_attrs.feature_flag_test,)
22169 + flags = _feature_flags_cache.get(cache_key)
22170 + if flags is not None:
22171 + return flags
22172 +
22173 + flags = []
22174 + if eapi_attrs.feature_flag_test:
22175 + flags.append("test")
22176
22177 - flags = []
22178 - if eapi_attrs.feature_flag_test:
22179 - flags.append("test")
22180 + flags = frozenset(flags)
22181 + _feature_flags_cache[cache_key] = flags
22182 + return flags
22183
22184 - flags = frozenset(flags)
22185 - _feature_flags_cache[cache_key] = flags
22186 - return flags
22187
22188 def autouse(myvartree, use_cache=1, mysettings=None):
22189 - warnings.warn("portage.autouse() is deprecated",
22190 - DeprecationWarning, stacklevel=2)
22191 - return ""
22192 + warnings.warn("portage.autouse() is deprecated", DeprecationWarning, stacklevel=2)
22193 + return ""
22194 +
22195
22196 def check_config_instance(test):
22197 - if not isinstance(test, config):
22198 - raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
22199 + if not isinstance(test, config):
22200 + raise TypeError(
22201 + "Invalid type for config object: %s (should be %s)"
22202 + % (test.__class__, config)
22203 + )
22204 +
22205
22206 def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
22207 - for x in key_order:
22208 - if x in top_dict and key in top_dict[x]:
22209 - if FullCopy:
22210 - return copy.deepcopy(top_dict[x][key])
22211 - return top_dict[x][key]
22212 - if EmptyOnError:
22213 - return ""
22214 - raise KeyError("Key not found in list; '%s'" % key)
22215 + for x in key_order:
22216 + if x in top_dict and key in top_dict[x]:
22217 + if FullCopy:
22218 + return copy.deepcopy(top_dict[x][key])
22219 + return top_dict[x][key]
22220 + if EmptyOnError:
22221 + return ""
22222 + raise KeyError("Key not found in list; '%s'" % key)
22223 +
22224
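best_from_dict() walks key_order and returns the value from the first dictionary that carries the requested key, deep-copying it by default. A toy usage example with invented data, assuming the function is importable from this module:

    from portage.package.ebuild.config import best_from_dict

    top_dict = {
        "conf": {"CFLAGS": "-O2"},
        "defaults": {"CFLAGS": "-O1", "USE": "prefix"},
    }
    # The first dict in key_order that has the key wins:
    assert best_from_dict("CFLAGS", top_dict, ["conf", "defaults"]) == "-O2"
    assert best_from_dict("USE", top_dict, ["conf", "defaults"]) == "prefix"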
22225 def _lazy_iuse_regex(iuse_implicit):
22226 - """
22227 - The PORTAGE_IUSE value is lazily evaluated since re.escape() is slow
22228 - and the value is only used when an ebuild phase needs to be executed
22229 - (it's used only to generate QA notices).
22230 - """
22231 - # Escape anything except ".*" which is supposed to pass through from
22232 - # _get_implicit_iuse().
22233 - regex = sorted(re.escape(x) for x in iuse_implicit)
22234 - regex = "^(%s)$" % "|".join(regex)
22235 - regex = regex.replace("\\.\\*", ".*")
22236 - return regex
22237 + """
22238 + The PORTAGE_IUSE value is lazily evaluated since re.escape() is slow
22239 + and the value is only used when an ebuild phase needs to be executed
22240 + (it's used only to generate QA notices).
22241 + """
22242 + # Escape anything except ".*" which is supposed to pass through from
22243 + # _get_implicit_iuse().
22244 + regex = sorted(re.escape(x) for x in iuse_implicit)
22245 + regex = "^(%s)$" % "|".join(regex)
22246 + regex = regex.replace("\\.\\*", ".*")
22247 + return regex
22248 +
22249
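To make _lazy_iuse_regex() concrete: every implicit IUSE entry is escaped, and the ".*" wildcards that _get_implicit_iuse() emits are then deliberately un-escaped again so they keep working as patterns. A worked example with invented flags:

    import re

    iuse_implicit = ["prefix", "userland_.*"]
    regex = sorted(re.escape(x) for x in iuse_implicit)
    regex = "^(%s)$" % "|".join(regex)     # ^(prefix|userland_\.\*)$
    regex = regex.replace("\\.\\*", ".*")  # ^(prefix|userland_.*)$
    assert re.match(regex, "userland_GNU")
    assert not re.match(regex, "unknown_flag")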
22250 class _iuse_implicit_match_cache:
22251 + def __init__(self, settings):
22252 + self._iuse_implicit_re = re.compile(
22253 + "^(%s)$" % "|".join(settings._get_implicit_iuse())
22254 + )
22255 + self._cache = {}
22256 +
22257 + def __call__(self, flag):
22258 + """
22259 + Returns True if the flag is matched, False otherwise.
22260 + """
22261 + try:
22262 + return self._cache[flag]
22263 + except KeyError:
22264 + m = self._iuse_implicit_re.match(flag) is not None
22265 + self._cache[flag] = m
22266 + return m
22267
22268 - def __init__(self, settings):
22269 - self._iuse_implicit_re = re.compile("^(%s)$" % \
22270 - "|".join(settings._get_implicit_iuse()))
22271 - self._cache = {}
22272 -
22273 - def __call__(self, flag):
22274 - """
22275 - Returns True if the flag is matched, False otherwise.
22276 - """
22277 - try:
22278 - return self._cache[flag]
22279 - except KeyError:
22280 - m = self._iuse_implicit_re.match(flag) is not None
22281 - self._cache[flag] = m
22282 - return m
22283
22284 class config:
22285 - """
22286 - This class encompasses the main portage configuration. Data is pulled from
22287 - ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
22288 - parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
22289 - overrides.
22290 -
22291 - Generally if you need data like USE flags, FEATURES, environment variables,
22292 - virtuals ...etc you look in here.
22293 - """
22294 -
22295 - _constant_keys = frozenset(['PORTAGE_BIN_PATH', 'PORTAGE_GID',
22296 - 'PORTAGE_PYM_PATH', 'PORTAGE_PYTHONPATH'])
22297 -
22298 - _deprecated_keys = {'PORTAGE_LOGDIR': 'PORT_LOGDIR',
22299 - 'PORTAGE_LOGDIR_CLEAN': 'PORT_LOGDIR_CLEAN',
22300 - 'SIGNED_OFF_BY': 'DCO_SIGNED_OFF_BY'}
22301 -
22302 - _setcpv_aux_keys = ('BDEPEND', 'DEFINED_PHASES', 'DEPEND', 'EAPI', 'IDEPEND',
22303 - 'INHERITED', 'IUSE', 'REQUIRED_USE', 'KEYWORDS', 'LICENSE', 'PDEPEND',
22304 - 'PROPERTIES', 'RDEPEND', 'SLOT',
22305 - 'repository', 'RESTRICT', 'LICENSE',)
22306 -
22307 - _module_aliases = {
22308 - "cache.metadata_overlay.database" : "portage.cache.flat_hash.mtime_md5_database",
22309 - "portage.cache.metadata_overlay.database" : "portage.cache.flat_hash.mtime_md5_database",
22310 - }
22311 -
22312 - _case_insensitive_vars = special_env_vars.case_insensitive_vars
22313 - _default_globals = special_env_vars.default_globals
22314 - _env_blacklist = special_env_vars.env_blacklist
22315 - _environ_filter = special_env_vars.environ_filter
22316 - _environ_whitelist = special_env_vars.environ_whitelist
22317 - _environ_whitelist_re = special_env_vars.environ_whitelist_re
22318 - _global_only_vars = special_env_vars.global_only_vars
22319 -
22320 - def __init__(self, clone=None, mycpv=None, config_profile_path=None,
22321 - config_incrementals=None, config_root=None, target_root=None,
22322 - sysroot=None, eprefix=None, local_config=True, env=None,
22323 - _unmatched_removal=False, repositories=None):
22324 - """
22325 - @param clone: If provided, init will use deepcopy to copy by value the instance.
22326 - @type clone: Instance of config class.
22327 - @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
22328 - and then calling instance.setcpv(mycpv).
22329 - @type mycpv: String
22330 - @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
22331 - @type config_profile_path: String
22332 - @param config_incrementals: List of incremental variables
22333 - (defaults to portage.const.INCREMENTALS)
22334 - @type config_incrementals: List
22335 - @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
22336 - @type config_root: String
22337 - @param target_root: the target root, which typically corresponds to the
22338 - value of the $ROOT env variable (default is /)
22339 - @type target_root: String
22340 - @param sysroot: the sysroot to build against, which typically corresponds
22341 - to the value of the $SYSROOT env variable (default is /)
22342 - @type sysroot: String
22343 - @param eprefix: set the EPREFIX variable (default is portage.const.EPREFIX)
22344 - @type eprefix: String
22345 - @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
22346 - ignore local config (keywording and unmasking)
22347 - @type local_config: Boolean
22348 - @param env: The calling environment which is used to override settings.
22349 - Defaults to os.environ if unspecified.
22350 - @type env: dict
22351 - @param _unmatched_removal: Enabled by repoman when the
22352 - --unmatched-removal option is given.
22353 - @type _unmatched_removal: Boolean
22354 - @param repositories: Configuration of repositories.
22355 - Defaults to portage.repository.config.load_repository_config().
22356 - @type repositories: Instance of portage.repository.config.RepoConfigLoader class.
22357 - """
22358 -
22359 - # This is important when config is reloaded after emerge --sync.
22360 - _eapi_cache.clear()
22361 -
22362 - # When initializing the global portage.settings instance, avoid
22363 - # raising exceptions whenever possible since exceptions thrown
22364 - # from 'import portage' or 'import portage.exceptions' statements
22365 - # can practically render the api unusable for api consumers.
22366 - tolerant = hasattr(portage, '_initializing_globals')
22367 - self._tolerant = tolerant
22368 - self._unmatched_removal = _unmatched_removal
22369 -
22370 - self.locked = 0
22371 - self.mycpv = None
22372 - self._setcpv_args_hash = None
22373 - self.puse = ""
22374 - self._penv = []
22375 - self.modifiedkeys = []
22376 - self.uvlist = []
22377 - self._accept_chost_re = None
22378 - self._accept_properties = None
22379 - self._accept_restrict = None
22380 - self._features_overrides = []
22381 - self._make_defaults = None
22382 - self._parent_stable = None
22383 - self._soname_provided = None
22384 -
22385 - # _unknown_features records unknown features that
22386 - # have triggered warning messages, and ensures that
22387 - # the same warning isn't shown twice.
22388 - self._unknown_features = set()
22389 -
22390 - self.local_config = local_config
22391 -
22392 - if clone:
22393 - # For immutable attributes, use shallow copy for
22394 - # speed and memory conservation.
22395 - self._tolerant = clone._tolerant
22396 - self._unmatched_removal = clone._unmatched_removal
22397 - self.categories = clone.categories
22398 - self.depcachedir = clone.depcachedir
22399 - self.incrementals = clone.incrementals
22400 - self.module_priority = clone.module_priority
22401 - self.profile_path = clone.profile_path
22402 - self.profiles = clone.profiles
22403 - self.packages = clone.packages
22404 - self.repositories = clone.repositories
22405 - self.unpack_dependencies = clone.unpack_dependencies
22406 - self._default_features_use = clone._default_features_use
22407 - self._iuse_effective = clone._iuse_effective
22408 - self._iuse_implicit_match = clone._iuse_implicit_match
22409 - self._non_user_variables = clone._non_user_variables
22410 - self._env_d_blacklist = clone._env_d_blacklist
22411 - self._pbashrc = clone._pbashrc
22412 - self._repo_make_defaults = clone._repo_make_defaults
22413 - self.usemask = clone.usemask
22414 - self.useforce = clone.useforce
22415 - self.puse = clone.puse
22416 - self.user_profile_dir = clone.user_profile_dir
22417 - self.local_config = clone.local_config
22418 - self.make_defaults_use = clone.make_defaults_use
22419 - self.mycpv = clone.mycpv
22420 - self._setcpv_args_hash = clone._setcpv_args_hash
22421 - self._soname_provided = clone._soname_provided
22422 - self._profile_bashrc = clone._profile_bashrc
22423 -
22424 - # immutable attributes (internal policy ensures lack of mutation)
22425 - self._locations_manager = clone._locations_manager
22426 - self._use_manager = clone._use_manager
22427 - # force instantiation of lazy immutable objects when cloning, so
22428 - # that they're not instantiated more than once
22429 - self._keywords_manager_obj = clone._keywords_manager
22430 - self._mask_manager_obj = clone._mask_manager
22431 -
22432 - # shared mutable attributes
22433 - self._unknown_features = clone._unknown_features
22434 -
22435 - self.modules = copy.deepcopy(clone.modules)
22436 - self._penv = copy.deepcopy(clone._penv)
22437 -
22438 - self.configdict = copy.deepcopy(clone.configdict)
22439 - self.configlist = [
22440 - self.configdict['env.d'],
22441 - self.configdict['repo'],
22442 - self.configdict['features'],
22443 - self.configdict['pkginternal'],
22444 - self.configdict['globals'],
22445 - self.configdict['defaults'],
22446 - self.configdict['conf'],
22447 - self.configdict['pkg'],
22448 - self.configdict['env'],
22449 - ]
22450 - self.lookuplist = self.configlist[:]
22451 - self.lookuplist.reverse()
22452 - self._use_expand_dict = copy.deepcopy(clone._use_expand_dict)
22453 - self.backupenv = self.configdict["backupenv"]
22454 - self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
22455 - self.pprovideddict = copy.deepcopy(clone.pprovideddict)
22456 - self.features = features_set(self)
22457 - self.features._features = copy.deepcopy(clone.features._features)
22458 - self._features_overrides = copy.deepcopy(clone._features_overrides)
22459 -
22460 - #Strictly speaking _license_manager is not immutable. Users need to ensure that
22461 - #extract_global_changes() is called right after __init__ (if at all).
22462 - #It also has the mutable member _undef_lic_groups. It is used to track
22463 - #undefined license groups, to not display an error message for the same
22464 - #group again and again. Because of this, it's useful to share it between
22465 - #all LicenseManager instances.
22466 - self._license_manager = clone._license_manager
22467 -
22468 - # force instantiation of lazy objects when cloning, so
22469 - # that they're not instantiated more than once
22470 - self._virtuals_manager_obj = copy.deepcopy(clone._virtuals_manager)
22471 -
22472 - self._accept_properties = copy.deepcopy(clone._accept_properties)
22473 - self._ppropertiesdict = copy.deepcopy(clone._ppropertiesdict)
22474 - self._accept_restrict = copy.deepcopy(clone._accept_restrict)
22475 - self._paccept_restrict = copy.deepcopy(clone._paccept_restrict)
22476 - self._penvdict = copy.deepcopy(clone._penvdict)
22477 - self._pbashrcdict = copy.deepcopy(clone._pbashrcdict)
22478 - self._expand_map = copy.deepcopy(clone._expand_map)
22479 -
22480 - else:
22481 - # lazily instantiated objects
22482 - self._keywords_manager_obj = None
22483 - self._mask_manager_obj = None
22484 - self._virtuals_manager_obj = None
22485 -
22486 - locations_manager = LocationsManager(config_root=config_root,
22487 - config_profile_path=config_profile_path, eprefix=eprefix,
22488 - local_config=local_config, target_root=target_root,
22489 - sysroot=sysroot)
22490 - self._locations_manager = locations_manager
22491 -
22492 - eprefix = locations_manager.eprefix
22493 - config_root = locations_manager.config_root
22494 - sysroot = locations_manager.sysroot
22495 - esysroot = locations_manager.esysroot
22496 - broot = locations_manager.broot
22497 - abs_user_config = locations_manager.abs_user_config
22498 - make_conf_paths = [
22499 - os.path.join(config_root, 'etc', 'make.conf'),
22500 - os.path.join(config_root, MAKE_CONF_FILE)
22501 - ]
22502 - try:
22503 - if os.path.samefile(*make_conf_paths):
22504 - make_conf_paths.pop()
22505 - except OSError:
22506 - pass
22507 -
22508 - make_conf_count = 0
22509 - make_conf = {}
22510 - for x in make_conf_paths:
22511 - mygcfg = getconfig(x,
22512 - tolerant=tolerant, allow_sourcing=True,
22513 - expand=make_conf, recursive=True)
22514 - if mygcfg is not None:
22515 - make_conf.update(mygcfg)
22516 - make_conf_count += 1
22517 -
22518 - if make_conf_count == 2:
22519 - writemsg("!!! %s\n" %
22520 - _("Found 2 make.conf files, using both '%s' and '%s'") %
22521 - tuple(make_conf_paths), noiselevel=-1)
22522 -
22523 - # __* variables set in make.conf are local and are not propagated.
22524 - make_conf = {k: v for k, v in make_conf.items() if not k.startswith("__")}
22525 -
22526 - # Allow ROOT setting to come from make.conf if it's not overridden
22527 - # by the constructor argument (from the calling environment).
22528 - locations_manager.set_root_override(make_conf.get("ROOT"))
22529 - target_root = locations_manager.target_root
22530 - eroot = locations_manager.eroot
22531 - self.global_config_path = locations_manager.global_config_path
22532 -
22533 - # The expand_map is used for variable substitution
22534 - # in getconfig() calls, and the getconfig() calls
22535 - # update expand_map with the value of each variable
22536 - # assignment that occurs. Variable substitution occurs
22537 - # in the following order, which corresponds to the
22538 - # order of appearance in self.lookuplist:
22539 - #
22540 - # * env.d
22541 - # * make.globals
22542 - # * make.defaults
22543 - # * make.conf
22544 - #
22545 - # Notably absent is "env", since we want to avoid any
22546 - # interaction with the calling environment that might
22547 - # lead to unexpected results.
22548 -
22549 - env_d = getconfig(os.path.join(eroot, "etc", "profile.env"),
22550 - tolerant=tolerant, expand=False) or {}
22551 - expand_map = env_d.copy()
22552 - self._expand_map = expand_map
22553 -
22554 - # Allow make.globals and make.conf to set paths relative to vars like ${EPREFIX}.
22555 - expand_map["BROOT"] = broot
22556 - expand_map["EPREFIX"] = eprefix
22557 - expand_map["EROOT"] = eroot
22558 - expand_map["ESYSROOT"] = esysroot
22559 - expand_map["PORTAGE_CONFIGROOT"] = config_root
22560 - expand_map["ROOT"] = target_root
22561 - expand_map["SYSROOT"] = sysroot
22562 -
22563 - if portage._not_installed:
22564 - make_globals_path = os.path.join(PORTAGE_BASE_PATH, "cnf", "make.globals")
22565 - else:
22566 - make_globals_path = os.path.join(self.global_config_path, "make.globals")
22567 - old_make_globals = os.path.join(config_root, "etc", "make.globals")
22568 - if os.path.isfile(old_make_globals) and \
22569 - not os.path.samefile(make_globals_path, old_make_globals):
22570 - # Don't warn if they refer to the same path, since
22571 - # that can be used for backward compatibility with
22572 - # old software.
22573 - writemsg("!!! %s\n" %
22574 - _("Found obsolete make.globals file: "
22575 - "'%s', (using '%s' instead)") %
22576 - (old_make_globals, make_globals_path),
22577 - noiselevel=-1)
22578 -
22579 - make_globals = getconfig(make_globals_path,
22580 - tolerant=tolerant, expand=expand_map)
22581 - if make_globals is None:
22582 - make_globals = {}
22583 -
22584 - for k, v in self._default_globals.items():
22585 - make_globals.setdefault(k, v)
22586 -
22587 - if config_incrementals is None:
22588 - self.incrementals = INCREMENTALS
22589 - else:
22590 - self.incrementals = config_incrementals
22591 - if not isinstance(self.incrementals, frozenset):
22592 - self.incrementals = frozenset(self.incrementals)
22593 -
22594 - self.module_priority = ("user", "default")
22595 - self.modules = {}
22596 - modules_file = os.path.join(config_root, MODULES_FILE_PATH)
22597 - modules_loader = KeyValuePairFileLoader(modules_file, None, None)
22598 - modules_dict, modules_errors = modules_loader.load()
22599 - self.modules["user"] = modules_dict
22600 - if self.modules["user"] is None:
22601 - self.modules["user"] = {}
22602 - user_auxdbmodule = \
22603 - self.modules["user"].get("portdbapi.auxdbmodule")
22604 - if user_auxdbmodule is not None and \
22605 - user_auxdbmodule in self._module_aliases:
22606 - warnings.warn("'%s' is deprecated: %s" %
22607 - (user_auxdbmodule, modules_file))
22608 -
22609 - self.modules["default"] = {
22610 - "portdbapi.auxdbmodule": "portage.cache.flat_hash.mtime_md5_database",
22611 - }
22612 -
22613 - self.configlist=[]
22614 -
22615 - # back up our incremental variables:
22616 - self.configdict={}
22617 - self._use_expand_dict = {}
22618 - # configlist will contain: [ env.d, globals, features, defaults, conf, pkg, backupenv, env ]
22619 - self.configlist.append({})
22620 - self.configdict["env.d"] = self.configlist[-1]
22621 -
22622 - self.configlist.append({})
22623 - self.configdict["repo"] = self.configlist[-1]
22624 -
22625 - self.configlist.append({})
22626 - self.configdict["features"] = self.configlist[-1]
22627 -
22628 - self.configlist.append({})
22629 - self.configdict["pkginternal"] = self.configlist[-1]
22630 -
22631 - # env_d will be None if profile.env doesn't exist.
22632 - if env_d:
22633 - self.configdict["env.d"].update(env_d)
22634 -
22635 - # backupenv is used for calculating incremental variables.
22636 - if env is None:
22637 - env = os.environ
22638 -
22639 - # Avoid potential UnicodeDecodeError exceptions later.
22640 - env_unicode = dict((_unicode_decode(k), _unicode_decode(v))
22641 - for k, v in env.items())
22642 -
22643 - self.backupenv = env_unicode
22644 -
22645 - if env_d:
22646 - # Remove duplicate values so they don't override updated
22647 - # profile.env values later (profile.env is reloaded in each
22648 - # call to self.regenerate).
22649 - for k, v in env_d.items():
22650 - try:
22651 - if self.backupenv[k] == v:
22652 - del self.backupenv[k]
22653 - except KeyError:
22654 - pass
22655 - del k, v
22656 -
22657 - self.configdict["env"] = LazyItemsDict(self.backupenv)
22658 -
22659 - self.configlist.append(make_globals)
22660 - self.configdict["globals"]=self.configlist[-1]
22661 -
22662 - self.make_defaults_use = []
22663 -
22664 - #Loading Repositories
22665 - self["PORTAGE_CONFIGROOT"] = config_root
22666 - self["ROOT"] = target_root
22667 - self["SYSROOT"] = sysroot
22668 - self["EPREFIX"] = eprefix
22669 - self["EROOT"] = eroot
22670 - self["ESYSROOT"] = esysroot
22671 - self["BROOT"] = broot
22672 - known_repos = []
22673 - portdir = ""
22674 - portdir_overlay = ""
22675 - portdir_sync = None
22676 - for confs in [make_globals, make_conf, self.configdict["env"]]:
22677 - v = confs.get("PORTDIR")
22678 - if v is not None:
22679 - portdir = v
22680 - known_repos.append(v)
22681 - v = confs.get("PORTDIR_OVERLAY")
22682 - if v is not None:
22683 - portdir_overlay = v
22684 - known_repos.extend(shlex_split(v))
22685 - v = confs.get("SYNC")
22686 - if v is not None:
22687 - portdir_sync = v
22688 - if 'PORTAGE_RSYNC_EXTRA_OPTS' in confs:
22689 - self['PORTAGE_RSYNC_EXTRA_OPTS'] = confs['PORTAGE_RSYNC_EXTRA_OPTS']
22690 -
22691 - self["PORTDIR"] = portdir
22692 - self["PORTDIR_OVERLAY"] = portdir_overlay
22693 - if portdir_sync:
22694 - self["SYNC"] = portdir_sync
22695 - self.lookuplist = [self.configdict["env"]]
22696 - if repositories is None:
22697 - self.repositories = load_repository_config(self)
22698 - else:
22699 - self.repositories = repositories
22700 -
22701 - known_repos.extend(repo.location for repo in self.repositories)
22702 - known_repos = frozenset(known_repos)
22703 -
22704 - self['PORTAGE_REPOSITORIES'] = self.repositories.config_string()
22705 - self.backup_changes('PORTAGE_REPOSITORIES')
22706 -
22707 - #filling PORTDIR and PORTDIR_OVERLAY variables for compatibility
22708 - main_repo = self.repositories.mainRepo()
22709 - if main_repo is not None:
22710 - self["PORTDIR"] = main_repo.location
22711 - self.backup_changes("PORTDIR")
22712 - expand_map["PORTDIR"] = self["PORTDIR"]
22713 -
22714 - # repoman controls PORTDIR_OVERLAY via the environment, so no
22715 - # special cases are needed here.
22716 - portdir_overlay = list(self.repositories.repoLocationList())
22717 - if portdir_overlay and portdir_overlay[0] == self["PORTDIR"]:
22718 - portdir_overlay = portdir_overlay[1:]
22719 -
22720 - new_ov = []
22721 - if portdir_overlay:
22722 - for ov in portdir_overlay:
22723 - ov = normalize_path(ov)
22724 - if isdir_raise_eaccess(ov) or portage._sync_mode:
22725 - new_ov.append(portage._shell_quote(ov))
22726 - else:
22727 - writemsg(_("!!! Invalid PORTDIR_OVERLAY"
22728 - " (not a dir): '%s'\n") % ov, noiselevel=-1)
22729 -
22730 - self["PORTDIR_OVERLAY"] = " ".join(new_ov)
22731 - self.backup_changes("PORTDIR_OVERLAY")
22732 - expand_map["PORTDIR_OVERLAY"] = self["PORTDIR_OVERLAY"]
22733 -
22734 - locations_manager.set_port_dirs(self["PORTDIR"], self["PORTDIR_OVERLAY"])
22735 - locations_manager.load_profiles(self.repositories, known_repos)
22736 -
22737 - profiles_complex = locations_manager.profiles_complex
22738 - self.profiles = locations_manager.profiles
22739 - self.profile_path = locations_manager.profile_path
22740 - self.user_profile_dir = locations_manager.user_profile_dir
22741 -
22742 - try:
22743 - packages_list = [grabfile_package(
22744 - os.path.join(x.location, "packages"),
22745 - verify_eapi=True, eapi=x.eapi, eapi_default=None,
22746 - allow_repo=allow_profile_repo_deps(x),
22747 - allow_build_id=x.allow_build_id)
22748 - for x in profiles_complex]
22749 - except EnvironmentError as e:
22750 - _raise_exc(e)
22751 -
22752 - self.packages = tuple(stack_lists(packages_list, incremental=1))
22753 -
22754 - # prevmaskdict
22755 - self.prevmaskdict={}
22756 - for x in self.packages:
22757 - # Negative atoms are filtered by the above stack_lists() call.
22758 - if not isinstance(x, Atom):
22759 - x = Atom(x.lstrip('*'))
22760 - self.prevmaskdict.setdefault(x.cp, []).append(x)
22761 -
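The grouping pattern used for prevmaskdict above, sketched with toy atoms (the real code uses Atom objects and proper cpv splitting, not the naive key extraction here):

    atoms = ["sys-apps/portage-2.3", "sys-apps/portage-3.0", "dev-lang/python-3.9"]

    def cp_of(atom):
        # Toy stand-in for Atom.cp: strip the trailing "-<version>".
        name, _, _ = atom.rpartition("-")
        return name

    by_cp = {}
    for atom in atoms:
        by_cp.setdefault(cp_of(atom), []).append(atom)

    assert by_cp["sys-apps/portage"] == ["sys-apps/portage-2.3", "sys-apps/portage-3.0"]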
22762 - self.unpack_dependencies = load_unpack_dependencies_configuration(self.repositories)
22763 -
22764 - mygcfg = {}
22765 - if profiles_complex:
22766 - mygcfg_dlists = []
22767 - for x in profiles_complex:
22768 - # Prevent accidents triggered by USE="${USE} ..." settings
22769 - # at the top of make.defaults which caused parent profile
22770 - # USE to override parent profile package.use settings.
22771 - # It would be nice to guard USE_EXPAND variables like
22772 - # this too, but unfortunately USE_EXPAND is not known
22773 - # until after make.defaults has been evaluated, so that
22774 - # will require some form of make.defaults preprocessing.
22775 - expand_map.pop("USE", None)
22776 - mygcfg_dlists.append(
22777 - getconfig(os.path.join(x.location, "make.defaults"),
22778 - tolerant=tolerant, expand=expand_map,
22779 - recursive=x.portage1_directories))
22780 - self._make_defaults = mygcfg_dlists
22781 - mygcfg = stack_dicts(mygcfg_dlists,
22782 - incrementals=self.incrementals)
22783 - if mygcfg is None:
22784 - mygcfg = {}
22785 - self.configlist.append(mygcfg)
22786 - self.configdict["defaults"]=self.configlist[-1]
22787 -
22788 - mygcfg = {}
22789 - for x in make_conf_paths:
22790 - mygcfg.update(getconfig(x,
22791 - tolerant=tolerant, allow_sourcing=True,
22792 - expand=expand_map, recursive=True) or {})
22793 -
22794 - # __* variables set in make.conf are local and are not propagated.
22795 - mygcfg = {k: v for k, v in mygcfg.items() if not k.startswith("__")}
22796 -
22797 - # Don't allow the user to override certain variables in make.conf
22798 - profile_only_variables = self.configdict["defaults"].get(
22799 - "PROFILE_ONLY_VARIABLES", "").split()
22800 - profile_only_variables = stack_lists([profile_only_variables])
22801 - non_user_variables = set()
22802 - non_user_variables.update(profile_only_variables)
22803 - non_user_variables.update(self._env_blacklist)
22804 - non_user_variables.update(self._global_only_vars)
22805 - non_user_variables = frozenset(non_user_variables)
22806 - self._non_user_variables = non_user_variables
22807 -
22808 - self._env_d_blacklist = frozenset(chain(
22809 - profile_only_variables,
22810 - self._env_blacklist,
22811 - ))
22812 - env_d = self.configdict["env.d"]
22813 - for k in self._env_d_blacklist:
22814 - env_d.pop(k, None)
22815 -
22816 - for k in profile_only_variables:
22817 - mygcfg.pop(k, None)
22818 -
22819 - self.configlist.append(mygcfg)
22820 - self.configdict["conf"]=self.configlist[-1]
22821 -
22822 - self.configlist.append(LazyItemsDict())
22823 - self.configdict["pkg"]=self.configlist[-1]
22824 -
22825 - self.configdict["backupenv"] = self.backupenv
22826 -
22827 - # Don't allow the user to override certain variables in the env
22828 - for k in profile_only_variables:
22829 - self.backupenv.pop(k, None)
22830 -
22831 - self.configlist.append(self.configdict["env"])
22832 -
22833 - # make lookuplist for loading package.*
22834 - self.lookuplist=self.configlist[:]
22835 - self.lookuplist.reverse()
22836 -
22837 - # Blacklist vars that could interfere with portage internals.
22838 - for blacklisted in self._env_blacklist:
22839 - for cfg in self.lookuplist:
22840 - cfg.pop(blacklisted, None)
22841 - self.backupenv.pop(blacklisted, None)
22842 - del blacklisted, cfg
22843 -
22844 - self["PORTAGE_CONFIGROOT"] = config_root
22845 - self.backup_changes("PORTAGE_CONFIGROOT")
22846 - self["ROOT"] = target_root
22847 - self.backup_changes("ROOT")
22848 - self["SYSROOT"] = sysroot
22849 - self.backup_changes("SYSROOT")
22850 - self["EPREFIX"] = eprefix
22851 - self.backup_changes("EPREFIX")
22852 - self["EROOT"] = eroot
22853 - self.backup_changes("EROOT")
22854 - self["ESYSROOT"] = esysroot
22855 - self.backup_changes("ESYSROOT")
22856 - self["BROOT"] = broot
22857 - self.backup_changes("BROOT")
22858 -
22859 - # The prefix of the running portage instance is used in the
22860 - # ebuild environment to implement the --host-root option for
22861 - # best_version and has_version.
22862 - self["PORTAGE_OVERRIDE_EPREFIX"] = portage.const.EPREFIX
22863 - self.backup_changes("PORTAGE_OVERRIDE_EPREFIX")
22864 -
22865 - self._ppropertiesdict = portage.dep.ExtendedAtomDict(dict)
22866 - self._paccept_restrict = portage.dep.ExtendedAtomDict(dict)
22867 - self._penvdict = portage.dep.ExtendedAtomDict(dict)
22868 - self._pbashrcdict = {}
22869 - self._pbashrc = ()
22870 -
22871 - self._repo_make_defaults = {}
22872 - for repo in self.repositories.repos_with_profiles():
22873 - d = getconfig(os.path.join(repo.location, "profiles", "make.defaults"),
22874 - tolerant=tolerant, expand=self.configdict["globals"].copy(), recursive=repo.portage1_profiles) or {}
22875 - if d:
22876 - for k in chain(self._env_blacklist,
22877 - profile_only_variables, self._global_only_vars):
22878 - d.pop(k, None)
22879 - self._repo_make_defaults[repo.name] = d
22880 -
22881 - #Read all USE related files from profiles and optionally from user config.
22882 - self._use_manager = UseManager(self.repositories, profiles_complex,
22883 - abs_user_config, self._isStable, user_config=local_config)
22884 - #Initialize all USE related variables we track ourselves.
22885 - self.usemask = self._use_manager.getUseMask()
22886 - self.useforce = self._use_manager.getUseForce()
22887 - self.configdict["conf"]["USE"] = \
22888 - self._use_manager.extract_global_USE_changes( \
22889 - self.configdict["conf"].get("USE", ""))
22890 -
22891 - #Read license_groups from profiles, and optionally license_groups and package.license from user config
22892 - self._license_manager = LicenseManager(locations_manager.profile_locations, \
22893 - abs_user_config, user_config=local_config)
22894 - #Extract '*/*' entries from package.license
22895 - self.configdict["conf"]["ACCEPT_LICENSE"] = \
22896 - self._license_manager.extract_global_changes( \
22897 - self.configdict["conf"].get("ACCEPT_LICENSE", ""))
22898 -
22899 - # profile.bashrc
22900 - self._profile_bashrc = tuple(os.path.isfile(os.path.join(profile.location, 'profile.bashrc'))
22901 - for profile in profiles_complex)
22902 -
22903 - if local_config:
22904 - #package.properties
22905 - propdict = grabdict_package(os.path.join(
22906 - abs_user_config, "package.properties"), recursive=1, allow_wildcard=True, \
22907 - allow_repo=True, verify_eapi=False,
22908 - allow_build_id=True)
22909 - v = propdict.pop("*/*", None)
22910 - if v is not None:
22911 - if "ACCEPT_PROPERTIES" in self.configdict["conf"]:
22912 - self.configdict["conf"]["ACCEPT_PROPERTIES"] += " " + " ".join(v)
22913 - else:
22914 - self.configdict["conf"]["ACCEPT_PROPERTIES"] = " ".join(v)
22915 - for k, v in propdict.items():
22916 - self._ppropertiesdict.setdefault(k.cp, {})[k] = v
22917 -
22918 - # package.accept_restrict
22919 - d = grabdict_package(os.path.join(
22920 - abs_user_config, "package.accept_restrict"),
22921 - recursive=True, allow_wildcard=True,
22922 - allow_repo=True, verify_eapi=False,
22923 - allow_build_id=True)
22924 - v = d.pop("*/*", None)
22925 - if v is not None:
22926 - if "ACCEPT_RESTRICT" in self.configdict["conf"]:
22927 - self.configdict["conf"]["ACCEPT_RESTRICT"] += " " + " ".join(v)
22928 - else:
22929 - self.configdict["conf"]["ACCEPT_RESTRICT"] = " ".join(v)
22930 - for k, v in d.items():
22931 - self._paccept_restrict.setdefault(k.cp, {})[k] = v
22932 -
22933 - #package.env
22934 - penvdict = grabdict_package(os.path.join(
22935 - abs_user_config, "package.env"), recursive=1, allow_wildcard=True, \
22936 - allow_repo=True, verify_eapi=False,
22937 - allow_build_id=True)
22938 - v = penvdict.pop("*/*", None)
22939 - if v is not None:
22940 - global_wildcard_conf = {}
22941 - self._grab_pkg_env(v, global_wildcard_conf)
22942 - incrementals = self.incrementals
22943 - conf_configdict = self.configdict["conf"]
22944 - for k, v in global_wildcard_conf.items():
22945 - if k in incrementals:
22946 - if k in conf_configdict:
22947 - conf_configdict[k] = \
22948 - conf_configdict[k] + " " + v
22949 - else:
22950 - conf_configdict[k] = v
22951 - else:
22952 - conf_configdict[k] = v
22953 - expand_map[k] = v
22954 -
22955 - for k, v in penvdict.items():
22956 - self._penvdict.setdefault(k.cp, {})[k] = v
22957 -
22958 - # package.bashrc
22959 - for profile in profiles_complex:
22960 - if 'profile-bashrcs' not in profile.profile_formats:
22961 - continue
22962 - self._pbashrcdict[profile] = \
22963 - portage.dep.ExtendedAtomDict(dict)
22964 - bashrc = grabdict_package(os.path.join(profile.location,
22965 - "package.bashrc"), recursive=1, allow_wildcard=True,
22966 - allow_repo=allow_profile_repo_deps(profile),
22967 - verify_eapi=True,
22968 - eapi=profile.eapi, eapi_default=None,
22969 - allow_build_id=profile.allow_build_id)
22970 - if not bashrc:
22971 - continue
22972 -
22973 - for k, v in bashrc.items():
22974 - envfiles = [os.path.join(profile.location,
22975 - "bashrc",
22976 - envname) for envname in v]
22977 - self._pbashrcdict[profile].setdefault(k.cp, {})\
22978 - .setdefault(k, []).extend(envfiles)
22979 -
22980 - #getting categories from an external file now
22981 - self.categories = [grabfile(os.path.join(x, "categories")) \
22982 - for x in locations_manager.profile_and_user_locations]
22983 - category_re = dbapi._category_re
22984 - # categories used to be a tuple, but now we use a frozenset
22985 - # for hashed category validation in portdbapi.cp_list()
22986 - self.categories = frozenset(
22987 - x for x in stack_lists(self.categories, incremental=1)
22988 - if category_re.match(x) is not None)
22989 -
22990 - archlist = [grabfile(os.path.join(x, "arch.list")) \
22991 - for x in locations_manager.profile_and_user_locations]
22992 - archlist = sorted(stack_lists(archlist, incremental=1))
22993 - self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
22994 -
22995 - pkgprovidedlines = []
22996 - for x in profiles_complex:
22997 - provpath = os.path.join(x.location, "package.provided")
22998 - if os.path.exists(provpath):
22999 - if _get_eapi_attrs(x.eapi).allows_package_provided:
23000 - pkgprovidedlines.append(grabfile(provpath,
23001 - recursive=x.portage1_directories))
23002 - else:
23003 - # TODO: bail out?
23004 - writemsg((_("!!! package.provided not allowed in EAPI %s: ")
23005 - %x.eapi)+x.location+"\n",
23006 - noiselevel=-1)
23007 -
23008 - pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
23009 - has_invalid_data = False
23010 - for x in range(len(pkgprovidedlines)-1, -1, -1):
23011 - myline = pkgprovidedlines[x]
23012 - if not isvalidatom("=" + myline):
23013 - writemsg(_("Invalid package name in package.provided: %s\n") % \
23014 - myline, noiselevel=-1)
23015 - has_invalid_data = True
23016 - del pkgprovidedlines[x]
23017 - continue
23018 - cpvr = catpkgsplit(pkgprovidedlines[x])
23019 - if not cpvr or cpvr[0] == "null":
23020 - writemsg(_("Invalid package name in package.provided: ")+pkgprovidedlines[x]+"\n",
23021 - noiselevel=-1)
23022 - has_invalid_data = True
23023 - del pkgprovidedlines[x]
23024 - continue
23025 - if has_invalid_data:
23026 - writemsg(_("See portage(5) for correct package.provided usage.\n"),
23027 - noiselevel=-1)
23028 - self.pprovideddict = {}
23029 - for x in pkgprovidedlines:
23030 - x_split = catpkgsplit(x)
23031 - if x_split is None:
23032 - continue
23033 - mycatpkg = cpv_getkey(x)
23034 - if mycatpkg in self.pprovideddict:
23035 - self.pprovideddict[mycatpkg].append(x)
23036 - else:
23037 - self.pprovideddict[mycatpkg]=[x]
23038 -
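The validation loop above deletes entries while scanning; a minimal sketch of that reverse-index idiom (the validity check here is a made-up placeholder):

    lines = ["good/pkg-1.0", "bad entry", "good/pkg-2.0"]
    for i in range(len(lines) - 1, -1, -1):
        if " " in lines[i]:  # placeholder validity check
            del lines[i]     # safe: unvisited indexes don't shift
    assert lines == ["good/pkg-1.0", "good/pkg-2.0"]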
23039 - # reasonable defaults; this is important as without USE_ORDER,
23040 - # USE will always be "" (nothing set)!
23041 - if "USE_ORDER" not in self:
23042 - self["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:features:repo:env.d"
23043 - self.backup_changes("USE_ORDER")
23044 -
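Roughly, USE_ORDER names the priority of the config layers; a toy sketch of a first-hit lookup across ordered layers (incremental variables like USE are instead stacked across all layers during regenerate(), and the layer contents here are invented):

    layers = {
        "env": {},
        "pkg": {},
        "conf": {"CFLAGS": "-O2"},
        "defaults": {"CFLAGS": "-O1", "ARCH": "amd64"},
    }

    def lookup(key, order="env:pkg:conf:defaults"):
        # Walk layers from highest to lowest priority; first hit wins.
        for name in order.split(":"):
            if key in layers.get(name, {}):
                return layers[name][key]
        return None

    assert lookup("CFLAGS") == "-O2"
    assert lookup("ARCH") == "amd64"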
23045 - if "CBUILD" not in self and "CHOST" in self:
23046 - self["CBUILD"] = self["CHOST"]
23047 - self.backup_changes("CBUILD")
23048 -
23049 - if "USERLAND" not in self:
23050 - # Set default USERLAND so that our test cases can assume that
23051 - # it's always set. This allows isolated-functions.sh to avoid
23052 - # calling uname -s when sourced.
23053 - system = platform.system()
23054 - if system is not None and \
23055 - (system.endswith("BSD") or system == "DragonFly"):
23056 - self["USERLAND"] = "BSD"
23057 - else:
23058 - self["USERLAND"] = "GNU"
23059 - self.backup_changes("USERLAND")
23060 -
23061 - default_inst_ids = {
23062 - "PORTAGE_INST_GID": "0",
23063 - "PORTAGE_INST_UID": "0",
23064 - }
23065 -
23066 - # PREFIX LOCAL: inventing UID/GID based on a path is a very
23067 - # bad idea; it breaks almost everything, since group ids
23068 - # don't have to match when a user has many.
23069 - # In particular, this breaks the configure-set portage
23070 - # group and user (in portage/data.py).
23071 - eroot_or_parent = first_existing(eroot)
23072 - unprivileged = True
23073 - # try:
23074 - # eroot_st = os.stat(eroot_or_parent)
23075 - # except OSError:
23076 - # pass
23077 - # else:
23078 - #
23079 - # if portage.data._unprivileged_mode(
23080 - # eroot_or_parent, eroot_st):
23081 - # unprivileged = True
23082 - #
23083 - # default_inst_ids["PORTAGE_INST_GID"] = str(eroot_st.st_gid)
23084 - # default_inst_ids["PORTAGE_INST_UID"] = str(eroot_st.st_uid)
23085 - #
23086 - # if "PORTAGE_USERNAME" not in self:
23087 - # try:
23088 - # pwd_struct = pwd.getpwuid(eroot_st.st_uid)
23089 - # except KeyError:
23090 - # pass
23091 - # else:
23092 - # self["PORTAGE_USERNAME"] = pwd_struct.pw_name
23093 - # self.backup_changes("PORTAGE_USERNAME")
23094 - #
23095 - # if "PORTAGE_GRPNAME" not in self:
23096 - # try:
23097 - # grp_struct = grp.getgrgid(eroot_st.st_gid)
23098 - # except KeyError:
23099 - # pass
23100 - # else:
23101 - # self["PORTAGE_GRPNAME"] = grp_struct.gr_name
23102 - # self.backup_changes("PORTAGE_GRPNAME")
23103 - # END PREFIX LOCAL
23104 -
23105 - for var, default_val in default_inst_ids.items():
23106 - try:
23107 - self[var] = str(int(self.get(var, default_val)))
23108 - except ValueError:
23109 - writemsg(_("!!! %s='%s' is not a valid integer. "
23110 - "Falling back to %s.\n") % (var, self[var], default_val),
23111 - noiselevel=-1)
23112 - self[var] = default_val
23113 - self.backup_changes(var)
23114 -
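A self-contained sketch of the integer sanitation above (cfg is a plain dict standing in for the config object):

    def sanitize_int(cfg, var, default):
        try:
            cfg[var] = str(int(cfg.get(var, default)))
        except ValueError:
            # Invalid input: fall back to the default.
            cfg[var] = default

    cfg = {"PORTAGE_INST_GID": "not-a-number"}
    sanitize_int(cfg, "PORTAGE_INST_GID", "0")
    sanitize_int(cfg, "PORTAGE_INST_UID", "0")
    assert cfg == {"PORTAGE_INST_GID": "0", "PORTAGE_INST_UID": "0"}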
23115 - self.depcachedir = self.get("PORTAGE_DEPCACHEDIR")
23116 - if self.depcachedir is None:
23117 - self.depcachedir = os.path.join(os.sep,
23118 - portage.const.EPREFIX, DEPCACHE_PATH.lstrip(os.sep))
23119 - if unprivileged and target_root != os.sep:
23120 - # In unprivileged mode, automatically make
23121 - # depcachedir relative to target_root if the
23122 - # default depcachedir is not writable.
23123 - if not os.access(first_existing(self.depcachedir),
23124 - os.W_OK):
23125 - self.depcachedir = os.path.join(eroot,
23126 - DEPCACHE_PATH.lstrip(os.sep))
23127 -
23128 - self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
23129 - self.backup_changes("PORTAGE_DEPCACHEDIR")
23130 -
23131 - if portage._internal_caller:
23132 - self["PORTAGE_INTERNAL_CALLER"] = "1"
23133 - self.backup_changes("PORTAGE_INTERNAL_CALLER")
23134 -
23135 - # initialize self.features
23136 - self.regenerate()
23137 - feature_use = []
23138 - if "test" in self.features:
23139 - feature_use.append("test")
23140 - self.configdict["features"]["USE"] = self._default_features_use = " ".join(feature_use)
23141 - if feature_use:
23142 - # Regenerate USE so that the initial "test" flag state is
23143 - # correct for evaluation of !test? conditionals in RESTRICT.
23144 - self.regenerate()
23145 -
23146 - if unprivileged:
23147 - self.features.add('unprivileged')
23148 -
23149 - if bsd_chflags:
23150 - self.features.add('chflags')
23151 -
23152 - self._init_iuse()
23153 -
23154 - self._validate_commands()
23155 -
23156 - for k in self._case_insensitive_vars:
23157 - if k in self:
23158 - self[k] = self[k].lower()
23159 - self.backup_changes(k)
23160 -
23161 - # The first constructed config object initializes these modules,
23162 - # and subsequent calls to the _init() functions have no effect.
23163 - portage.output._init(config_root=self['PORTAGE_CONFIGROOT'])
23164 - portage.data._init(self)
23165 -
23166 - if mycpv:
23167 - self.setcpv(mycpv)
23168 -
23169 - def _init_iuse(self):
23170 - self._iuse_effective = self._calc_iuse_effective()
23171 - self._iuse_implicit_match = _iuse_implicit_match_cache(self)
23172 -
23173 - @property
23174 - def mygcfg(self):
23175 - warnings.warn("portage.config.mygcfg is deprecated", stacklevel=3)
23176 - return {}
23177 -
23178 - def _validate_commands(self):
23179 - for k in special_env_vars.validate_commands:
23180 - v = self.get(k)
23181 - if v is not None:
23182 - valid, v_split = validate_cmd_var(v)
23183 -
23184 - if not valid:
23185 - if v_split:
23186 - writemsg_level(_("%s setting is invalid: '%s'\n") % \
23187 - (k, v), level=logging.ERROR, noiselevel=-1)
23188 -
23189 - # before deleting the invalid setting, back up
23190 - # the default value if available
23191 - v = self.configdict['globals'].get(k)
23192 - if v is not None:
23193 - default_valid, v_split = validate_cmd_var(v)
23194 - if not default_valid:
23195 - if v_split:
23196 - writemsg_level(
23197 - _("%s setting from make.globals" + \
23198 - " is invalid: '%s'\n") % \
23199 - (k, v), level=logging.ERROR, noiselevel=-1)
23200 - # make.globals seems corrupt, so try for
23201 - # a hardcoded default instead
23202 - v = self._default_globals.get(k)
23203 -
23204 - # delete all settings for this key,
23205 - # including the invalid one
23206 - del self[k]
23207 - self.backupenv.pop(k, None)
23208 - if v:
23209 - # restore validated default
23210 - self.configdict['globals'][k] = v
23211 -
23212 - def _init_dirs(self):
23213 - """
23214 - Create a few directories that are critical to portage operation
23215 - """
23216 - if not os.access(self["EROOT"], os.W_OK):
23217 - return
23218 -
23219 - # gid, mode, mask, preserve_perms
23220 - dir_mode_map = {
23221 - "tmp" : ( -1, 0o1777, 0, True),
23222 - "var/tmp" : ( -1, 0o1777, 0, True),
23223 - PRIVATE_PATH : (portage_gid, 0o2750, 0o2, False),
23224 - CACHE_PATH : (portage_gid, 0o755, 0o2, False)
23225 - }
23226 -
23227 - for mypath, (gid, mode, modemask, preserve_perms) \
23228 - in dir_mode_map.items():
23229 - mydir = os.path.join(self["EROOT"], mypath)
23230 - if preserve_perms and os.path.isdir(mydir):
23231 - # Only adjust permissions on some directories if
23232 - # they don't exist yet. This gives freedom to the
23233 - # user to adjust permissions to suit their taste.
23234 - continue
23235 - try:
23236 - ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
23237 - except PortageException as e:
23238 - writemsg(_("!!! Directory initialization failed: '%s'\n") % mydir,
23239 - noiselevel=-1)
23240 - writemsg("!!! %s\n" % str(e),
23241 - noiselevel=-1)
23242 -
23243 - @property
23244 - def _keywords_manager(self):
23245 - if self._keywords_manager_obj is None:
23246 - self._keywords_manager_obj = KeywordsManager(
23247 - self._locations_manager.profiles_complex,
23248 - self._locations_manager.abs_user_config,
23249 - self.local_config,
23250 - global_accept_keywords=self.configdict["defaults"].get("ACCEPT_KEYWORDS", ""))
23251 - return self._keywords_manager_obj
23252 -
23253 - @property
23254 - def _mask_manager(self):
23255 - if self._mask_manager_obj is None:
23256 - self._mask_manager_obj = MaskManager(self.repositories,
23257 - self._locations_manager.profiles_complex,
23258 - self._locations_manager.abs_user_config,
23259 - user_config=self.local_config,
23260 - strict_umatched_removal=self._unmatched_removal)
23261 - return self._mask_manager_obj
23262 -
23263 - @property
23264 - def _virtuals_manager(self):
23265 - if self._virtuals_manager_obj is None:
23266 - self._virtuals_manager_obj = VirtualsManager(self.profiles)
23267 - return self._virtuals_manager_obj
23268 -
23269 - @property
23270 - def pkeywordsdict(self):
23271 - result = self._keywords_manager.pkeywordsdict.copy()
23272 - for k, v in result.items():
23273 - result[k] = v.copy()
23274 - return result
23275 -
23276 - @property
23277 - def pmaskdict(self):
23278 - return self._mask_manager._pmaskdict.copy()
23279 -
23280 - @property
23281 - def punmaskdict(self):
23282 - return self._mask_manager._punmaskdict.copy()
23283 -
23284 - @property
23285 - def soname_provided(self):
23286 - if self._soname_provided is None:
23287 - d = stack_dictlist((grabdict(
23288 - os.path.join(x, "soname.provided"), recursive=True)
23289 - for x in self.profiles), incremental=True)
23290 - self._soname_provided = frozenset(SonameAtom(cat, soname)
23291 - for cat, sonames in d.items() for soname in sonames)
23292 - return self._soname_provided
23293 -
23294 - def expandLicenseTokens(self, tokens):
23295 - """ Take a token from ACCEPT_LICENSE or package.license and expand it
23296 - if it's a group token (indicated by @) or just return it if it's not a
23297 - group. If a group is negated then negate all group elements."""
23298 - return self._license_manager.expandLicenseTokens(tokens)
23299 -
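A minimal sketch of the group expansion described in the docstring, assuming a made-up groups mapping ('@' marks a group token, a leading '-' negates every member):

    groups = {"FREE": ["GPL-2", "MIT"]}

    def expand(token):
        negate = token.startswith("-")
        name = token.lstrip("-")
        if not name.startswith("@"):
            return [token]  # not a group token
        members = groups.get(name[1:], [])
        return [("-" + m) if negate else m for m in members]

    assert expand("@FREE") == ["GPL-2", "MIT"]
    assert expand("-@FREE") == ["-GPL-2", "-MIT"]
    assert expand("BSD") == ["BSD"]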
23300 - def validate(self):
23301 - """Validate miscellaneous settings and display warnings if necessary.
23302 - (This code was previously in the global scope of portage.py)"""
23303 -
23304 - groups = self.get("ACCEPT_KEYWORDS", "").split()
23305 - archlist = self.archlist()
23306 - if not archlist:
23307 - writemsg(_("--- 'profiles/arch.list' is empty or "
23308 - "not available. Empty ebuild repository?\n"), noiselevel=1)
23309 - else:
23310 - for group in groups:
23311 - if group not in archlist and \
23312 - not (group.startswith("-") and group[1:] in archlist) and \
23313 - group not in ("*", "~*", "**"):
23314 - writemsg(_("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group),
23315 - noiselevel=-1)
23316 -
23317 - profile_broken = False
23318 -
23319 - # getmaskingstatus requires ARCH for ACCEPT_KEYWORDS support
23320 - arch = self.get('ARCH')
23321 - if not self.profile_path or not arch:
23322 - profile_broken = True
23323 - else:
23324 - # If any one of these files exists, then
23325 - # the profile is considered valid.
23326 - for x in ("make.defaults", "parent",
23327 - "packages", "use.force", "use.mask"):
23328 - if exists_raise_eaccess(os.path.join(self.profile_path, x)):
23329 - break
23330 - else:
23331 - profile_broken = True
23332 -
23333 - if profile_broken and not portage._sync_mode:
23334 - abs_profile_path = None
23335 - for x in (PROFILE_PATH, 'etc/make.profile'):
23336 - x = os.path.join(self["PORTAGE_CONFIGROOT"], x)
23337 - try:
23338 - os.lstat(x)
23339 - except OSError:
23340 - pass
23341 - else:
23342 - abs_profile_path = x
23343 - break
23344 -
23345 - if abs_profile_path is None:
23346 - abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
23347 - PROFILE_PATH)
23348 -
23349 - writemsg(_("\n\n!!! %s is not a symlink and will probably prevent most merges.\n") % abs_profile_path,
23350 - noiselevel=-1)
23351 - writemsg(_("!!! It should point into a profile within %s/profiles/\n") % self["PORTDIR"])
23352 - writemsg(_("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
23353 -
23354 - abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
23355 - USER_VIRTUALS_FILE)
23356 - if os.path.exists(abs_user_virtuals):
23357 - writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
23358 - writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
23359 - writemsg("!!! this new location.\n\n")
23360 -
23361 - if not sandbox_capable and not macossandbox_capable and \
23362 - ("sandbox" in self.features or "usersandbox" in self.features):
23363 - if self.profile_path is not None and \
23364 - os.path.realpath(self.profile_path) == \
23365 - os.path.realpath(os.path.join(
23366 - self["PORTAGE_CONFIGROOT"], PROFILE_PATH)):
23367 - # Don't show this warning when running repoman and the
23368 - # sandbox feature came from a profile that doesn't belong
23369 - # to the user.
23370 - writemsg(colorize("BAD", _("!!! Problem with sandbox"
23371 - " binary. Disabling...\n\n")), noiselevel=-1)
23372 -
23373 - if "fakeroot" in self.features and \
23374 - not fakeroot_capable:
23375 - writemsg(_("!!! FEATURES=fakeroot is enabled, but the "
23376 - "fakeroot binary is not installed.\n"), noiselevel=-1)
23377 -
23378 - if "webrsync-gpg" in self.features:
23379 - writemsg(_("!!! FEATURES=webrsync-gpg is deprecated, see the make.conf(5) man page.\n"),
23380 - noiselevel=-1)
23381 -
23382 - if os.getuid() == 0 and not hasattr(os, "setgroups"):
23383 - warning_shown = False
23384 -
23385 - if "userpriv" in self.features:
23386 - writemsg(_("!!! FEATURES=userpriv is enabled, but "
23387 - "os.setgroups is not available.\n"), noiselevel=-1)
23388 - warning_shown = True
23389 -
23390 - if "userfetch" in self.features:
23391 - writemsg(_("!!! FEATURES=userfetch is enabled, but "
23392 - "os.setgroups is not available.\n"), noiselevel=-1)
23393 - warning_shown = True
23394 -
23395 - if warning_shown and platform.python_implementation() == 'PyPy':
23396 - writemsg(_("!!! See https://bugs.pypy.org/issue833 for details.\n"),
23397 - noiselevel=-1)
23398 -
23399 - binpkg_compression = self.get("BINPKG_COMPRESS")
23400 - if binpkg_compression:
23401 - try:
23402 - compression = _compressors[binpkg_compression]
23403 - except KeyError as e:
23404 - writemsg("!!! BINPKG_COMPRESS contains invalid or "
23405 - "unsupported compression method: %s" % e.args[0],
23406 - noiselevel=-1)
23407 - else:
23408 - try:
23409 - compression_binary = shlex_split(
23410 - portage.util.varexpand(compression["compress"],
23411 - mydict=self))[0]
23412 - except IndexError as e:
23413 - writemsg("!!! BINPKG_COMPRESS contains invalid or "
23414 - "unsupported compression method: %s" % e.args[0],
23415 - noiselevel=-1)
23416 - else:
23417 - if portage.process.find_binary(
23418 - compression_binary) is None:
23419 - missing_package = compression["package"]
23420 - writemsg("!!! BINPKG_COMPRESS unsupported %s. "
23421 - "Missing package: %s" %
23422 - (binpkg_compression, missing_package),
23423 - noiselevel=-1)
23424 -
23425 - def load_best_module(self,property_string):
23426 - best_mod = best_from_dict(property_string,self.modules,self.module_priority)
23427 - mod = None
23428 - try:
23429 - mod = load_mod(best_mod)
23430 - except ImportError:
23431 - if best_mod in self._module_aliases:
23432 - mod = load_mod(self._module_aliases[best_mod])
23433 - elif not best_mod.startswith("cache."):
23434 - raise
23435 - else:
23436 - best_mod = "portage." + best_mod
23437 - try:
23438 - mod = load_mod(best_mod)
23439 - except ImportError:
23440 - raise
23441 - return mod
23442 -
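The lookup above picks the first layer in module_priority that defines the key; a simplified stand-in for that behavior, with an invented user override ("my.custom.cache" is illustrative, and the real portage.util.best_from_dict takes more parameters):

    modules = {
        "user": {"portdbapi.auxdbmodule": "my.custom.cache"},
        "default": {"portdbapi.auxdbmodule":
                    "portage.cache.flat_hash.mtime_md5_database"},
    }
    priority = ("user", "default")

    def best_from_dict(key, layers, order):
        # First layer in priority order that defines the key wins.
        for name in order:
            if key in layers.get(name, {}):
                return layers[name][key]
        raise KeyError(key)

    assert best_from_dict("portdbapi.auxdbmodule", modules, priority) == "my.custom.cache"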
23443 - def lock(self):
23444 - self.locked = 1
23445 -
23446 - def unlock(self):
23447 - self.locked = 0
23448 -
23449 - def modifying(self):
23450 - if self.locked:
23451 - raise Exception(_("Configuration is locked."))
23452 -
23453 - def backup_changes(self,key=None):
23454 - self.modifying()
23455 - if key and key in self.configdict["env"]:
23456 - self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
23457 - else:
23458 - raise KeyError(_("No such key defined in environment: %s") % key)
23459 -
23460 - def reset(self, keeping_pkg=0, use_cache=None):
23461 - """
23462 - Restore environment from self.backupenv, call self.regenerate()
23463 - @param keeping_pkg: Should we keep the setcpv() data or delete it.
23464 - @type keeping_pkg: Boolean
23465 - @rtype: None
23466 - """
23467 -
23468 - if use_cache is not None:
23469 - warnings.warn("The use_cache parameter for config.reset() is deprecated and without effect.",
23470 - DeprecationWarning, stacklevel=2)
23471 -
23472 - self.modifying()
23473 - self.configdict["env"].clear()
23474 - self.configdict["env"].update(self.backupenv)
23475 -
23476 - self.modifiedkeys = []
23477 - if not keeping_pkg:
23478 - self.mycpv = None
23479 - self._setcpv_args_hash = None
23480 - self.puse = ""
23481 - del self._penv[:]
23482 - self.configdict["pkg"].clear()
23483 - self.configdict["pkginternal"].clear()
23484 - self.configdict["features"]["USE"] = self._default_features_use
23485 - self.configdict["repo"].clear()
23486 - self.configdict["defaults"]["USE"] = \
23487 - " ".join(self.make_defaults_use)
23488 - self.usemask = self._use_manager.getUseMask()
23489 - self.useforce = self._use_manager.getUseForce()
23490 - self.regenerate()
23491 -
23492 - class _lazy_vars:
23493 -
23494 - __slots__ = ('built_use', 'settings', 'values')
23495 -
23496 - def __init__(self, built_use, settings):
23497 - self.built_use = built_use
23498 - self.settings = settings
23499 - self.values = None
23500 -
23501 - def __getitem__(self, k):
23502 - if self.values is None:
23503 - self.values = self._init_values()
23504 - return self.values[k]
23505 -
23506 - def _init_values(self):
23507 - values = {}
23508 - settings = self.settings
23509 - use = self.built_use
23510 - if use is None:
23511 - use = frozenset(settings['PORTAGE_USE'].split())
23512 -
23513 - values['ACCEPT_LICENSE'] = settings._license_manager.get_prunned_accept_license( \
23514 - settings.mycpv, use, settings.get('LICENSE', ''), settings.get('SLOT'), settings.get('PORTAGE_REPO_NAME'))
23515 - values['PORTAGE_PROPERTIES'] = self._flatten('PROPERTIES', use, settings)
23516 - values['PORTAGE_RESTRICT'] = self._flatten('RESTRICT', use, settings)
23517 - return values
23518 -
23519 - def _flatten(self, var, use, settings):
23520 - try:
23521 - restrict = set(use_reduce(settings.get(var, ''), uselist=use, flat=True))
23522 - except InvalidDependString:
23523 - restrict = set()
23524 - return ' '.join(sorted(restrict))
23525 -
23526 - class _lazy_use_expand:
23527 - """
23528 - Lazily evaluate USE_EXPAND variables since they are only needed when
23529 - an ebuild shell is spawned. Variable values are made consistent with
23530 - the previously calculated USE settings.
23531 - """
23532 -
23533 - def __init__(self, settings, unfiltered_use,
23534 - use, usemask, iuse_effective,
23535 - use_expand_split, use_expand_dict):
23536 - self._settings = settings
23537 - self._unfiltered_use = unfiltered_use
23538 - self._use = use
23539 - self._usemask = usemask
23540 - self._iuse_effective = iuse_effective
23541 - self._use_expand_split = use_expand_split
23542 - self._use_expand_dict = use_expand_dict
23543 -
23544 - def __getitem__(self, key):
23545 - prefix = key.lower() + '_'
23546 - prefix_len = len(prefix)
23547 - expand_flags = set( x[prefix_len:] for x in self._use \
23548 - if x[:prefix_len] == prefix )
23549 - var_split = self._use_expand_dict.get(key, '').split()
23550 - # Preserve the order of var_split because it can matter for things
23551 - # like LINGUAS.
23552 - var_split = [ x for x in var_split if x in expand_flags ]
23553 - var_split.extend(expand_flags.difference(var_split))
23554 - has_wildcard = '*' in expand_flags
23555 - if has_wildcard:
23556 - var_split = [ x for x in var_split if x != "*" ]
23557 - has_iuse = set()
23558 - for x in self._iuse_effective:
23559 - if x[:prefix_len] == prefix:
23560 - has_iuse.add(x[prefix_len:])
23561 - if has_wildcard:
23562 - # * means to enable everything in IUSE that's not masked
23563 - if has_iuse:
23564 - usemask = self._usemask
23565 - for suffix in has_iuse:
23566 - x = prefix + suffix
23567 - if x not in usemask:
23568 - if suffix not in expand_flags:
23569 - var_split.append(suffix)
23570 - else:
23571 - # If there is a wildcard and no matching flags in IUSE then
23572 - # LINGUAS should be unset so that all .mo files are
23573 - # installed.
23574 - var_split = []
23575 - # Make the flags unique and filter them according to IUSE.
23576 - # Also, continue to preserve order for things like LINGUAS
23577 - # and filter any duplicates that variable may contain.
23578 - filtered_var_split = []
23579 - remaining = has_iuse.intersection(var_split)
23580 - for x in var_split:
23581 - if x in remaining:
23582 - remaining.remove(x)
23583 - filtered_var_split.append(x)
23584 - var_split = filtered_var_split
23585 -
23586 - return ' '.join(var_split)
23587 -
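Both helper classes above defer work until a key is actually read; the pattern in isolation (compute is any callable producing the full dict):

    class LazyVars:
        def __init__(self, compute):
            self._compute = compute
            self._values = None  # computed on first access

        def __getitem__(self, k):
            if self._values is None:
                self._values = self._compute()
            return self._values[k]

    calls = []
    lv = LazyVars(lambda: (calls.append(1), {"PORTAGE_RESTRICT": "test"})[1])
    assert not calls                      # nothing computed yet
    assert lv["PORTAGE_RESTRICT"] == "test"
    assert lv["PORTAGE_RESTRICT"] == "test"
    assert len(calls) == 1                # computed exactly once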
23588 - def _setcpv_recursion_gate(f):
23589 - """
23590 - Raise AssertionError for recursive setcpv calls.
23591 - """
23592 - def wrapper(self, *args, **kwargs):
23593 - if hasattr(self, '_setcpv_active'):
23594 - raise AssertionError('setcpv recursion detected')
23595 - self._setcpv_active = True
23596 - try:
23597 - return f(self, *args, **kwargs)
23598 - finally:
23599 - del self._setcpv_active
23600 - return wrapper
23601 -
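A usage sketch of the recursion-gate pattern above on a toy class (Demo and _active are illustrative names):

    def recursion_gate(f):
        def wrapper(self, *args, **kwargs):
            if hasattr(self, "_active"):
                raise AssertionError("recursion detected")
            self._active = True
            try:
                return f(self, *args, **kwargs)
            finally:
                del self._active
        return wrapper

    class Demo:
        @recursion_gate
        def run(self, recurse=False):
            if recurse:
                self.run()  # would raise AssertionError
            return "ok"

    assert Demo().run() == "ok"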
23602 - @_setcpv_recursion_gate
23603 - def setcpv(self, mycpv, use_cache=None, mydb=None):
23604 - """
23605 - Load a particular CPV into the config, this lets us see the
23606 - Default USE flags for a particular ebuild as well as the USE
23607 - flags from package.use.
23608 -
23609 - @param mycpv: A cpv to load
23610 - @type mycpv: string
23611 - @param mydb: a dbapi instance that supports aux_get with the IUSE key.
23612 - @type mydb: dbapi or derivative.
23613 - @rtype: None
23614 - """
23615 -
23616 - if use_cache is not None:
23617 - warnings.warn("The use_cache parameter for config.setcpv() is deprecated and without effect.",
23618 - DeprecationWarning, stacklevel=2)
23619 -
23620 - self.modifying()
23621 -
23622 - pkg = None
23623 - built_use = None
23624 - explicit_iuse = None
23625 - if not isinstance(mycpv, str):
23626 - pkg = mycpv
23627 - mycpv = pkg.cpv
23628 - mydb = pkg._metadata
23629 - explicit_iuse = pkg.iuse.all
23630 - args_hash = (mycpv, id(pkg))
23631 - if pkg.built:
23632 - built_use = pkg.use.enabled
23633 - else:
23634 - args_hash = (mycpv, id(mydb))
23635 -
23636 - if args_hash == self._setcpv_args_hash:
23637 - return
23638 - self._setcpv_args_hash = args_hash
23639 -
23640 - has_changed = False
23641 - self.mycpv = mycpv
23642 - cat, pf = catsplit(mycpv)
23643 - cp = cpv_getkey(mycpv)
23644 - cpv_slot = self.mycpv
23645 - pkginternaluse = ""
23646 - pkginternaluse_list = []
23647 - feature_use = []
23648 - iuse = ""
23649 - pkg_configdict = self.configdict["pkg"]
23650 - previous_iuse = pkg_configdict.get("IUSE")
23651 - previous_iuse_effective = pkg_configdict.get("IUSE_EFFECTIVE")
23652 - previous_features = pkg_configdict.get("FEATURES")
23653 - previous_penv = self._penv
23654 -
23655 - aux_keys = self._setcpv_aux_keys
23656 -
23657 - # Discard any existing metadata and package.env settings from
23658 - # the previous package instance.
23659 - pkg_configdict.clear()
23660 -
23661 - pkg_configdict["CATEGORY"] = cat
23662 - pkg_configdict["PF"] = pf
23663 - repository = None
23664 - eapi = None
23665 - if mydb:
23666 - if not hasattr(mydb, "aux_get"):
23667 - for k in aux_keys:
23668 - if k in mydb:
23669 - # Make these lazy, since __getitem__ triggers
23670 - # evaluation of USE conditionals which can't
23671 - # occur until PORTAGE_USE is calculated below.
23672 - pkg_configdict.addLazySingleton(k,
23673 - mydb.__getitem__, k)
23674 - else:
23675 - # When calling dbapi.aux_get(), grab USE for built/installed
23676 - # packages since we want to save it in PORTAGE_BUILT_USE for
23677 - # evaluating conditional USE deps in atoms passed via IPC to
23678 - # helpers like has_version and best_version.
23679 - aux_keys = set(aux_keys)
23680 - if hasattr(mydb, '_aux_cache_keys'):
23681 - aux_keys = aux_keys.intersection(mydb._aux_cache_keys)
23682 - aux_keys.add('USE')
23683 - aux_keys = list(aux_keys)
23684 - for k, v in zip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
23685 - pkg_configdict[k] = v
23686 - built_use = frozenset(pkg_configdict.pop('USE').split())
23687 - if not built_use:
23688 - # Empty USE means this dbapi instance does not contain
23689 - # built packages.
23690 - built_use = None
23691 - eapi = pkg_configdict['EAPI']
23692 -
23693 - repository = pkg_configdict.pop("repository", None)
23694 - if repository is not None:
23695 - pkg_configdict["PORTAGE_REPO_NAME"] = repository
23696 - iuse = pkg_configdict["IUSE"]
23697 - if pkg is None:
23698 - self.mycpv = _pkg_str(self.mycpv, metadata=pkg_configdict,
23699 - settings=self)
23700 - cpv_slot = self.mycpv
23701 - else:
23702 - cpv_slot = pkg
23703 - for x in iuse.split():
23704 - if x.startswith("+"):
23705 - pkginternaluse_list.append(x[1:])
23706 - elif x.startswith("-"):
23707 - pkginternaluse_list.append(x)
23708 - pkginternaluse = " ".join(pkginternaluse_list)
23709 -
23710 - eapi_attrs = _get_eapi_attrs(eapi)
23711 -
23712 - if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
23713 - self.configdict["pkginternal"]["USE"] = pkginternaluse
23714 - has_changed = True
23715 -
23716 - repo_env = []
23717 - if repository and repository != Package.UNKNOWN_REPO:
23718 - repos = []
23719 - try:
23720 - repos.extend(repo.name for repo in
23721 - self.repositories[repository].masters)
23722 - except KeyError:
23723 - pass
23724 - repos.append(repository)
23725 - for repo in repos:
23726 - d = self._repo_make_defaults.get(repo)
23727 - if d is None:
23728 - d = {}
23729 - else:
23730 - # make a copy, since we might modify it with
23731 - # package.use settings
23732 - d = d.copy()
23733 - cpdict = self._use_manager._repo_puse_dict.get(repo, {}).get(cp)
23734 - if cpdict:
23735 - repo_puse = ordered_by_atom_specificity(cpdict, cpv_slot)
23736 - if repo_puse:
23737 - for x in repo_puse:
23738 - d["USE"] = d.get("USE", "") + " " + " ".join(x)
23739 - if d:
23740 - repo_env.append(d)
23741 -
23742 - if repo_env or self.configdict["repo"]:
23743 - self.configdict["repo"].clear()
23744 - self.configdict["repo"].update(stack_dicts(repo_env,
23745 - incrementals=self.incrementals))
23746 - has_changed = True
23747 -
23748 - defaults = []
23749 - for i, pkgprofileuse_dict in enumerate(self._use_manager._pkgprofileuse):
23750 - if self.make_defaults_use[i]:
23751 - defaults.append(self.make_defaults_use[i])
23752 - cpdict = pkgprofileuse_dict.get(cp)
23753 - if cpdict:
23754 - pkg_defaults = ordered_by_atom_specificity(cpdict, cpv_slot)
23755 - if pkg_defaults:
23756 - defaults.extend(pkg_defaults)
23757 - defaults = " ".join(defaults)
23758 - if defaults != self.configdict["defaults"].get("USE",""):
23759 - self.configdict["defaults"]["USE"] = defaults
23760 - has_changed = True
23761 -
23762 - useforce = self._use_manager.getUseForce(cpv_slot)
23763 - if useforce != self.useforce:
23764 - self.useforce = useforce
23765 - has_changed = True
23766 -
23767 - usemask = self._use_manager.getUseMask(cpv_slot)
23768 - if usemask != self.usemask:
23769 - self.usemask = usemask
23770 - has_changed = True
23771 -
23772 - oldpuse = self.puse
23773 - self.puse = self._use_manager.getPUSE(cpv_slot)
23774 - if oldpuse != self.puse:
23775 - has_changed = True
23776 - self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
23777 - self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
23778 -
23779 - if previous_features:
23780 - # The package from the previous setcpv call had package.env
23781 - # settings which modified FEATURES. Therefore, trigger a
23782 - # regenerate() call in order to ensure that self.features
23783 - # is accurate.
23784 - has_changed = True
23785 - # Prevent stale features USE from corrupting the evaluation
23786 - # of USE conditional RESTRICT.
23787 - self.configdict["features"]["USE"] = self._default_features_use
23788 -
23789 - self._penv = []
23790 - cpdict = self._penvdict.get(cp)
23791 - if cpdict:
23792 - penv_matches = ordered_by_atom_specificity(cpdict, cpv_slot)
23793 - if penv_matches:
23794 - for x in penv_matches:
23795 - self._penv.extend(x)
23796 -
23797 - bashrc_files = []
23798 -
23799 - for profile, profile_bashrc in zip(self._locations_manager.profiles_complex, self._profile_bashrc):
23800 - if profile_bashrc:
23801 - bashrc_files.append(os.path.join(profile.location, 'profile.bashrc'))
23802 - if profile in self._pbashrcdict:
23803 - cpdict = self._pbashrcdict[profile].get(cp)
23804 - if cpdict:
23805 - bashrc_matches = \
23806 - ordered_by_atom_specificity(cpdict, cpv_slot)
23807 - for x in bashrc_matches:
23808 - bashrc_files.extend(x)
23809 -
23810 - self._pbashrc = tuple(bashrc_files)
23811 -
23812 - protected_pkg_keys = set(pkg_configdict)
23813 - protected_pkg_keys.discard('USE')
23814 -
23815 - # If there are _any_ package.env settings for this package
23816 - # then it automatically triggers config.reset(), in order
23817 - # to account for possible incremental interaction between
23818 - # package.use, package.env, and overrides from the calling
23819 - # environment (configdict['env']).
23820 - if self._penv:
23821 - has_changed = True
23822 - # USE is special because package.use settings override
23823 - # it. Discard any package.use settings here and they'll
23824 - # be added back later.
23825 - pkg_configdict.pop('USE', None)
23826 - self._grab_pkg_env(self._penv, pkg_configdict,
23827 - protected_keys=protected_pkg_keys)
23828 -
23829 - # Now add package.use settings, which override USE from
23830 - # package.env
23831 - if self.puse:
23832 - if 'USE' in pkg_configdict:
23833 - pkg_configdict['USE'] = \
23834 - pkg_configdict['USE'] + " " + self.puse
23835 - else:
23836 - pkg_configdict['USE'] = self.puse
23837 -
23838 - elif previous_penv:
23839 - has_changed = True
23840 -
23841 - if not (previous_iuse == iuse and
23842 - (previous_iuse_effective is not None) == eapi_attrs.iuse_effective):
23843 - has_changed = True
23844 -
23845 - if has_changed:
23846 - # This can modify self.features due to package.env settings.
23847 - self.reset(keeping_pkg=1)
23848 -
23849 - if "test" in self.features:
23850 - # This is independent of IUSE and RESTRICT, so that the same
23851 - # value can be shared between packages with different settings,
23852 - # which is important when evaluating USE conditional RESTRICT.
23853 - feature_use.append("test")
23854 -
23855 - feature_use = " ".join(feature_use)
23856 - if feature_use != self.configdict["features"]["USE"]:
23857 - # Regenerate USE for evaluation of conditional RESTRICT.
23858 - self.configdict["features"]["USE"] = feature_use
23859 - self.reset(keeping_pkg=1)
23860 - has_changed = True
23861 -
23862 - if explicit_iuse is None:
23863 - explicit_iuse = frozenset(x.lstrip("+-") for x in iuse.split())
23864 - if eapi_attrs.iuse_effective:
23865 - iuse_implicit_match = self._iuse_effective_match
23866 - else:
23867 - iuse_implicit_match = self._iuse_implicit_match
23868 -
23869 - if pkg is None:
23870 - raw_properties = pkg_configdict.get("PROPERTIES")
23871 - raw_restrict = pkg_configdict.get("RESTRICT")
23872 - else:
23873 - raw_properties = pkg._raw_metadata["PROPERTIES"]
23874 - raw_restrict = pkg._raw_metadata["RESTRICT"]
23875 -
23876 - restrict_test = False
23877 - if raw_restrict:
23878 - try:
23879 - if built_use is not None:
23880 - properties = use_reduce(raw_properties,
23881 - uselist=built_use, flat=True)
23882 - restrict = use_reduce(raw_restrict,
23883 - uselist=built_use, flat=True)
23884 - else:
23885 - properties = use_reduce(raw_properties,
23886 - uselist=frozenset(x for x in self['USE'].split()
23887 - if x in explicit_iuse or iuse_implicit_match(x)),
23888 - flat=True)
23889 - restrict = use_reduce(raw_restrict,
23890 - uselist=frozenset(x for x in self['USE'].split()
23891 - if x in explicit_iuse or iuse_implicit_match(x)),
23892 - flat=True)
23893 - except PortageException:
23894 - pass
23895 - else:
23896 - allow_test = self.get('ALLOW_TEST', '').split()
23897 - restrict_test = (
23898 - "test" in restrict and not "all" in allow_test and
23899 - not ("test_network" in properties and "network" in allow_test))
23900 -
23901 - if restrict_test and "test" in self.features:
23902 - # Handle it like IUSE="-test", since features USE is
23903 - # independent of RESTRICT.
23904 - pkginternaluse_list.append("-test")
23905 - pkginternaluse = " ".join(pkginternaluse_list)
23906 - self.configdict["pkginternal"]["USE"] = pkginternaluse
23907 - # TODO: can we avoid that?
23908 - self.reset(keeping_pkg=1)
23909 - has_changed = True
23910 -
23911 - env_configdict = self.configdict['env']
23912 -
23913 - # Ensure that "pkg" values are always preferred over "env" values.
23914 - # This must occur _after_ the above reset() call, since reset()
23915 - # copies values from self.backupenv.
23916 - for k in protected_pkg_keys:
23917 - env_configdict.pop(k, None)
23918 -
23919 - lazy_vars = self._lazy_vars(built_use, self)
23920 - env_configdict.addLazySingleton('ACCEPT_LICENSE',
23921 - lazy_vars.__getitem__, 'ACCEPT_LICENSE')
23922 - env_configdict.addLazySingleton('PORTAGE_PROPERTIES',
23923 - lazy_vars.__getitem__, 'PORTAGE_PROPERTIES')
23924 - env_configdict.addLazySingleton('PORTAGE_RESTRICT',
23925 - lazy_vars.__getitem__, 'PORTAGE_RESTRICT')
23926 -
23927 - if built_use is not None:
23928 - pkg_configdict['PORTAGE_BUILT_USE'] = ' '.join(built_use)
23929 -
23930 - # If reset() has not been called, it's safe to return
23931 - # early if IUSE has not changed.
23932 - if not has_changed:
23933 - return
23934 -
23935 - # Filter out USE flags that aren't part of IUSE. This has to
23936 - # be done for every setcpv() call since practically every
23937 - # package has different IUSE.
23938 - use = set(self["USE"].split())
23939 - unfiltered_use = frozenset(use)
23940 -
23941 - if eapi_attrs.iuse_effective:
23942 - portage_iuse = set(self._iuse_effective)
23943 - portage_iuse.update(explicit_iuse)
23944 - if built_use is not None:
23945 - # When the binary package was built, the profile may have
23946 - # had different IUSE_IMPLICIT settings, so any member of
23947 - # the built USE setting is considered to be a member of
23948 - # IUSE_EFFECTIVE (see bug 640318).
23949 - portage_iuse.update(built_use)
23950 - self.configdict["pkg"]["IUSE_EFFECTIVE"] = \
23951 - " ".join(sorted(portage_iuse))
23952 -
23953 - self.configdict["env"]["BASH_FUNC____in_portage_iuse%%"] = (
23954 - "() { "
23955 - "if [[ ${#___PORTAGE_IUSE_HASH[@]} -lt 1 ]]; then "
23956 - " declare -gA ___PORTAGE_IUSE_HASH=(%s); "
23957 - "fi; "
23958 - "[[ -n ${___PORTAGE_IUSE_HASH[$1]} ]]; "
23959 - "}" ) % " ".join('["%s"]=1' % x for x in portage_iuse)
23960 - else:
23961 - portage_iuse = self._get_implicit_iuse()
23962 - portage_iuse.update(explicit_iuse)
23963 -
23964 - # _get_implicit_iuse() returns regular-expression patterns,
23965 - # so we can't use the (faster) hash map. Fall back to
23966 - # implementing ___in_portage_iuse() the older/slower way.
23967 -
23968 - # PORTAGE_IUSE is not always needed so it's lazily evaluated.
23969 - self.configdict["env"].addLazySingleton(
23970 - "PORTAGE_IUSE", _lazy_iuse_regex, portage_iuse)
23971 - self.configdict["env"]["BASH_FUNC____in_portage_iuse%%"] = \
23972 - "() { [[ $1 =~ ${PORTAGE_IUSE} ]]; }"
23973 -
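How the exported ___in_portage_iuse body is assembled, sketched with example flags (the real environment key carries the BASH_FUNC...%% wrapper shown above):

    portage_iuse = {"test", "doc", "abi_x86_64"}
    body = " ".join('["%s"]=1' % x for x in sorted(portage_iuse))
    func = ("() { "
            "if [[ ${#___PORTAGE_IUSE_HASH[@]} -lt 1 ]]; then "
            "declare -gA ___PORTAGE_IUSE_HASH=(%s); fi; "
            "[[ -n ${___PORTAGE_IUSE_HASH[$1]} ]]; }") % body
    assert '["doc"]=1' in func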
23974 - ebuild_force_test = not restrict_test and \
23975 - self.get("EBUILD_FORCE_TEST") == "1"
23976 -
23977 - if "test" in explicit_iuse or iuse_implicit_match("test"):
23978 - if "test" in self.features:
23979 - if ebuild_force_test and "test" in self.usemask:
23980 - self.usemask = \
23981 - frozenset(x for x in self.usemask if x != "test")
23982 - if restrict_test or \
23983 - ("test" in self.usemask and not ebuild_force_test):
23984 - # "test" is in IUSE and USE=test is masked, so execution
23985 - # of src_test() probably is not reliable. Therefore,
23986 - # temporarily disable FEATURES=test just for this package.
23987 - self["FEATURES"] = " ".join(x for x in self.features \
23988 - if x != "test")
23989 -
23990 - # Allow _* flags from USE_EXPAND wildcards to pass through here.
23991 - use.difference_update([x for x in use \
23992 - if (x not in explicit_iuse and \
23993 - not iuse_implicit_match(x)) and x[-2:] != '_*'])
23994 -
23995 - # Use the calculated USE flags to regenerate the USE_EXPAND flags so
23996 - # that they are consistent. For optimal performance, use slice
23997 - # comparison instead of startswith().
23998 - use_expand_split = set(x.lower() for \
23999 - x in self.get('USE_EXPAND', '').split())
24000 - lazy_use_expand = self._lazy_use_expand(
24001 - self, unfiltered_use, use, self.usemask,
24002 - portage_iuse, use_expand_split, self._use_expand_dict)
24003 -
24004 - use_expand_iuses = dict((k, set()) for k in use_expand_split)
24005 - for x in portage_iuse:
24006 - x_split = x.split('_')
24007 - if len(x_split) == 1:
24008 - continue
24009 - for i in range(len(x_split) - 1):
24010 - k = '_'.join(x_split[:i+1])
24011 - if k in use_expand_split:
24012 - use_expand_iuses[k].add(x)
24013 - break
24014 -
24015 - for k, use_expand_iuse in use_expand_iuses.items():
24016 - if k + '_*' in use:
24017 - use.update( x for x in use_expand_iuse if x not in usemask )
24018 - k = k.upper()
24019 - self.configdict['env'].addLazySingleton(k,
24020 - lazy_use_expand.__getitem__, k)
24021 -
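The prefix-matching loop above, reduced to a standalone sketch (flag and variable names are examples, not real profile data):

    use_expand = {"python_targets", "cpu_flags_x86"}
    iuse = ["python_targets_python3_9", "cpu_flags_x86_sse2", "doc"]

    grouped = {k: set() for k in use_expand}
    for flag in iuse:
        parts = flag.split("_")
        for i in range(len(parts) - 1):
            # Try successively longer '_'-joined prefixes.
            prefix = "_".join(parts[:i + 1])
            if prefix in use_expand:
                grouped[prefix].add(flag)
                break

    assert grouped["cpu_flags_x86"] == {"cpu_flags_x86_sse2"}
    assert grouped["python_targets"] == {"python_targets_python3_9"}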
24022 - for k in self.get("USE_EXPAND_UNPREFIXED", "").split():
24023 - var_split = self.get(k, '').split()
24024 - var_split = [ x for x in var_split if x in use ]
24025 - if var_split:
24026 - self.configlist[-1][k] = ' '.join(var_split)
24027 - elif k in self:
24028 - self.configlist[-1][k] = ''
24029 -
24030 - # Filtered for the ebuild environment. Store this in a separate
24031 - # attribute since we still want to be able to see global USE
24032 - # settings for things like emerge --info.
24033 -
24034 - self.configdict["env"]["PORTAGE_USE"] = \
24035 - " ".join(sorted(x for x in use if x[-2:] != '_*'))
24036 -
24037 - # Clear the eapi cache here rather than in the constructor, since
24038 - # setcpv triggers lazy instantiation of things like _use_manager.
24039 - _eapi_cache.clear()
24040 -
24041 - def _grab_pkg_env(self, penv, container, protected_keys=None):
24042 - if protected_keys is None:
24043 - protected_keys = ()
24044 - abs_user_config = os.path.join(
24045 - self['PORTAGE_CONFIGROOT'], USER_CONFIG_PATH)
24046 - non_user_variables = self._non_user_variables
24047 - # Make a copy since we don't want per-package settings
24048 - # to pollute the global expand_map.
24049 - expand_map = self._expand_map.copy()
24050 - incrementals = self.incrementals
24051 - for envname in penv:
24052 - penvfile = os.path.join(abs_user_config, "env", envname)
24053 - penvconfig = getconfig(penvfile, tolerant=self._tolerant,
24054 - allow_sourcing=True, expand=expand_map)
24055 - if penvconfig is None:
24056 - writemsg("!!! %s references non-existent file: %s\n" % \
24057 - (os.path.join(abs_user_config, 'package.env'), penvfile),
24058 - noiselevel=-1)
24059 - else:
24060 - for k, v in penvconfig.items():
24061 - if k in protected_keys or \
24062 - k in non_user_variables:
24063 - writemsg("!!! Illegal variable " + \
24064 - "'%s' assigned in '%s'\n" % \
24065 - (k, penvfile), noiselevel=-1)
24066 - elif k in incrementals:
24067 - if k in container:
24068 - container[k] = container[k] + " " + v
24069 - else:
24070 - container[k] = v
24071 - else:
24072 - container[k] = v
24073 -
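The merge rule applied to each package.env value above, in isolation (the incrementals set and the dicts are toy values):

    incrementals = {"USE", "FEATURES"}

    def merge(container, k, v):
        # Incremental keys append with a space; others overwrite.
        if k in incrementals and k in container:
            container[k] = container[k] + " " + v
        else:
            container[k] = v

    conf = {"USE": "doc", "CFLAGS": "-O2"}
    merge(conf, "USE", "test")
    merge(conf, "CFLAGS", "-O3")
    assert conf == {"USE": "doc test", "CFLAGS": "-O3"}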
24074 - def _iuse_effective_match(self, flag):
24075 - return flag in self._iuse_effective
24076 -
24077 - def _calc_iuse_effective(self):
24078 - """
24079 - Beginning with EAPI 5, IUSE_EFFECTIVE is defined by PMS.
24080 - """
24081 - iuse_effective = []
24082 - iuse_effective.extend(self.get("IUSE_IMPLICIT", "").split())
24083 -
24084 - # USE_EXPAND_IMPLICIT should contain things like ARCH, ELIBC,
24085 - # KERNEL, and USERLAND.
24086 - use_expand_implicit = frozenset(
24087 - self.get("USE_EXPAND_IMPLICIT", "").split())
24088 -
24089 - # USE_EXPAND_UNPREFIXED should contain at least ARCH, and
24090 - # USE_EXPAND_VALUES_ARCH should contain all valid ARCH flags.
24091 - for v in self.get("USE_EXPAND_UNPREFIXED", "").split():
24092 - if v not in use_expand_implicit:
24093 - continue
24094 - iuse_effective.extend(
24095 - self.get("USE_EXPAND_VALUES_" + v, "").split())
24096 -
24097 - use_expand = frozenset(self.get("USE_EXPAND", "").split())
24098 - for v in use_expand_implicit:
24099 - if v not in use_expand:
24100 - continue
24101 - lower_v = v.lower()
24102 - for x in self.get("USE_EXPAND_VALUES_" + v, "").split():
24103 - iuse_effective.append(lower_v + "_" + x)
24104 -
24105 - return frozenset(iuse_effective)
24106 -
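A worked example of the computation above with invented profile values (not real profile data):

    settings = {
        "IUSE_IMPLICIT": "prefix",
        "USE_EXPAND_IMPLICIT": "ARCH ELIBC",
        "USE_EXPAND_UNPREFIXED": "ARCH",
        "USE_EXPAND": "ELIBC",
        "USE_EXPAND_VALUES_ARCH": "amd64 arm64",
        "USE_EXPAND_VALUES_ELIBC": "glibc musl",
    }

    def calc_iuse_effective(s):
        eff = s.get("IUSE_IMPLICIT", "").split()
        implicit = frozenset(s.get("USE_EXPAND_IMPLICIT", "").split())
        # Unprefixed expansions (ARCH) contribute their raw values.
        for v in s.get("USE_EXPAND_UNPREFIXED", "").split():
            if v in implicit:
                eff.extend(s.get("USE_EXPAND_VALUES_" + v, "").split())
        # Prefixed expansions contribute lowercased "var_value" flags.
        for v in implicit & frozenset(s.get("USE_EXPAND", "").split()):
            eff.extend(v.lower() + "_" + x
                       for x in s.get("USE_EXPAND_VALUES_" + v, "").split())
        return frozenset(eff)

    assert calc_iuse_effective(settings) == frozenset(
        {"prefix", "amd64", "arm64", "elibc_glibc", "elibc_musl"})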
24107 - def _get_implicit_iuse(self):
24108 - """
24109 - Prior to EAPI 5, these flags are considered to
24110 - be implicit members of IUSE:
24111 - * Flags derived from ARCH
24112 - * Flags derived from USE_EXPAND_HIDDEN variables
24113 - * Masked flags, such as those from {,package}use.mask
24114 - * Forced flags, such as those from {,package}use.force
24115 - * build and bootstrap flags used by bootstrap.sh
24116 - """
24117 - iuse_implicit = set()
24118 - # Flags derived from ARCH.
24119 - arch = self.configdict["defaults"].get("ARCH")
24120 - if arch:
24121 - iuse_implicit.add(arch)
24122 - iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
24123 -
24124 - # Flags derived from USE_EXPAND_HIDDEN variables
24125 - # such as ELIBC, KERNEL, and USERLAND.
24126 - use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
24127 - for x in use_expand_hidden:
24128 - iuse_implicit.add(x.lower() + "_.*")
24129 -
24130 - # Flags that have been masked or forced.
24131 - iuse_implicit.update(self.usemask)
24132 - iuse_implicit.update(self.useforce)
24133 -
24134 - # build and bootstrap flags used by bootstrap.sh
24135 - iuse_implicit.add("build")
24136 - iuse_implicit.add("bootstrap")
24137 -
24138 - return iuse_implicit
24139 -
24140 - def _getUseMask(self, pkg, stable=None):
24141 - return self._use_manager.getUseMask(pkg, stable=stable)
24142 -
24143 - def _getUseForce(self, pkg, stable=None):
24144 - return self._use_manager.getUseForce(pkg, stable=stable)
24145 -
24146 - def _getMaskAtom(self, cpv, metadata):
24147 - """
24148 - Take a package and return a matching package.mask atom, or None if no
24149 - such atom exists or it has been cancelled by package.unmask.
24150 -
24151 - @param cpv: The package name
24152 - @type cpv: String
24153 - @param metadata: A dictionary of raw package metadata
24154 - @type metadata: dict
24155 - @rtype: String
24156 - @return: A matching atom string or None if one is not found.
24157 - """
24158 - return self._mask_manager.getMaskAtom(cpv, metadata["SLOT"], metadata.get('repository'))
24159 -
24160 - def _getRawMaskAtom(self, cpv, metadata):
24161 - """
24162 - Take a package and return a matching package.mask atom, or None if no
24163 - such atom exists. Unlike _getMaskAtom(), this ignores package.unmask.
24164 -
24165 - @param cpv: The package name
24166 - @type cpv: String
24167 - @param metadata: A dictionary of raw package metadata
24168 - @type metadata: dict
24169 - @rtype: String
24170 - @return: A matching atom string or None if one is not found.
24171 - """
24172 - return self._mask_manager.getRawMaskAtom(cpv, metadata["SLOT"], metadata.get('repository'))
24173 -
24174 -
24175 - def _getProfileMaskAtom(self, cpv, metadata):
24176 - """
24177 - Take a package and return a matching profile atom, or None if no
24178 - such atom exists. Note that a profile atom may or may not have a "*"
24179 - prefix.
24180 -
24181 - @param cpv: The package name
24182 - @type cpv: String
24183 - @param metadata: A dictionary of raw package metadata
24184 - @type metadata: dict
24185 - @rtype: String
24186 - @return: A matching profile atom string or None if one is not found.
24187 - """
24188 -
24189 - warnings.warn("The config._getProfileMaskAtom() method is deprecated.",
24190 - DeprecationWarning, stacklevel=2)
24191 -
24192 - cp = cpv_getkey(cpv)
24193 - profile_atoms = self.prevmaskdict.get(cp)
24194 - if profile_atoms:
24195 - pkg = "".join((cpv, _slot_separator, metadata["SLOT"]))
24196 - repo = metadata.get("repository")
24197 - if repo and repo != Package.UNKNOWN_REPO:
24198 - pkg = "".join((pkg, _repo_separator, repo))
24199 - pkg_list = [pkg]
24200 - for x in profile_atoms:
24201 - if match_from_list(x, pkg_list):
24202 - continue
24203 - return x
24204 - return None
24205 -
24206 - def _isStable(self, pkg):
24207 - return self._keywords_manager.isStable(pkg,
24208 - self.get("ACCEPT_KEYWORDS", ""),
24209 - self.configdict["backupenv"].get("ACCEPT_KEYWORDS", ""))
24210 -
24211 - def _getKeywords(self, cpv, metadata):
24212 - return self._keywords_manager.getKeywords(cpv, metadata["SLOT"], \
24213 - metadata.get("KEYWORDS", ""), metadata.get("repository"))
24214 -
24215 - def _getMissingKeywords(self, cpv, metadata):
24216 - """
24217 - Take a package and return a list of any KEYWORDS that the user may
24218 - need to accept for the given package. If the KEYWORDS are empty
24219 - and the ** keyword has not been accepted, the returned list will
24220 - contain ** alone (in order to distinguish from the case of "none
24221 - missing").
24222 -
24223 - @param cpv: The package name (for package.keywords support)
24224 - @type cpv: String
24225 - @param metadata: A dictionary of raw package metadata
24226 - @type metadata: dict
24227 - @rtype: List
24228 - @return: A list of KEYWORDS that have not been accepted.
24229 - """
24230 -
24231 - # Hack: Need to check the env directly here, since otherwise stacking
24232 - # doesn't work properly: negative values are lost in the config
24233 - # object (bug #139600)
24234 - backuped_accept_keywords = self.configdict["backupenv"].get("ACCEPT_KEYWORDS", "")
24235 - global_accept_keywords = self.get("ACCEPT_KEYWORDS", "")
24236 -
24237 - return self._keywords_manager.getMissingKeywords(cpv, metadata["SLOT"], \
24238 - metadata.get("KEYWORDS", ""), metadata.get('repository'), \
24239 - global_accept_keywords, backuped_accept_keywords)
24240 -
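# Illustrative semantics (hypothetical package): with ACCEPT_KEYWORDS="amd64",
# KEYWORDS="~amd64 x86" yields ["~amd64"], while an empty KEYWORDS yields
# ["**"] so callers can tell it apart from an empty "nothing missing" list.
import portage

settings = portage.config(clone=portage.settings)
metadata = {"SLOT": "0", "KEYWORDS": "~amd64 x86", "repository": "gentoo"}
print(settings._getMissingKeywords("app-misc/example-1.0", metadata))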
24241 - def _getRawMissingKeywords(self, cpv, metadata):
24242 - """
24243 - Take a package and return a list of any KEYWORDS that the user may
24244 - need to accept for the given package. If the KEYWORDS are empty,
24245 - the returned list will contain ** alone (in order to distinguish
24246 - from the case of "none missing"). This DOES NOT apply any user config
24247 - package.accept_keywords acceptance.
24248 -
24249 - @param cpv: The package name (for package.keywords support)
24250 - @type cpv: String
24251 - @param metadata: A dictionary of raw package metadata
24252 - @type metadata: dict
24253 - @rtype: List
24254 - @return: A list of KEYWORDS that have not been accepted,
24255 - together with the keywords that were looked for.
24256 - """
24257 - return self._keywords_manager.getRawMissingKeywords(cpv, metadata["SLOT"], \
24258 - metadata.get("KEYWORDS", ""), metadata.get('repository'), \
24259 - self.get("ACCEPT_KEYWORDS", ""))
24260 -
24261 - def _getPKeywords(self, cpv, metadata):
24262 - global_accept_keywords = self.get("ACCEPT_KEYWORDS", "")
24263 -
24264 - return self._keywords_manager.getPKeywords(cpv, metadata["SLOT"], \
24265 - metadata.get('repository'), global_accept_keywords)
24266 -
24267 - def _getMissingLicenses(self, cpv, metadata):
24268 - """
24269 - Take a LICENSE string and return a list of any licenses that the user
24270 - may need to accept for the given package. The returned list will not
24271 - contain any licenses that have already been accepted. This method
24272 - can throw an InvalidDependString exception.
24273 -
24274 - @param cpv: The package name (for package.license support)
24275 - @type cpv: String
24276 - @param metadata: A dictionary of raw package metadata
24277 - @type metadata: dict
24278 - @rtype: List
24279 - @return: A list of licenses that have not been accepted.
24280 - """
24281 - return self._license_manager.getMissingLicenses( \
24282 - cpv, metadata["USE"], metadata["LICENSE"], metadata["SLOT"], metadata.get('repository'))
24283 -
24284 - def _getMissingProperties(self, cpv, metadata):
24285 - """
24286 - Take a PROPERTIES string and return a list of any properties the user
24287 - may need to accept for the given package. The returned list will not
24288 - contain any properties that have already been accepted. This method
24289 - can throw an InvalidDependString exception.
24290 -
24291 - @param cpv: The package name (for package.properties support)
24292 - @type cpv: String
24293 - @param metadata: A dictionary of raw package metadata
24294 - @type metadata: dict
24295 - @rtype: List
24296 - @return: A list of properties that have not been accepted.
24297 - """
24298 - accept_properties = self._accept_properties
24299 - try:
24300 - cpv.slot
24301 - except AttributeError:
24302 - cpv = _pkg_str(cpv, metadata=metadata, settings=self)
24303 - cp = cpv_getkey(cpv)
24304 - cpdict = self._ppropertiesdict.get(cp)
24305 - if cpdict:
24306 - pproperties_list = ordered_by_atom_specificity(cpdict, cpv)
24307 - if pproperties_list:
24308 - accept_properties = list(self._accept_properties)
24309 - for x in pproperties_list:
24310 - accept_properties.extend(x)
24311 -
24312 - properties_str = metadata.get("PROPERTIES", "")
24313 - properties = set(use_reduce(properties_str, matchall=1, flat=True))
24314 -
24315 - acceptable_properties = set()
24316 - for x in accept_properties:
24317 - if x == '*':
24318 - acceptable_properties.update(properties)
24319 - elif x == '-*':
24320 - acceptable_properties.clear()
24321 - elif x[:1] == '-':
24322 - acceptable_properties.discard(x[1:])
24323 - else:
24324 - acceptable_properties.add(x)
24325 -
24326 - if "?" in properties_str:
24327 - use = metadata["USE"].split()
24328 - else:
24329 - use = []
24330 -
24331 - return [x for x in use_reduce(properties_str, uselist=use, flat=True)
24332 - if x not in acceptable_properties]
24333 -
24334 - def _getMissingRestrict(self, cpv, metadata):
24335 - """
24336 - Take a RESTRICT string and return a list of any tokens the user
24337 - may need to accept for the given package. The returned list will not
24338 - contain any tokens that have already been accepted. This method
24339 - can throw an InvalidDependString exception.
24340 -
24341 - @param cpv: The package name (for package.accept_restrict support)
24342 - @type cpv: String
24343 - @param metadata: A dictionary of raw package metadata
24344 - @type metadata: dict
24345 - @rtype: List
24346 - @return: A list of tokens that have not been accepted.
24347 - """
24348 - accept_restrict = self._accept_restrict
24349 - try:
24350 - cpv.slot
24351 - except AttributeError:
24352 - cpv = _pkg_str(cpv, metadata=metadata, settings=self)
24353 - cp = cpv_getkey(cpv)
24354 - cpdict = self._paccept_restrict.get(cp)
24355 - if cpdict:
24356 - paccept_restrict_list = ordered_by_atom_specificity(cpdict, cpv)
24357 - if paccept_restrict_list:
24358 - accept_restrict = list(self._accept_restrict)
24359 - for x in paccept_restrict_list:
24360 - accept_restrict.extend(x)
24361 -
24362 - restrict_str = metadata.get("RESTRICT", "")
24363 - all_restricts = set(use_reduce(restrict_str, matchall=1, flat=True))
24364 -
24365 - acceptable_restricts = set()
24366 - for x in accept_restrict:
24367 - if x == '*':
24368 - acceptable_restricts.update(all_restricts)
24369 - elif x == '-*':
24370 - acceptable_restricts.clear()
24371 - elif x[:1] == '-':
24372 - acceptable_restricts.discard(x[1:])
24373 - else:
24374 - acceptable_restricts.add(x)
24375 -
24376 - if "?" in restrict_str:
24377 - use = metadata["USE"].split()
24378 - else:
24379 - use = []
24380 -
24381 - return [x for x in use_reduce(restrict_str, uselist=use, flat=True)
24382 - if x not in acceptable_restricts]
24383 -
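# Standalone sketch of the acceptance algebra shared by the two methods
# above: '*' accepts every token seen, '-*' resets the set, '-x' retracts
# x, and a bare token adds itself.
def acceptable(accept_tokens, all_tokens):
    acc = set()
    for x in accept_tokens:
        if x == "*":
            acc.update(all_tokens)
        elif x == "-*":
            acc.clear()
        elif x.startswith("-"):
            acc.discard(x[1:])
        else:
            acc.add(x)
    return acc

# ACCEPT_RESTRICT="* -fetch" accepts everything except "fetch":
print(sorted(acceptable(["*", "-fetch"], {"fetch", "mirror", "test"})))
# ['mirror', 'test']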
24384 - def _accept_chost(self, cpv, metadata):
24385 - """
24386 - @return: True if pkg CHOST is accepted, False otherwise.
24387 - """
24388 - if self._accept_chost_re is None:
24389 - accept_chost = self.get("ACCEPT_CHOSTS", "").split()
24390 - if not accept_chost:
24391 - chost = self.get("CHOST")
24392 - if chost:
24393 - accept_chost.append(chost)
24394 - if not accept_chost:
24395 - self._accept_chost_re = re.compile(".*")
24396 - elif len(accept_chost) == 1:
24397 - try:
24398 - self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
24399 - except re.error as e:
24400 - writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
24401 - (accept_chost[0], e), noiselevel=-1)
24402 - self._accept_chost_re = re.compile("^$")
24403 - else:
24404 - try:
24405 - self._accept_chost_re = re.compile(
24406 - r'^(%s)$' % "|".join(accept_chost))
24407 - except re.error as e:
24408 - writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
24409 - (" ".join(accept_chost), e), noiselevel=-1)
24410 - self._accept_chost_re = re.compile("^$")
24411 -
24412 - pkg_chost = metadata.get('CHOST', '')
24413 - return not pkg_chost or \
24414 - self._accept_chost_re.match(pkg_chost) is not None
24415 -
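# Minimal sketch of the matching above: ACCEPT_CHOSTS entries are treated
# as anchored regular expressions, OR-ed together when more than one is
# configured (the values shown are examples, not defaults).
import re

accept_chost = ["x86_64-pc-linux-gnu", "i.86-pc-linux-gnu"]
accept_chost_re = re.compile(r"^(%s)$" % "|".join(accept_chost))
print(bool(accept_chost_re.match("i686-pc-linux-gnu")))          # True
print(bool(accept_chost_re.match("aarch64-unknown-linux-gnu")))  # False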
24416 - def setinst(self, mycpv, mydbapi):
24417 - """This used to update the preferences for old-style virtuals.
24418 - It is a no-op now."""
24419 - pass
24420 -
24421 - def reload(self):
24422 - """Reload things like /etc/profile.env that can change during runtime."""
24423 - env_d_filename = os.path.join(self["EROOT"], "etc", "profile.env")
24424 - self.configdict["env.d"].clear()
24425 - env_d = getconfig(env_d_filename,
24426 - tolerant=self._tolerant, expand=False)
24427 - if env_d:
24428 - # env_d will be None if profile.env doesn't exist.
24429 - for k in self._env_d_blacklist:
24430 - env_d.pop(k, None)
24431 - self.configdict["env.d"].update(env_d)
24432 -
24433 - def regenerate(self, useonly=0, use_cache=None):
24434 - """
24435 - Regenerate settings
24436 - This involves regenerating valid USE flags, re-expanding USE_EXPAND flags,
24437 - re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
24438 - variables. This also updates the env.d configdict; useful in case an ebuild
24439 - changes the environment.
24440 -
24441 - If FEATURES has already been stacked, it is not stacked twice.
24442 -
24443 - @param useonly: Only regenerate USE flags (not any other incrementals)
24444 - @type useonly: Boolean
24445 - @rtype: None
24446 - """
24447 -
24448 - if use_cache is not None:
24449 - warnings.warn("The use_cache parameter for config.regenerate() is deprecated and without effect.",
24450 - DeprecationWarning, stacklevel=2)
24451 -
24452 - self.modifying()
24453 -
24454 - if useonly:
24455 - myincrementals=["USE"]
24456 - else:
24457 - myincrementals = self.incrementals
24458 - myincrementals = set(myincrementals)
24459 -
24460 - # Process USE last because it depends on USE_EXPAND which is also
24461 - # an incremental!
24462 - myincrementals.discard("USE")
24463 -
24464 - mydbs = self.configlist[:-1]
24465 - mydbs.append(self.backupenv)
24466 -
24467 - # ACCEPT_LICENSE is a lazily evaluated incremental, so that * can be
24468 - # used to match all licenses without ever having to explicitly expand
24469 - # it to all licenses.
24470 - if self.local_config:
24471 - mysplit = []
24472 - for curdb in mydbs:
24473 - mysplit.extend(curdb.get('ACCEPT_LICENSE', '').split())
24474 - mysplit = prune_incremental(mysplit)
24475 - accept_license_str = ' '.join(mysplit) or '* -@EULA'
24476 - self.configlist[-1]['ACCEPT_LICENSE'] = accept_license_str
24477 - self._license_manager.set_accept_license_str(accept_license_str)
24478 - else:
24479 - # repoman will accept any license
24480 - self._license_manager.set_accept_license_str("*")
24481 -
24482 - # ACCEPT_PROPERTIES works like ACCEPT_LICENSE, without groups
24483 - if self.local_config:
24484 - mysplit = []
24485 - for curdb in mydbs:
24486 - mysplit.extend(curdb.get('ACCEPT_PROPERTIES', '').split())
24487 - mysplit = prune_incremental(mysplit)
24488 - self.configlist[-1]['ACCEPT_PROPERTIES'] = ' '.join(mysplit)
24489 - if tuple(mysplit) != self._accept_properties:
24490 - self._accept_properties = tuple(mysplit)
24491 - else:
24492 - # repoman will accept any property
24493 - self._accept_properties = ('*',)
24494 -
24495 - if self.local_config:
24496 - mysplit = []
24497 - for curdb in mydbs:
24498 - mysplit.extend(curdb.get('ACCEPT_RESTRICT', '').split())
24499 - mysplit = prune_incremental(mysplit)
24500 - self.configlist[-1]['ACCEPT_RESTRICT'] = ' '.join(mysplit)
24501 - if tuple(mysplit) != self._accept_restrict:
24502 - self._accept_restrict = tuple(mysplit)
24503 - else:
24504 - # repoman will accept any restriction
24505 - self._accept_restrict = ('*',)
24506 -
24507 - increment_lists = {}
24508 - for k in myincrementals:
24509 - incremental_list = []
24510 - increment_lists[k] = incremental_list
24511 - for curdb in mydbs:
24512 - v = curdb.get(k)
24513 - if v is not None:
24514 - incremental_list.append(v.split())
24515 -
24516 - if 'FEATURES' in increment_lists:
24517 - increment_lists['FEATURES'].append(self._features_overrides)
24518 -
24519 - myflags = set()
24520 - for mykey, incremental_list in increment_lists.items():
24521 -
24522 - myflags.clear()
24523 - for mysplit in incremental_list:
24524 -
24525 - for x in mysplit:
24526 - if x=="-*":
24527 - # "-*" is a special "minus" var that means "unset all settings".
24528 - # so USE="-* gnome" will have *just* gnome enabled.
24529 - myflags.clear()
24530 - continue
24531 -
24532 - if x[0]=="+":
24533 - # Not legal. People assume too much. Complain.
24534 - writemsg(colorize("BAD",
24535 - _("%s values should not start with a '+': %s") % (mykey,x)) \
24536 - + "\n", noiselevel=-1)
24537 - x=x[1:]
24538 - if not x:
24539 - continue
24540 -
24541 - if x[0] == "-":
24542 - myflags.discard(x[1:])
24543 - continue
24544 -
24545 - # We got here, so add it now.
24546 - myflags.add(x)
24547 -
24548 - #store setting in last element of configlist, the original environment:
24549 - if myflags or mykey in self:
24550 - self.configlist[-1][mykey] = " ".join(sorted(myflags))
24551 -
24552 - # Do the USE calculation last because it depends on USE_EXPAND.
24553 - use_expand = self.get("USE_EXPAND", "").split()
24554 - use_expand_dict = self._use_expand_dict
24555 - use_expand_dict.clear()
24556 - for k in use_expand:
24557 - v = self.get(k)
24558 - if v is not None:
24559 - use_expand_dict[k] = v
24560 -
24561 - use_expand_unprefixed = self.get("USE_EXPAND_UNPREFIXED", "").split()
24562 -
24563 - # In order to best accommodate the long-standing practice of
24564 - # setting default USE_EXPAND variables in the profile's
24565 - # make.defaults, we translate these variables into their
24566 - # equivalent USE flags so that useful incremental behavior
24567 - # is enabled (for sub-profiles).
24568 - configdict_defaults = self.configdict['defaults']
24569 - if self._make_defaults is not None:
24570 - for i, cfg in enumerate(self._make_defaults):
24571 - if not cfg:
24572 - self.make_defaults_use.append("")
24573 - continue
24574 - use = cfg.get("USE", "")
24575 - expand_use = []
24576 -
24577 - for k in use_expand_unprefixed:
24578 - v = cfg.get(k)
24579 - if v is not None:
24580 - expand_use.extend(v.split())
24581 -
24582 - for k in use_expand_dict:
24583 - v = cfg.get(k)
24584 - if v is None:
24585 - continue
24586 - prefix = k.lower() + '_'
24587 - for x in v.split():
24588 - if x[:1] == '-':
24589 - expand_use.append('-' + prefix + x[1:])
24590 - else:
24591 - expand_use.append(prefix + x)
24592 -
24593 - if expand_use:
24594 - expand_use.append(use)
24595 - use = ' '.join(expand_use)
24596 - self.make_defaults_use.append(use)
24597 - self.make_defaults_use = tuple(self.make_defaults_use)
24598 - # Preserve both positive and negative flags here, since
24599 - # negative flags may later interact with other flags pulled
24600 - # in via USE_ORDER.
24601 - configdict_defaults['USE'] = ' '.join(
24602 - filter(None, self.make_defaults_use))
24603 - # Set to None so this code only runs once.
24604 - self._make_defaults = None
24605 -
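# (Illustration of the translation above: a make.defaults line such as
# PYTHON_TARGETS="python3_9" becomes the USE flag "python_targets_python3_9",
# and PYTHON_TARGETS="-python2_7" becomes "-python_targets_python2_7";
# the variable name and values are examples only.)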
24606 - if not self.uvlist:
24607 - for x in self["USE_ORDER"].split(":"):
24608 - if x in self.configdict:
24609 - self.uvlist.append(self.configdict[x])
24610 - self.uvlist.reverse()
24611 -
24612 - # For optimal performance, use slice
24613 - # comparison instead of startswith().
24614 - iuse = self.configdict["pkg"].get("IUSE")
24615 - if iuse is not None:
24616 - iuse = [x.lstrip("+-") for x in iuse.split()]
24617 - myflags = set()
24618 - for curdb in self.uvlist:
24619 -
24620 - for k in use_expand_unprefixed:
24621 - v = curdb.get(k)
24622 - if v is None:
24623 - continue
24624 - for x in v.split():
24625 - if x[:1] == "-":
24626 - myflags.discard(x[1:])
24627 - else:
24628 - myflags.add(x)
24629 -
24630 - cur_use_expand = [x for x in use_expand if x in curdb]
24631 - mysplit = curdb.get("USE", "").split()
24632 - if not mysplit and not cur_use_expand:
24633 - continue
24634 - for x in mysplit:
24635 - if x == "-*":
24636 - myflags.clear()
24637 - continue
24638 -
24639 - if x[0] == "+":
24640 - writemsg(colorize("BAD", _("USE flags should not start "
24641 - "with a '+': %s\n") % x), noiselevel=-1)
24642 - x = x[1:]
24643 - if not x:
24644 - continue
24645 -
24646 - if x[0] == "-":
24647 - if x[-2:] == '_*':
24648 - prefix = x[1:-1]
24649 - prefix_len = len(prefix)
24650 - myflags.difference_update(
24651 - [y for y in myflags if \
24652 - y[:prefix_len] == prefix])
24653 - myflags.discard(x[1:])
24654 - continue
24655 -
24656 - if iuse is not None and x[-2:] == '_*':
24657 - # Expand wildcards here, so that cases like
24658 - # USE="linguas_* -linguas_en_US" work correctly.
24659 - prefix = x[:-1]
24660 - prefix_len = len(prefix)
24661 - has_iuse = False
24662 - for y in iuse:
24663 - if y[:prefix_len] == prefix:
24664 - has_iuse = True
24665 - myflags.add(y)
24666 - if not has_iuse:
24667 - # There are no matching IUSE, so allow the
24668 - # wildcard to pass through. This allows
24669 - # linguas_* to trigger unset LINGUAS in
24670 - # cases when no linguas_ flags are in IUSE.
24671 - myflags.add(x)
24672 - else:
24673 - myflags.add(x)
24674 -
24675 - if curdb is configdict_defaults:
24676 - # USE_EXPAND flags from make.defaults are handled
24677 - # earlier, in order to provide useful incremental
24678 - # behavior (for sub-profiles).
24679 - continue
24680 -
24681 - for var in cur_use_expand:
24682 - var_lower = var.lower()
24683 - is_not_incremental = var not in myincrementals
24684 - if is_not_incremental:
24685 - prefix = var_lower + "_"
24686 - prefix_len = len(prefix)
24687 - for x in list(myflags):
24688 - if x[:prefix_len] == prefix:
24689 - myflags.remove(x)
24690 - for x in curdb[var].split():
24691 - if x[0] == "+":
24692 - if is_not_incremental:
24693 - writemsg(colorize("BAD", _("Invalid '+' "
24694 - "operator in non-incremental variable "
24695 - "'%s': '%s'\n") % (var, x)), noiselevel=-1)
24696 - continue
24697 - else:
24698 - writemsg(colorize("BAD", _("Invalid '+' "
24699 - "operator in incremental variable "
24700 - "'%s': '%s'\n") % (var, x)), noiselevel=-1)
24701 - x = x[1:]
24702 - if x[0] == "-":
24703 - if is_not_incremental:
24704 - writemsg(colorize("BAD", _("Invalid '-' "
24705 - "operator in non-incremental variable "
24706 - "'%s': '%s'\n") % (var, x)), noiselevel=-1)
24707 - continue
24708 - myflags.discard(var_lower + "_" + x[1:])
24709 - continue
24710 - myflags.add(var_lower + "_" + x)
24711 -
24712 - if hasattr(self, "features"):
24713 - self.features._features.clear()
24714 - else:
24715 - self.features = features_set(self)
24716 - self.features._features.update(self.get('FEATURES', '').split())
24717 - self.features._sync_env_var()
24718 - self.features._validate()
24719 -
24720 - myflags.update(self.useforce)
24721 - arch = self.configdict["defaults"].get("ARCH")
24722 - if arch:
24723 - myflags.add(arch)
24724 -
24725 - myflags.difference_update(self.usemask)
24726 - self.configlist[-1]["USE"]= " ".join(sorted(myflags))
24727 -
24728 - if self.mycpv is None:
24729 - # Generate global USE_EXPAND variables settings that are
24730 - # consistent with USE, for display by emerge --info. For
24731 - # package instances, these are instead generated via
24732 - # setcpv().
24733 - for k in use_expand:
24734 - prefix = k.lower() + '_'
24735 - prefix_len = len(prefix)
24736 - expand_flags = set( x[prefix_len:] for x in myflags \
24737 - if x[:prefix_len] == prefix )
24738 - var_split = use_expand_dict.get(k, '').split()
24739 - var_split = [ x for x in var_split if x in expand_flags ]
24740 - var_split.extend(sorted(expand_flags.difference(var_split)))
24741 - if var_split:
24742 - self.configlist[-1][k] = ' '.join(var_split)
24743 - elif k in self:
24744 - self.configlist[-1][k] = ''
24745 -
24746 - for k in use_expand_unprefixed:
24747 - var_split = self.get(k, '').split()
24748 - var_split = [ x for x in var_split if x in myflags ]
24749 - if var_split:
24750 - self.configlist[-1][k] = ' '.join(var_split)
24751 - elif k in self:
24752 - self.configlist[-1][k] = ''
24753 -
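# Standalone sketch of the incremental stacking performed above: layers
# apply in order, "-flag" retracts a flag, and "-*" discards everything
# accumulated so far.
def stack_incremental(layers):
    flags = set()
    for layer in layers:
        for x in layer.split():
            if x == "-*":
                flags.clear()
            elif x.startswith("-"):
                flags.discard(x[1:])
            else:
                flags.add(x)
    return flags

# make.defaults, make.conf and the environment stack in that order, so a
# later USE="-* gnome" leaves *just* gnome enabled:
print(stack_incremental(["X alsa", "-alsa qt5", "-* gnome"]))  # {'gnome'}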
24754 - @property
24755 - def virts_p(self):
24756 - warnings.warn("portage config.virts_p attribute " + \
24757 - "is deprecated, use config.get_virts_p()",
24758 - DeprecationWarning, stacklevel=2)
24759 - return self.get_virts_p()
24760 -
24761 - @property
24762 - def virtuals(self):
24763 - warnings.warn("portage config.virtuals attribute " + \
24764 - "is deprecated, use config.getvirtuals()",
24765 - DeprecationWarning, stacklevel=2)
24766 - return self.getvirtuals()
24767 -
24768 - def get_virts_p(self):
24769 - # Ensure that we don't trigger the _treeVirtuals
24770 - # assertion in VirtualsManager._compile_virtuals().
24771 - self.getvirtuals()
24772 - return self._virtuals_manager.get_virts_p()
24773 -
24774 - def getvirtuals(self):
24775 - if self._virtuals_manager._treeVirtuals is None:
24776 - #Hack around the fact that VirtualsManager needs a vartree
24777 - #and vartree needs a config instance.
24778 - #This code should be part of VirtualsManager.getvirtuals().
24779 - if self.local_config:
24780 - temp_vartree = vartree(settings=self)
24781 - self._virtuals_manager._populate_treeVirtuals(temp_vartree)
24782 - else:
24783 - self._virtuals_manager._treeVirtuals = {}
24784 -
24785 - return self._virtuals_manager.getvirtuals()
24786 -
24787 - def _populate_treeVirtuals_if_needed(self, vartree):
24788 - """Reduce the provides into a list by CP."""
24789 - if self._virtuals_manager._treeVirtuals is None:
24790 - if self.local_config:
24791 - self._virtuals_manager._populate_treeVirtuals(vartree)
24792 - else:
24793 - self._virtuals_manager._treeVirtuals = {}
24794 -
24795 - def __delitem__(self,mykey):
24796 - self.pop(mykey)
24797 -
24798 - def __getitem__(self, key):
24799 - try:
24800 - return self._getitem(key)
24801 - except KeyError:
24802 - if portage._internal_caller:
24803 - stack = traceback.format_stack()[:-1] + traceback.format_exception(*sys.exc_info())[1:]
24804 - try:
24805 - # Ensure that output is written to terminal.
24806 - with open("/dev/tty", "w") as f:
24807 - f.write("=" * 96 + "\n")
24808 - f.write("=" * 8 + " Traceback for invalid call to portage.package.ebuild.config.config.__getitem__ " + "=" * 8 + "\n")
24809 - f.writelines(stack)
24810 - f.write("=" * 96 + "\n")
24811 - except Exception:
24812 - pass
24813 - raise
24814 - else:
24815 - warnings.warn(_("Passing nonexistent key %r to %s is deprecated. Use %s instead.") %
24816 - (key, "portage.package.ebuild.config.config.__getitem__",
24817 - "portage.package.ebuild.config.config.get"), DeprecationWarning, stacklevel=2)
24818 - return ""
24819 -
24820 - def _getitem(self, mykey):
24821 -
24822 - if mykey in self._constant_keys:
24823 - # These two point to temporary values when
24824 - # portage plans to update itself.
24825 - if mykey == "PORTAGE_BIN_PATH":
24826 - return portage._bin_path
24827 - if mykey == "PORTAGE_PYM_PATH":
24828 - return portage._pym_path
24829 -
24830 - if mykey == "PORTAGE_PYTHONPATH":
24831 - value = [x for x in \
24832 - self.backupenv.get("PYTHONPATH", "").split(":") if x]
24833 - need_pym_path = True
24834 - if value:
24835 - try:
24836 - need_pym_path = not os.path.samefile(value[0],
24837 - portage._pym_path)
24838 - except OSError:
24839 - pass
24840 - if need_pym_path:
24841 - value.insert(0, portage._pym_path)
24842 - return ":".join(value)
24843 -
24844 - if mykey == "PORTAGE_GID":
24845 - return "%s" % portage_gid
24846 -
24847 - for d in self.lookuplist:
24848 - try:
24849 - return d[mykey]
24850 - except KeyError:
24851 - pass
24852 -
24853 - deprecated_key = self._deprecated_keys.get(mykey)
24854 - if deprecated_key is not None:
24855 - value = self._getitem(deprecated_key)
24856 - #warnings.warn(_("Key %s has been renamed to %s. Please "
24857 - # "update your configuration") % (deprecated_key, mykey),
24858 - # UserWarning)
24859 - return value
24860 -
24861 - raise KeyError(mykey)
24862 -
24863 - def get(self, k, x=None):
24864 - try:
24865 - return self._getitem(k)
24866 - except KeyError:
24867 - return x
24868 -
24869 - def pop(self, key, *args):
24870 - self.modifying()
24871 - if len(args) > 1:
24872 - raise TypeError(
24873 - "pop expected at most 2 arguments, got " + \
24874 - repr(1 + len(args)))
24875 - v = self
24876 - for d in reversed(self.lookuplist):
24877 - v = d.pop(key, v)
24878 - if v is self:
24879 - if args:
24880 - return args[0]
24881 - raise KeyError(key)
24882 - return v
24883 -
24884 - def __contains__(self, mykey):
24885 - """Called to implement membership test operators (in and not in)."""
24886 - try:
24887 - self._getitem(mykey)
24888 - except KeyError:
24889 - return False
24890 - else:
24891 - return True
24892 -
24893 - def setdefault(self, k, x=None):
24894 - v = self.get(k)
24895 - if v is not None:
24896 - return v
24897 - self[k] = x
24898 - return x
24899 -
24900 - def __iter__(self):
24901 - keys = set()
24902 - keys.update(self._constant_keys)
24903 - for d in self.lookuplist:
24904 - keys.update(d)
24905 - return iter(keys)
24906 -
24907 - def iterkeys(self):
24908 - return iter(self)
24909 -
24910 - def iteritems(self):
24911 - for k in self:
24912 - yield (k, self._getitem(k))
24913 -
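# Usage sketch: a config instance exposes the usual mapping protocol over
# all of its layers; get() is the safe accessor for keys that may be unset.
import portage

settings = portage.config(clone=portage.settings)
print(settings["ROOT"])                    # direct lookup; ROOT is always set
print(settings.get("NO_SUCH_KEY", "n/a"))  # get() never raises -> 'n/a'
print("FEATURES" in settings)              # membership via __contains__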
24914 - def __setitem__(self,mykey,myvalue):
24915 - "set a value; will be thrown away at reset() time"
24916 - if not isinstance(myvalue, str):
24917 - raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
24918 -
24919 - # Avoid potential UnicodeDecodeError exceptions later.
24920 - mykey = _unicode_decode(mykey)
24921 - myvalue = _unicode_decode(myvalue)
24922 -
24923 - self.modifying()
24924 - self.modifiedkeys.append(mykey)
24925 - self.configdict["env"][mykey]=myvalue
24926 -
24927 - def environ(self):
24928 - "return our locally-maintained environment"
24929 - mydict={}
24930 - environ_filter = self._environ_filter
24931 -
24932 - eapi = self.get('EAPI')
24933 - eapi_attrs = _get_eapi_attrs(eapi)
24934 - phase = self.get('EBUILD_PHASE')
24935 - emerge_from = self.get('EMERGE_FROM')
24936 - filter_calling_env = False
24937 - if self.mycpv is not None and \
24938 - not (emerge_from == 'ebuild' and phase == 'setup') and \
24939 - phase not in ('clean', 'cleanrm', 'depend', 'fetch'):
24940 - temp_dir = self.get('T')
24941 - if temp_dir is not None and \
24942 - os.path.exists(os.path.join(temp_dir, 'environment')):
24943 - filter_calling_env = True
24944 -
24945 - environ_whitelist = self._environ_whitelist
24946 - for x, myvalue in self.iteritems():
24947 - if x in environ_filter:
24948 - continue
24949 - if not isinstance(myvalue, str):
24950 - writemsg(_("!!! Non-string value in config: %s=%s\n") % \
24951 - (x, myvalue), noiselevel=-1)
24952 - continue
24953 - if filter_calling_env and \
24954 - x not in environ_whitelist and \
24955 - not self._environ_whitelist_re.match(x):
24956 - # Do not allow anything to leak into the ebuild
24957 - # environment unless it is explicitly whitelisted.
24958 - # This ensures that variables unset by the ebuild
24959 - # remain unset (bug #189417).
24960 - continue
24961 - mydict[x] = myvalue
24962 - if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
24963 - writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
24964 - mydict["HOME"]=mydict["BUILD_PREFIX"][:]
24965 -
24966 - if filter_calling_env:
24967 - if phase:
24968 - whitelist = []
24969 - if "rpm" == phase:
24970 - whitelist.append("RPMDIR")
24971 - for k in whitelist:
24972 - v = self.get(k)
24973 - if v is not None:
24974 - mydict[k] = v
24975 -
24976 - # At some point we may want to stop exporting FEATURES to the ebuild
24977 - # environment, in order to prevent ebuilds from abusing it. In
24978 - # preparation for that, export it as PORTAGE_FEATURES so that bashrc
24979 - # users will be able to migrate any FEATURES conditional code to
24980 - # use this alternative variable.
24981 - mydict["PORTAGE_FEATURES"] = self["FEATURES"]
24982 -
24983 - # Filtered by IUSE and implicit IUSE.
24984 - mydict["USE"] = self.get("PORTAGE_USE", "")
24985 -
24986 - # Don't export AA to the ebuild environment in EAPIs that forbid it
24987 - if not eapi_exports_AA(eapi):
24988 - mydict.pop("AA", None)
24989 -
24990 - if not eapi_exports_merge_type(eapi):
24991 - mydict.pop("MERGE_TYPE", None)
24992 -
24993 - src_like_phase = (phase == 'setup' or
24994 - _phase_func_map.get(phase, '').startswith('src_'))
24995 -
24996 - if not (src_like_phase and eapi_attrs.sysroot):
24997 - mydict.pop("ESYSROOT", None)
24998 -
24999 - if not (src_like_phase and eapi_attrs.broot):
25000 - mydict.pop("BROOT", None)
25001 -
25002 - # Prefix variables are supported beginning with EAPI 3, or when
25003 - # force-prefix is in FEATURES, since older EAPIs would otherwise be
25004 - # useless with prefix configurations. This brings compatibility with
25005 - # the prefix branch of portage, which also supports EPREFIX for all
25006 - # EAPIs (for obvious reasons).
25007 - if phase == 'depend' or \
25008 - ('force-prefix' not in self.features and
25009 - eapi is not None and not eapi_supports_prefix(eapi)):
25010 - mydict.pop("ED", None)
25011 - mydict.pop("EPREFIX", None)
25012 - mydict.pop("EROOT", None)
25013 - mydict.pop("ESYSROOT", None)
25014 -
25015 - if phase not in ("pretend", "setup", "preinst", "postinst") or \
25016 - not eapi_exports_replace_vars(eapi):
25017 - mydict.pop("REPLACING_VERSIONS", None)
25018 -
25019 - if phase not in ("prerm", "postrm") or \
25020 - not eapi_exports_replace_vars(eapi):
25021 - mydict.pop("REPLACED_BY_VERSION", None)
25022 -
25023 - if phase is not None and eapi_attrs.exports_EBUILD_PHASE_FUNC:
25024 - phase_func = _phase_func_map.get(phase)
25025 - if phase_func is not None:
25026 - mydict["EBUILD_PHASE_FUNC"] = phase_func
25027 -
25028 - if eapi_attrs.posixish_locale:
25029 - split_LC_ALL(mydict)
25030 - mydict["LC_COLLATE"] = "C"
25031 - # check_locale() returns None when the check cannot be executed.
25032 - if check_locale(silent=True, env=mydict) is False:
25033 - # try another locale
25034 - for l in ("C.UTF-8", "en_US.UTF-8", "en_GB.UTF-8", "C"):
25035 - mydict["LC_CTYPE"] = l
25036 - if check_locale(silent=True, env=mydict):
25037 - # TODO: output the following only once
25038 - # writemsg(_("!!! LC_CTYPE unsupported, using %s instead\n")
25039 - # % mydict["LC_CTYPE"])
25040 - break
25041 - else:
25042 - raise AssertionError("C locale did not pass the test!")
25043 -
25044 - if not eapi_attrs.exports_PORTDIR:
25045 - mydict.pop("PORTDIR", None)
25046 - if not eapi_attrs.exports_ECLASSDIR:
25047 - mydict.pop("ECLASSDIR", None)
25048 -
25049 - if not eapi_attrs.path_variables_end_with_trailing_slash:
25050 - for v in ("D", "ED", "ROOT", "EROOT", "ESYSROOT", "BROOT"):
25051 - if v in mydict:
25052 - mydict[v] = mydict[v].rstrip(os.path.sep)
25053 -
25054 - # Since SYSROOT=/ interacts badly with autotools.eclass (bug 654600),
25055 - # and no EAPI expects SYSROOT to have a trailing slash, always strip
25056 - # the trailing slash from SYSROOT.
25057 - if 'SYSROOT' in mydict:
25058 - mydict['SYSROOT'] = mydict['SYSROOT'].rstrip(os.sep)
25059 -
25060 - try:
25061 - builddir = mydict["PORTAGE_BUILDDIR"]
25062 - distdir = mydict["DISTDIR"]
25063 - except KeyError:
25064 - pass
25065 - else:
25066 - mydict["PORTAGE_ACTUAL_DISTDIR"] = distdir
25067 - mydict["DISTDIR"] = os.path.join(builddir, "distdir")
25068 -
25069 - return mydict
25070 -
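# Usage sketch: environ() renders the stacked configuration into a plain
# dict suitable for spawning an ebuild phase; with no package and no phase
# set, only the static environ_filter applies.
import portage

settings = portage.config(clone=portage.settings)
env = settings.environ()
print(env["PORTAGE_FEATURES"] == settings["FEATURES"])  # True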
25071 - def thirdpartymirrors(self):
25072 - if getattr(self, "_thirdpartymirrors", None) is None:
25073 - thirdparty_lists = []
25074 - for repo_name in reversed(self.repositories.prepos_order):
25075 - thirdparty_lists.append(grabdict(os.path.join(
25076 - self.repositories[repo_name].location,
25077 - "profiles", "thirdpartymirrors")))
25078 - self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
25079 - return self._thirdpartymirrors
25080 -
25081 - def archlist(self):
25082 - _archlist = []
25083 - for myarch in self["PORTAGE_ARCHLIST"].split():
25084 - _archlist.append(myarch)
25085 - _archlist.append("~" + myarch)
25086 - return _archlist
25087 -
25088 - def selinux_enabled(self):
25089 - if getattr(self, "_selinux_enabled", None) is None:
25090 - self._selinux_enabled = 0
25091 - if "selinux" in self["USE"].split():
25092 - if selinux:
25093 - if selinux.is_selinux_enabled() == 1:
25094 - self._selinux_enabled = 1
25095 - else:
25096 - self._selinux_enabled = 0
25097 - else:
25098 - writemsg(_("!!! SELinux module not found. Please verify that it was installed.\n"),
25099 - noiselevel=-1)
25100 - self._selinux_enabled = 0
25101 -
25102 - return self._selinux_enabled
25103 -
25104 - keys = __iter__
25105 - items = iteritems
25106 ++
25107 + """
25108 + This class encompasses the main portage configuration. Data is pulled from
25109 + ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
25110 + parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user-specified
25111 + overrides.
25112 +
25113 + Generally if you need data like USE flags, FEATURES, environment variables,
25114 + virtuals, etc., you look in here.
25115 + """
25116 +
25117 + _constant_keys = frozenset(
25118 + ["PORTAGE_BIN_PATH", "PORTAGE_GID", "PORTAGE_PYM_PATH", "PORTAGE_PYTHONPATH"]
25119 + )
25120 +
25121 + _deprecated_keys = {
25122 + "PORTAGE_LOGDIR": "PORT_LOGDIR",
25123 + "PORTAGE_LOGDIR_CLEAN": "PORT_LOGDIR_CLEAN",
25124 + "SIGNED_OFF_BY": "DCO_SIGNED_OFF_BY",
25125 + }
25126 +
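# Illustrative fallback (simplified model of the deprecated-key handling
# in __getitem__): a miss on the new name is retried under its deprecated
# spelling before the lookup gives up.
deprecated = {"PORTAGE_LOGDIR": "PORT_LOGDIR"}
layers = {"PORT_LOGDIR": "/var/log/portage"}  # only the old name is set

def lookup(key):
    if key in layers:
        return layers[key]
    old = deprecated.get(key)
    if old is not None and old in layers:
        return layers[old]
    raise KeyError(key)

print(lookup("PORTAGE_LOGDIR"))  # -> /var/log/portage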
25127 + _setcpv_aux_keys = (
25128 + "BDEPEND",
25129 + "DEFINED_PHASES",
25130 + "DEPEND",
25131 + "EAPI",
25132 + "IDEPEND",
25133 + "INHERITED",
25134 + "IUSE",
25135 + "REQUIRED_USE",
25136 + "KEYWORDS",
25137 + "LICENSE",
25138 + "PDEPEND",
25139 + "PROPERTIES",
25140 + "RDEPEND",
25141 + "SLOT",
25142 + "repository",
25143 + "RESTRICT",
25144 + "LICENSE",
25145 + )
25146 +
25147 + _module_aliases = {
25148 + "cache.metadata_overlay.database": "portage.cache.flat_hash.mtime_md5_database",
25149 + "portage.cache.metadata_overlay.database": "portage.cache.flat_hash.mtime_md5_database",
25150 + }
25151 +
25152 + _case_insensitive_vars = special_env_vars.case_insensitive_vars
25153 + _default_globals = special_env_vars.default_globals
25154 + _env_blacklist = special_env_vars.env_blacklist
25155 + _environ_filter = special_env_vars.environ_filter
25156 + _environ_whitelist = special_env_vars.environ_whitelist
25157 + _environ_whitelist_re = special_env_vars.environ_whitelist_re
25158 + _global_only_vars = special_env_vars.global_only_vars
25159 +
25160 + def __init__(
25161 + self,
25162 + clone=None,
25163 + mycpv=None,
25164 + config_profile_path=None,
25165 + config_incrementals=None,
25166 + config_root=None,
25167 + target_root=None,
25168 + sysroot=None,
25169 + eprefix=None,
25170 + local_config=True,
25171 + env=None,
25172 + _unmatched_removal=False,
25173 + repositories=None,
25174 + ):
25175 + """
25176 + @param clone: If provided, init will use deepcopy to copy by value the instance.
25177 + @type clone: Instance of config class.
25178 + @param mycpv: CPV to load up (see setcpv); this is the same as calling __init__ with mycpv=None
25179 + and then calling instance.setcpv(mycpv).
25180 + @type mycpv: String
25181 + @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
25182 + @type config_profile_path: String
25183 + @param config_incrementals: List of incremental variables
25184 + (defaults to portage.const.INCREMENTALS)
25185 + @type config_incrementals: List
25186 + @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
25187 + @type config_root: String
25188 + @param target_root: the target root, which typically corresponds to the
25189 + value of the $ROOT env variable (default is /)
25190 + @type target_root: String
25191 + @param sysroot: the sysroot to build against, which typically corresponds
25192 + to the value of the $SYSROOT env variable (default is /)
25193 + @type sysroot: String
25194 + @param eprefix: set the EPREFIX variable (default is portage.const.EPREFIX)
25195 + @type eprefix: String
25196 + @param local_config: Enables loading of local config (/etc/portage); used mostly by repoman to
25197 + ignore local config (keywording and unmasking)
25198 + @type local_config: Boolean
25199 + @param env: The calling environment which is used to override settings.
25200 + Defaults to os.environ if unspecified.
25201 + @type env: dict
25202 + @param _unmatched_removal: Enabled by repoman when the
25203 + --unmatched-removal option is given.
25204 + @type _unmatched_removal: Boolean
25205 + @param repositories: Configuration of repositories.
25206 + Defaults to portage.repository.config.load_repository_config().
25207 + @type repositories: Instance of portage.repository.config.RepoConfigLoader class.
25208 + """
25209 +
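# Construction sketch (paths are illustrative): a standalone config for a
# ROOT other than /, as used when operating on another system image.
import portage

settings = portage.config(config_root="/", target_root="/mnt/gentoo")
print(settings["ROOT"], settings["EROOT"])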
25210 + # This is important when config is reloaded after emerge --sync.
25211 + _eapi_cache.clear()
25212 +
25213 + # When initializing the global portage.settings instance, avoid
25214 + # raising exceptions whenever possible since exceptions thrown
25215 + # from 'import portage' or 'import portage.exceptions' statements
25216 + # can practically render the api unusable for api consumers.
25217 + tolerant = hasattr(portage, "_initializing_globals")
25218 + self._tolerant = tolerant
25219 + self._unmatched_removal = _unmatched_removal
25220 +
25221 + self.locked = 0
25222 + self.mycpv = None
25223 + self._setcpv_args_hash = None
25224 + self.puse = ""
25225 + self._penv = []
25226 + self.modifiedkeys = []
25227 + self.uvlist = []
25228 + self._accept_chost_re = None
25229 + self._accept_properties = None
25230 + self._accept_restrict = None
25231 + self._features_overrides = []
25232 + self._make_defaults = None
25233 + self._parent_stable = None
25234 + self._soname_provided = None
25235 +
25236 + # _unknown_features records unknown features that
25237 + # have triggered warning messages, and ensures that
25238 + # the same warning isn't shown twice.
25239 + self._unknown_features = set()
25240 +
25241 + self.local_config = local_config
25242 +
25243 + if clone:
25244 + # For immutable attributes, use shallow copy for
25245 + # speed and memory conservation.
25246 + self._tolerant = clone._tolerant
25247 + self._unmatched_removal = clone._unmatched_removal
25248 + self.categories = clone.categories
25249 + self.depcachedir = clone.depcachedir
25250 + self.incrementals = clone.incrementals
25251 + self.module_priority = clone.module_priority
25252 + self.profile_path = clone.profile_path
25253 + self.profiles = clone.profiles
25254 + self.packages = clone.packages
25255 + self.repositories = clone.repositories
25256 + self.unpack_dependencies = clone.unpack_dependencies
25257 + self._default_features_use = clone._default_features_use
25258 + self._iuse_effective = clone._iuse_effective
25259 + self._iuse_implicit_match = clone._iuse_implicit_match
25260 + self._non_user_variables = clone._non_user_variables
25261 + self._env_d_blacklist = clone._env_d_blacklist
25262 + self._pbashrc = clone._pbashrc
25263 + self._repo_make_defaults = clone._repo_make_defaults
25264 + self.usemask = clone.usemask
25265 + self.useforce = clone.useforce
25266 + self.puse = clone.puse
25267 + self.user_profile_dir = clone.user_profile_dir
25268 + self.local_config = clone.local_config
25269 + self.make_defaults_use = clone.make_defaults_use
25270 + self.mycpv = clone.mycpv
25271 + self._setcpv_args_hash = clone._setcpv_args_hash
25272 + self._soname_provided = clone._soname_provided
25273 + self._profile_bashrc = clone._profile_bashrc
25274 +
25275 + # immutable attributes (internal policy ensures lack of mutation)
25276 + self._locations_manager = clone._locations_manager
25277 + self._use_manager = clone._use_manager
25278 + # force instantiation of lazy immutable objects when cloning, so
25279 + # that they're not instantiated more than once
25280 + self._keywords_manager_obj = clone._keywords_manager
25281 + self._mask_manager_obj = clone._mask_manager
25282 +
25283 + # shared mutable attributes
25284 + self._unknown_features = clone._unknown_features
25285 +
25286 + self.modules = copy.deepcopy(clone.modules)
25287 + self._penv = copy.deepcopy(clone._penv)
25288 +
25289 + self.configdict = copy.deepcopy(clone.configdict)
25290 + self.configlist = [
25291 + self.configdict["env.d"],
25292 + self.configdict["repo"],
25293 + self.configdict["features"],
25294 + self.configdict["pkginternal"],
25295 + self.configdict["globals"],
25296 + self.configdict["defaults"],
25297 + self.configdict["conf"],
25298 + self.configdict["pkg"],
25299 + self.configdict["env"],
25300 + ]
25301 + self.lookuplist = self.configlist[:]
25302 + self.lookuplist.reverse()
25303 + self._use_expand_dict = copy.deepcopy(clone._use_expand_dict)
25304 + self.backupenv = self.configdict["backupenv"]
25305 + self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
25306 + self.pprovideddict = copy.deepcopy(clone.pprovideddict)
25307 + self.features = features_set(self)
25308 + self.features._features = copy.deepcopy(clone.features._features)
25309 + self._features_overrides = copy.deepcopy(clone._features_overrides)
25310 +
25311 + # Strictly speaking _license_manager is not immutable. Users need to ensure that
25312 + # extract_global_changes() is called right after __init__ (if at all).
25313 + # It also has the mutable member _undef_lic_groups. It is used to track
25314 + # undefined license groups, so as not to display an error message for the same
25315 + # group again and again. Because of this, it's useful to share it between
25316 + # all LicenseManager instances.
25317 + self._license_manager = clone._license_manager
25318 +
25319 + # force instantiation of lazy objects when cloning, so
25320 + # that they're not instantiated more than once
25321 + self._virtuals_manager_obj = copy.deepcopy(clone._virtuals_manager)
25322 +
25323 + self._accept_properties = copy.deepcopy(clone._accept_properties)
25324 + self._ppropertiesdict = copy.deepcopy(clone._ppropertiesdict)
25325 + self._accept_restrict = copy.deepcopy(clone._accept_restrict)
25326 + self._paccept_restrict = copy.deepcopy(clone._paccept_restrict)
25327 + self._penvdict = copy.deepcopy(clone._penvdict)
25328 + self._pbashrcdict = copy.deepcopy(clone._pbashrcdict)
25329 + self._expand_map = copy.deepcopy(clone._expand_map)
25330 +
25331 + else:
25332 + # lazily instantiated objects
25333 + self._keywords_manager_obj = None
25334 + self._mask_manager_obj = None
25335 + self._virtuals_manager_obj = None
25336 +
25337 + locations_manager = LocationsManager(
25338 + config_root=config_root,
25339 + config_profile_path=config_profile_path,
25340 + eprefix=eprefix,
25341 + local_config=local_config,
25342 + target_root=target_root,
25343 + sysroot=sysroot,
25344 + )
25345 + self._locations_manager = locations_manager
25346 +
25347 + eprefix = locations_manager.eprefix
25348 + config_root = locations_manager.config_root
25349 + sysroot = locations_manager.sysroot
25350 + esysroot = locations_manager.esysroot
25351 + broot = locations_manager.broot
25352 + abs_user_config = locations_manager.abs_user_config
25353 + make_conf_paths = [
25354 + os.path.join(config_root, "etc", "make.conf"),
25355 + os.path.join(config_root, MAKE_CONF_FILE),
25356 + ]
25357 + try:
25358 + if os.path.samefile(*make_conf_paths):
25359 + make_conf_paths.pop()
25360 + except OSError:
25361 + pass
25362 +
25363 + make_conf_count = 0
25364 + make_conf = {}
25365 + for x in make_conf_paths:
25366 + mygcfg = getconfig(
25367 + x,
25368 + tolerant=tolerant,
25369 + allow_sourcing=True,
25370 + expand=make_conf,
25371 + recursive=True,
25372 + )
25373 + if mygcfg is not None:
25374 + make_conf.update(mygcfg)
25375 + make_conf_count += 1
25376 +
25377 + if make_conf_count == 2:
25378 + writemsg(
25379 + "!!! %s\n"
25380 + % _("Found 2 make.conf files, using both '%s' and '%s'")
25381 + % tuple(make_conf_paths),
25382 + noiselevel=-1,
25383 + )
25384 +
25385 + # __* variables set in make.conf are local and are not propagated.
25386 + make_conf = {k: v for k, v in make_conf.items() if not k.startswith("__")}
25387 +
25388 + # Allow ROOT setting to come from make.conf if it's not overridden
25389 + # by the constructor argument (from the calling environment).
25390 + locations_manager.set_root_override(make_conf.get("ROOT"))
25391 + target_root = locations_manager.target_root
25392 + eroot = locations_manager.eroot
25393 + self.global_config_path = locations_manager.global_config_path
25394 +
25395 + # The expand_map is used for variable substitution
25396 + # in getconfig() calls, and the getconfig() calls
25397 + # update expand_map with the value of each variable
25398 + # assignment that occurs. Variable substitution occurs
25399 + # in the following order, which corresponds to the
25400 + # order of appearance in self.lookuplist:
25401 + #
25402 + # * env.d
25403 + # * make.globals
25404 + # * make.defaults
25405 + # * make.conf
25406 + #
25407 + # Notably absent is "env", since we want to avoid any
25408 + # interaction with the calling environment that might
25409 + # lead to unexpected results.
25410 +
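# (Illustration of the substitution order above: with expand_map primed
# with EPREFIX just below, a make.conf line such as
# DISTDIR="${EPREFIX}/var/cache/distfiles" is expanded by getconfig() at
# read time; the path is an example, not a default.)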
25411 + env_d = (
25412 + getconfig(
25413 + os.path.join(eroot, "etc", "profile.env"),
25414 + tolerant=tolerant,
25415 + expand=False,
25416 + )
25417 + or {}
25418 + )
25419 + expand_map = env_d.copy()
25420 + self._expand_map = expand_map
25421 +
25422 + # Allow make.globals and make.conf to set paths relative to vars like ${EPREFIX}.
25423 + expand_map["BROOT"] = broot
25424 + expand_map["EPREFIX"] = eprefix
25425 + expand_map["EROOT"] = eroot
25426 + expand_map["ESYSROOT"] = esysroot
25427 + expand_map["PORTAGE_CONFIGROOT"] = config_root
25428 + expand_map["ROOT"] = target_root
25429 + expand_map["SYSROOT"] = sysroot
25430 +
25431 + if portage._not_installed:
25432 + make_globals_path = os.path.join(
25433 + PORTAGE_BASE_PATH, "cnf", "make.globals"
25434 + )
25435 + else:
25436 + make_globals_path = os.path.join(
25437 + self.global_config_path, "make.globals"
25438 + )
25439 + old_make_globals = os.path.join(config_root, "etc", "make.globals")
25440 + if os.path.isfile(old_make_globals) and not os.path.samefile(
25441 + make_globals_path, old_make_globals
25442 + ):
25443 + # Don't warn if they refer to the same path, since
25444 + # that can be used for backward compatibility with
25445 + # old software.
25446 + writemsg(
25447 + "!!! %s\n"
25448 + % _(
25449 + "Found obsolete make.globals file: "
25450 + "'%s', (using '%s' instead)"
25451 + )
25452 + % (old_make_globals, make_globals_path),
25453 + noiselevel=-1,
25454 + )
25455 +
25456 + make_globals = getconfig(
25457 + make_globals_path, tolerant=tolerant, expand=expand_map
25458 + )
25459 + if make_globals is None:
25460 + make_globals = {}
25461 +
25462 + for k, v in self._default_globals.items():
25463 + make_globals.setdefault(k, v)
25464 +
25465 + if config_incrementals is None:
25466 + self.incrementals = INCREMENTALS
25467 + else:
25468 + self.incrementals = config_incrementals
25469 + if not isinstance(self.incrementals, frozenset):
25470 + self.incrementals = frozenset(self.incrementals)
25471 +
25472 + self.module_priority = ("user", "default")
25473 + self.modules = {}
25474 + modules_file = os.path.join(config_root, MODULES_FILE_PATH)
25475 + modules_loader = KeyValuePairFileLoader(modules_file, None, None)
25476 + modules_dict, modules_errors = modules_loader.load()
25477 + self.modules["user"] = modules_dict
25478 + if self.modules["user"] is None:
25479 + self.modules["user"] = {}
25480 + user_auxdbmodule = self.modules["user"].get("portdbapi.auxdbmodule")
25481 + if (
25482 + user_auxdbmodule is not None
25483 + and user_auxdbmodule in self._module_aliases
25484 + ):
25485 + warnings.warn(
25486 + "'%s' is deprecated: %s" % (user_auxdbmodule, modules_file)
25487 + )
25488 +
25489 + self.modules["default"] = {
25490 + "portdbapi.auxdbmodule": "portage.cache.flat_hash.mtime_md5_database",
25491 + }
25492 +
25493 + self.configlist = []
25494 +
25495 + # set up the per-layer configuration dicts:
25496 + self.configdict = {}
25497 + self._use_expand_dict = {}
25498 + # configlist will contain: [ env.d, repo, features, pkginternal, globals, defaults, conf, pkg, env ]
25499 + self.configlist.append({})
25500 + self.configdict["env.d"] = self.configlist[-1]
25501 +
25502 + self.configlist.append({})
25503 + self.configdict["repo"] = self.configlist[-1]
25504 +
25505 + self.configlist.append({})
25506 + self.configdict["features"] = self.configlist[-1]
25507 +
25508 + self.configlist.append({})
25509 + self.configdict["pkginternal"] = self.configlist[-1]
25510 +
25511 + # env_d will be None if profile.env doesn't exist.
25512 + if env_d:
25513 + self.configdict["env.d"].update(env_d)
25514 +
25515 + # backupenv is used for calculating incremental variables.
25516 + if env is None:
25517 + env = os.environ
25518 +
25519 + # Avoid potential UnicodeDecodeError exceptions later.
25520 + env_unicode = dict(
25521 + (_unicode_decode(k), _unicode_decode(v)) for k, v in env.items()
25522 + )
25523 +
25524 + self.backupenv = env_unicode
25525 +
25526 + if env_d:
25527 + # Remove duplicate values so they don't override updated
25528 + # profile.env values later (profile.env is reloaded in each
25529 + # call to self.regenerate).
25530 + for k, v in env_d.items():
25531 + try:
25532 + if self.backupenv[k] == v:
25533 + del self.backupenv[k]
25534 + except KeyError:
25535 + pass
25536 + del k, v
25537 +
25538 + self.configdict["env"] = LazyItemsDict(self.backupenv)
25539 +
25540 + self.configlist.append(make_globals)
25541 + self.configdict["globals"] = self.configlist[-1]
25542 +
25543 + self.make_defaults_use = []
25544 +
25545 + # Loading Repositories
25546 + self["PORTAGE_CONFIGROOT"] = config_root
25547 + self["ROOT"] = target_root
25548 + self["SYSROOT"] = sysroot
25549 + self["EPREFIX"] = eprefix
25550 + self["EROOT"] = eroot
25551 + self["ESYSROOT"] = esysroot
25552 + self["BROOT"] = broot
25553 + known_repos = []
25554 + portdir = ""
25555 + portdir_overlay = ""
25556 + portdir_sync = None
25557 + for confs in [make_globals, make_conf, self.configdict["env"]]:
25558 + v = confs.get("PORTDIR")
25559 + if v is not None:
25560 + portdir = v
25561 + known_repos.append(v)
25562 + v = confs.get("PORTDIR_OVERLAY")
25563 + if v is not None:
25564 + portdir_overlay = v
25565 + known_repos.extend(shlex_split(v))
25566 + v = confs.get("SYNC")
25567 + if v is not None:
25568 + portdir_sync = v
25569 + if "PORTAGE_RSYNC_EXTRA_OPTS" in confs:
25570 + self["PORTAGE_RSYNC_EXTRA_OPTS"] = confs["PORTAGE_RSYNC_EXTRA_OPTS"]
25571 +
25572 + self["PORTDIR"] = portdir
25573 + self["PORTDIR_OVERLAY"] = portdir_overlay
25574 + if portdir_sync:
25575 + self["SYNC"] = portdir_sync
25576 + self.lookuplist = [self.configdict["env"]]
25577 + if repositories is None:
25578 + self.repositories = load_repository_config(self)
25579 + else:
25580 + self.repositories = repositories
25581 +
25582 + known_repos.extend(repo.location for repo in self.repositories)
25583 + known_repos = frozenset(known_repos)
25584 +
25585 + self["PORTAGE_REPOSITORIES"] = self.repositories.config_string()
25586 + self.backup_changes("PORTAGE_REPOSITORIES")
25587 +
25588 + # filling PORTDIR and PORTDIR_OVERLAY variable for compatibility
25589 + main_repo = self.repositories.mainRepo()
25590 + if main_repo is not None:
25591 + self["PORTDIR"] = main_repo.location
25592 + self.backup_changes("PORTDIR")
25593 + expand_map["PORTDIR"] = self["PORTDIR"]
25594 +
25595 + # repoman controls PORTDIR_OVERLAY via the environment, so no
25596 + # special cases are needed here.
25597 + portdir_overlay = list(self.repositories.repoLocationList())
25598 + if portdir_overlay and portdir_overlay[0] == self["PORTDIR"]:
25599 + portdir_overlay = portdir_overlay[1:]
25600 +
25601 + new_ov = []
25602 + if portdir_overlay:
25603 + for ov in portdir_overlay:
25604 + ov = normalize_path(ov)
25605 + if isdir_raise_eaccess(ov) or portage._sync_mode:
25606 + new_ov.append(portage._shell_quote(ov))
25607 + else:
25608 + writemsg(
25609 + _("!!! Invalid PORTDIR_OVERLAY" " (not a dir): '%s'\n")
25610 + % ov,
25611 + noiselevel=-1,
25612 + )
25613 +
25614 + self["PORTDIR_OVERLAY"] = " ".join(new_ov)
25615 + self.backup_changes("PORTDIR_OVERLAY")
25616 + expand_map["PORTDIR_OVERLAY"] = self["PORTDIR_OVERLAY"]
25617 +
25618 + locations_manager.set_port_dirs(self["PORTDIR"], self["PORTDIR_OVERLAY"])
25619 + locations_manager.load_profiles(self.repositories, known_repos)
25620 +
25621 + profiles_complex = locations_manager.profiles_complex
25622 + self.profiles = locations_manager.profiles
25623 + self.profile_path = locations_manager.profile_path
25624 + self.user_profile_dir = locations_manager.user_profile_dir
25625 +
25626 + try:
25627 + packages_list = [
25628 + grabfile_package(
25629 + os.path.join(x.location, "packages"),
25630 + verify_eapi=True,
25631 + eapi=x.eapi,
25632 + eapi_default=None,
25633 + allow_repo=allow_profile_repo_deps(x),
25634 + allow_build_id=x.allow_build_id,
25635 + )
25636 + for x in profiles_complex
25637 + ]
25638 + except EnvironmentError as e:
25639 + _raise_exc(e)
25640 +
25641 + self.packages = tuple(stack_lists(packages_list, incremental=1))
25642 +
25643 + # revmaskdict
25644 + self.prevmaskdict = {}
25645 + for x in self.packages:
25646 + # Negative atoms are filtered by the above stack_lists() call.
25647 + if not isinstance(x, Atom):
25648 + x = Atom(x.lstrip("*"))
25649 + self.prevmaskdict.setdefault(x.cp, []).append(x)
25650 +
25651 + self.unpack_dependencies = load_unpack_dependencies_configuration(
25652 + self.repositories
25653 + )
25654 +
25655 + mygcfg = {}
25656 + if profiles_complex:
25657 + mygcfg_dlists = []
25658 + for x in profiles_complex:
25659 + # Prevent accidents triggered by USE="${USE} ..." settings
25660 + # at the top of make.defaults which caused parent profile
25661 + # USE to override parent profile package.use settings.
25662 + # It would be nice to guard USE_EXPAND variables like
25663 + # this too, but unfortunately USE_EXPAND is not known
25664 + # until after make.defaults has been evaluated, so that
25665 + # will require some form of make.defaults preprocessing.
25666 + expand_map.pop("USE", None)
25667 + mygcfg_dlists.append(
25668 + getconfig(
25669 + os.path.join(x.location, "make.defaults"),
25670 + tolerant=tolerant,
25671 + expand=expand_map,
25672 + recursive=x.portage1_directories,
25673 + )
25674 + )
25675 + self._make_defaults = mygcfg_dlists
25676 + mygcfg = stack_dicts(mygcfg_dlists, incrementals=self.incrementals)
25677 + if mygcfg is None:
25678 + mygcfg = {}
25679 + self.configlist.append(mygcfg)
25680 + self.configdict["defaults"] = self.configlist[-1]
25681 +
25682 + mygcfg = {}
25683 + for x in make_conf_paths:
25684 + mygcfg.update(
25685 + getconfig(
25686 + x,
25687 + tolerant=tolerant,
25688 + allow_sourcing=True,
25689 + expand=expand_map,
25690 + recursive=True,
25691 + )
25692 + or {}
25693 + )
25694 +
25695 +        # __* variables set in make.conf are local and are not propagated.
25696 + mygcfg = {k: v for k, v in mygcfg.items() if not k.startswith("__")}
25697 +
25698 + # Don't allow the user to override certain variables in make.conf
25699 + profile_only_variables = (
25700 + self.configdict["defaults"].get("PROFILE_ONLY_VARIABLES", "").split()
25701 + )
25702 + profile_only_variables = stack_lists([profile_only_variables])
25703 + non_user_variables = set()
25704 + non_user_variables.update(profile_only_variables)
25705 + non_user_variables.update(self._env_blacklist)
25706 + non_user_variables.update(self._global_only_vars)
25707 + non_user_variables = frozenset(non_user_variables)
25708 + self._non_user_variables = non_user_variables
25709 +
25710 + self._env_d_blacklist = frozenset(
25711 + chain(
25712 + profile_only_variables,
25713 + self._env_blacklist,
25714 + )
25715 + )
25716 + env_d = self.configdict["env.d"]
25717 + for k in self._env_d_blacklist:
25718 + env_d.pop(k, None)
25719 +
25720 + for k in profile_only_variables:
25721 + mygcfg.pop(k, None)
25722 +
25723 + self.configlist.append(mygcfg)
25724 + self.configdict["conf"] = self.configlist[-1]
25725 +
25726 + self.configlist.append(LazyItemsDict())
25727 + self.configdict["pkg"] = self.configlist[-1]
25728 +
25729 + self.configdict["backupenv"] = self.backupenv
25730 +
25731 + # Don't allow the user to override certain variables in the env
25732 + for k in profile_only_variables:
25733 + self.backupenv.pop(k, None)
25734 +
25735 + self.configlist.append(self.configdict["env"])
25736 +
25737 + # make lookuplist for loading package.*
25738 + self.lookuplist = self.configlist[:]
25739 + self.lookuplist.reverse()
25740 +
25741 + # Blacklist vars that could interfere with portage internals.
25742 + for blacklisted in self._env_blacklist:
25743 + for cfg in self.lookuplist:
25744 + cfg.pop(blacklisted, None)
25745 + self.backupenv.pop(blacklisted, None)
25746 + del blacklisted, cfg
25747 +
25748 + self["PORTAGE_CONFIGROOT"] = config_root
25749 + self.backup_changes("PORTAGE_CONFIGROOT")
25750 + self["ROOT"] = target_root
25751 + self.backup_changes("ROOT")
25752 + self["SYSROOT"] = sysroot
25753 + self.backup_changes("SYSROOT")
25754 + self["EPREFIX"] = eprefix
25755 + self.backup_changes("EPREFIX")
25756 + self["EROOT"] = eroot
25757 + self.backup_changes("EROOT")
25758 + self["ESYSROOT"] = esysroot
25759 + self.backup_changes("ESYSROOT")
25760 + self["BROOT"] = broot
25761 + self.backup_changes("BROOT")
25762 +
25763 + # The prefix of the running portage instance is used in the
25764 + # ebuild environment to implement the --host-root option for
25765 + # best_version and has_version.
25766 + self["PORTAGE_OVERRIDE_EPREFIX"] = portage.const.EPREFIX
25767 + self.backup_changes("PORTAGE_OVERRIDE_EPREFIX")
25768 +
25769 + self._ppropertiesdict = portage.dep.ExtendedAtomDict(dict)
25770 + self._paccept_restrict = portage.dep.ExtendedAtomDict(dict)
25771 + self._penvdict = portage.dep.ExtendedAtomDict(dict)
25772 + self._pbashrcdict = {}
25773 + self._pbashrc = ()
25774 +
25775 + self._repo_make_defaults = {}
25776 + for repo in self.repositories.repos_with_profiles():
25777 + d = (
25778 + getconfig(
25779 + os.path.join(repo.location, "profiles", "make.defaults"),
25780 + tolerant=tolerant,
25781 + expand=self.configdict["globals"].copy(),
25782 + recursive=repo.portage1_profiles,
25783 + )
25784 + or {}
25785 + )
25786 + if d:
25787 + for k in chain(
25788 + self._env_blacklist,
25789 + profile_only_variables,
25790 + self._global_only_vars,
25791 + ):
25792 + d.pop(k, None)
25793 + self._repo_make_defaults[repo.name] = d
25794 +
25795 + # Read all USE related files from profiles and optionally from user config.
25796 + self._use_manager = UseManager(
25797 + self.repositories,
25798 + profiles_complex,
25799 + abs_user_config,
25800 + self._isStable,
25801 + user_config=local_config,
25802 + )
25803 + # Initialize all USE related variables we track ourselves.
25804 + self.usemask = self._use_manager.getUseMask()
25805 + self.useforce = self._use_manager.getUseForce()
25806 + self.configdict["conf"][
25807 + "USE"
25808 + ] = self._use_manager.extract_global_USE_changes(
25809 + self.configdict["conf"].get("USE", "")
25810 + )
25811 +
25812 +        # Read license_groups and optionally package.license from user config
25813 + self._license_manager = LicenseManager(
25814 + locations_manager.profile_locations,
25815 + abs_user_config,
25816 + user_config=local_config,
25817 + )
25818 + # Extract '*/*' entries from package.license
25819 + self.configdict["conf"][
25820 + "ACCEPT_LICENSE"
25821 + ] = self._license_manager.extract_global_changes(
25822 + self.configdict["conf"].get("ACCEPT_LICENSE", "")
25823 + )
25824 +
25825 + # profile.bashrc
25826 + self._profile_bashrc = tuple(
25827 + os.path.isfile(os.path.join(profile.location, "profile.bashrc"))
25828 + for profile in profiles_complex
25829 + )
25830 +
25831 + if local_config:
25832 + # package.properties
25833 + propdict = grabdict_package(
25834 + os.path.join(abs_user_config, "package.properties"),
25835 + recursive=1,
25836 + allow_wildcard=True,
25837 + allow_repo=True,
25838 + verify_eapi=False,
25839 + allow_build_id=True,
25840 + )
25841 + v = propdict.pop("*/*", None)
25842 + if v is not None:
25843 + if "ACCEPT_PROPERTIES" in self.configdict["conf"]:
25844 + self.configdict["conf"]["ACCEPT_PROPERTIES"] += " " + " ".join(
25845 + v
25846 + )
25847 + else:
25848 + self.configdict["conf"]["ACCEPT_PROPERTIES"] = " ".join(v)
25849 + for k, v in propdict.items():
25850 + self._ppropertiesdict.setdefault(k.cp, {})[k] = v
25851 +
25852 + # package.accept_restrict
25853 + d = grabdict_package(
25854 + os.path.join(abs_user_config, "package.accept_restrict"),
25855 + recursive=True,
25856 + allow_wildcard=True,
25857 + allow_repo=True,
25858 + verify_eapi=False,
25859 + allow_build_id=True,
25860 + )
25861 + v = d.pop("*/*", None)
25862 + if v is not None:
25863 + if "ACCEPT_RESTRICT" in self.configdict["conf"]:
25864 + self.configdict["conf"]["ACCEPT_RESTRICT"] += " " + " ".join(v)
25865 + else:
25866 + self.configdict["conf"]["ACCEPT_RESTRICT"] = " ".join(v)
25867 + for k, v in d.items():
25868 + self._paccept_restrict.setdefault(k.cp, {})[k] = v
25869 +
25870 + # package.env
25871 + penvdict = grabdict_package(
25872 + os.path.join(abs_user_config, "package.env"),
25873 + recursive=1,
25874 + allow_wildcard=True,
25875 + allow_repo=True,
25876 + verify_eapi=False,
25877 + allow_build_id=True,
25878 + )
25879 + v = penvdict.pop("*/*", None)
25880 + if v is not None:
25881 + global_wildcard_conf = {}
25882 + self._grab_pkg_env(v, global_wildcard_conf)
25883 + incrementals = self.incrementals
25884 + conf_configdict = self.configdict["conf"]
25885 + for k, v in global_wildcard_conf.items():
25886 + if k in incrementals:
25887 + if k in conf_configdict:
25888 + conf_configdict[k] = conf_configdict[k] + " " + v
25889 + else:
25890 + conf_configdict[k] = v
25891 + else:
25892 + conf_configdict[k] = v
25893 + expand_map[k] = v
25894 +
25895 + for k, v in penvdict.items():
25896 + self._penvdict.setdefault(k.cp, {})[k] = v
25897 +
25898 + # package.bashrc
25899 + for profile in profiles_complex:
25900 +            if "profile-bashrcs" not in profile.profile_formats:
25901 + continue
25902 + self._pbashrcdict[profile] = portage.dep.ExtendedAtomDict(dict)
25903 + bashrc = grabdict_package(
25904 + os.path.join(profile.location, "package.bashrc"),
25905 + recursive=1,
25906 + allow_wildcard=True,
25907 + allow_repo=allow_profile_repo_deps(profile),
25908 + verify_eapi=True,
25909 + eapi=profile.eapi,
25910 + eapi_default=None,
25911 + allow_build_id=profile.allow_build_id,
25912 + )
25913 + if not bashrc:
25914 + continue
25915 +
25916 + for k, v in bashrc.items():
25917 + envfiles = [
25918 + os.path.join(profile.location, "bashrc", envname)
25919 + for envname in v
25920 + ]
25921 + self._pbashrcdict[profile].setdefault(k.cp, {}).setdefault(
25922 + k, []
25923 + ).extend(envfiles)
25924 +
25925 +        # categories are now read from external files
25926 + self.categories = [
25927 + grabfile(os.path.join(x, "categories"))
25928 + for x in locations_manager.profile_and_user_locations
25929 + ]
25930 + category_re = dbapi._category_re
25931 + # categories used to be a tuple, but now we use a frozenset
25932 +        # for hashed category validation in portdbapi.cp_list()
25933 + self.categories = frozenset(
25934 + x
25935 + for x in stack_lists(self.categories, incremental=1)
25936 + if category_re.match(x) is not None
25937 + )
25938 +
25939 + archlist = [
25940 + grabfile(os.path.join(x, "arch.list"))
25941 + for x in locations_manager.profile_and_user_locations
25942 + ]
25943 + archlist = sorted(stack_lists(archlist, incremental=1))
25944 + self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
25945 +
25946 + pkgprovidedlines = []
25947 + for x in profiles_complex:
25948 + provpath = os.path.join(x.location, "package.provided")
25949 + if os.path.exists(provpath):
25950 + if _get_eapi_attrs(x.eapi).allows_package_provided:
25951 + pkgprovidedlines.append(
25952 + grabfile(provpath, recursive=x.portage1_directories)
25953 + )
25954 + else:
25955 + # TODO: bail out?
25956 + writemsg(
25957 + (
25958 + _("!!! package.provided not allowed in EAPI %s: ")
25959 + % x.eapi
25960 + )
25961 + + x.location
25962 + + "\n",
25963 + noiselevel=-1,
25964 + )
25965 +
25966 + pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
25967 + has_invalid_data = False
25968 + for x in range(len(pkgprovidedlines) - 1, -1, -1):
25969 + myline = pkgprovidedlines[x]
25970 + if not isvalidatom("=" + myline):
25971 + writemsg(
25972 + _("Invalid package name in package.provided: %s\n") % myline,
25973 + noiselevel=-1,
25974 + )
25975 + has_invalid_data = True
25976 + del pkgprovidedlines[x]
25977 + continue
25978 + cpvr = catpkgsplit(pkgprovidedlines[x])
25979 + if not cpvr or cpvr[0] == "null":
25980 + writemsg(
25981 + _("Invalid package name in package.provided: ")
25982 + + pkgprovidedlines[x]
25983 + + "\n",
25984 + noiselevel=-1,
25985 + )
25986 + has_invalid_data = True
25987 + del pkgprovidedlines[x]
25988 + continue
25989 + if has_invalid_data:
25990 + writemsg(
25991 + _("See portage(5) for correct package.provided usage.\n"),
25992 + noiselevel=-1,
25993 + )
25994 + self.pprovideddict = {}
25995 + for x in pkgprovidedlines:
25996 + x_split = catpkgsplit(x)
25997 + if x_split is None:
25998 + continue
25999 + mycatpkg = cpv_getkey(x)
26000 + if mycatpkg in self.pprovideddict:
26001 + self.pprovideddict[mycatpkg].append(x)
26002 + else:
26003 + self.pprovideddict[mycatpkg] = [x]
26004 +
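The validation loop above can be pictured with a reduced sketch; the
regex below is a rough stand-in for portage's isvalidatom() and
catpkgsplit(), not the real atom grammar:

    import re

    # Crude cpv matcher: category/package-version (simplified).
    _CPV_RE = re.compile(
        r"^(?P<cat>[\w+][\w+.-]*)/(?P<pkg>[\w+][\w+-]*?)-(?P<ver>\d[\w.]*)$"
    )

    pkgprovidedlines = ["sys-kernel/gentoo-sources-5.15.0", "not-a-cpv"]
    pprovideddict = {}
    for line in pkgprovidedlines:
        m = _CPV_RE.match(line)
        if m is None:
            print("Invalid package name in package.provided: %s" % line)
            continue
        # Key by category/package so later cp lookups are cheap.
        cp = "%s/%s" % (m.group("cat"), m.group("pkg"))
        pprovideddict.setdefault(cp, []).append(line)

    print(pprovideddict)
    # {'sys-kernel/gentoo-sources': ['sys-kernel/gentoo-sources-5.15.0']}
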
26005 + # reasonable defaults; this is important as without USE_ORDER,
26006 + # USE will always be "" (nothing set)!
26007 + if "USE_ORDER" not in self:
26008 + self[
26009 + "USE_ORDER"
26010 + ] = "env:pkg:conf:defaults:pkginternal:features:repo:env.d"
26011 + self.backup_changes("USE_ORDER")
26012 +
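Conceptually, USE_ORDER lists the configuration layers from highest to
lowest priority, and the effective USE is built by walking the layers
in reverse while applying incremental "-flag" removals. A toy model
with hypothetical layer contents (portage's regenerate() is far more
involved):

    USE_ORDER = "env:pkg:conf:defaults:pkginternal:features:repo:env.d"

    configdict = {
        "env.d": {"USE": ""},
        "repo": {"USE": ""},
        "features": {"USE": "test"},
        "pkginternal": {"USE": "+introspection"},
        "defaults": {"USE": "acl ipv6"},
        "conf": {"USE": "-ipv6 X"},
        "pkg": {"USE": ""},
        "env": {"USE": ""},
    }

    use = set()
    # Lowest priority first; "-flag" removes, anything else adds.
    for source in reversed(USE_ORDER.split(":")):
        for flag in configdict[source]["USE"].split():
            flag = flag.lstrip("+")
            if flag.startswith("-"):
                use.discard(flag[1:])
            else:
                use.add(flag)

    print(" ".join(sorted(use)))  # X acl introspection test
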
26013 + if "CBUILD" not in self and "CHOST" in self:
26014 + self["CBUILD"] = self["CHOST"]
26015 + self.backup_changes("CBUILD")
26016 +
26017 + if "USERLAND" not in self:
26018 + # Set default USERLAND so that our test cases can assume that
26019 + # it's always set. This allows isolated-functions.sh to avoid
26020 + # calling uname -s when sourced.
26021 + system = platform.system()
26022 + if system is not None and (
26023 + system.endswith("BSD") or system == "DragonFly"
26024 + ):
26025 + self["USERLAND"] = "BSD"
26026 + else:
26027 + self["USERLAND"] = "GNU"
26028 + self.backup_changes("USERLAND")
26029 +
26030 + default_inst_ids = {
26031 + "PORTAGE_INST_GID": "0",
26032 + "PORTAGE_INST_UID": "0",
26033 + }
26034 +
26035 + eroot_or_parent = first_existing(eroot)
26036 + unprivileged = False
26037 + try:
26038 ++            # PREFIX LOCAL: inventing a UID/GID based on a path is a very
26039 ++            # bad idea; it breaks almost everything, since group ids don't
26040 ++            # have to match when a user has many of them. In particular,
26041 ++            # this breaks the configure-set portage group and user
26042 ++            # (in portage/data.py)
26043 ++ raise OSError(2, "No such file or directory")
26044 + eroot_st = os.stat(eroot_or_parent)
26045 + except OSError:
26046 + pass
26047 + else:
26048 +
26049 + if portage.data._unprivileged_mode(eroot_or_parent, eroot_st):
26050 + unprivileged = True
26051 +
26052 + default_inst_ids["PORTAGE_INST_GID"] = str(eroot_st.st_gid)
26053 + default_inst_ids["PORTAGE_INST_UID"] = str(eroot_st.st_uid)
26054 +
26055 + if "PORTAGE_USERNAME" not in self:
26056 + try:
26057 + pwd_struct = pwd.getpwuid(eroot_st.st_uid)
26058 + except KeyError:
26059 + pass
26060 + else:
26061 + self["PORTAGE_USERNAME"] = pwd_struct.pw_name
26062 + self.backup_changes("PORTAGE_USERNAME")
26063 +
26064 + if "PORTAGE_GRPNAME" not in self:
26065 + try:
26066 + grp_struct = grp.getgrgid(eroot_st.st_gid)
26067 + except KeyError:
26068 + pass
26069 + else:
26070 + self["PORTAGE_GRPNAME"] = grp_struct.gr_name
26071 + self.backup_changes("PORTAGE_GRPNAME")
26072 +
26073 + for var, default_val in default_inst_ids.items():
26074 + try:
26075 + self[var] = str(int(self.get(var, default_val)))
26076 + except ValueError:
26077 + writemsg(
26078 + _(
26079 + "!!! %s='%s' is not a valid integer. "
26080 + "Falling back to %s.\n"
26081 + )
26082 + % (var, self[var], default_val),
26083 + noiselevel=-1,
26084 + )
26085 + self[var] = default_val
26086 + self.backup_changes(var)
26087 +
26088 + self.depcachedir = self.get("PORTAGE_DEPCACHEDIR")
26089 + if self.depcachedir is None:
26090 + self.depcachedir = os.path.join(
26091 + os.sep, portage.const.EPREFIX, DEPCACHE_PATH.lstrip(os.sep)
26092 + )
26093 + if unprivileged and target_root != os.sep:
26094 + # In unprivileged mode, automatically make
26095 + # depcachedir relative to target_root if the
26096 + # default depcachedir is not writable.
26097 + if not os.access(first_existing(self.depcachedir), os.W_OK):
26098 + self.depcachedir = os.path.join(
26099 + eroot, DEPCACHE_PATH.lstrip(os.sep)
26100 + )
26101 +
26102 + self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
26103 + self.backup_changes("PORTAGE_DEPCACHEDIR")
26104 +
26105 + if portage._internal_caller:
26106 + self["PORTAGE_INTERNAL_CALLER"] = "1"
26107 + self.backup_changes("PORTAGE_INTERNAL_CALLER")
26108 +
26109 + # initialize self.features
26110 + self.regenerate()
26111 + feature_use = []
26112 + if "test" in self.features:
26113 + feature_use.append("test")
26114 + self.configdict["features"]["USE"] = self._default_features_use = " ".join(
26115 + feature_use
26116 + )
26117 + if feature_use:
26118 + # Regenerate USE so that the initial "test" flag state is
26119 + # correct for evaluation of !test? conditionals in RESTRICT.
26120 + self.regenerate()
26121 +
26122 + if unprivileged:
26123 + self.features.add("unprivileged")
26124 +
26125 + if bsd_chflags:
26126 + self.features.add("chflags")
26127 +
26128 + self._init_iuse()
26129 +
26130 + self._validate_commands()
26131 +
26132 + for k in self._case_insensitive_vars:
26133 + if k in self:
26134 + self[k] = self[k].lower()
26135 + self.backup_changes(k)
26136 +
26137 + # The first constructed config object initializes these modules,
26138 + # and subsequent calls to the _init() functions have no effect.
26139 + portage.output._init(config_root=self["PORTAGE_CONFIGROOT"])
26140 + portage.data._init(self)
26141 +
26142 + if mycpv:
26143 + self.setcpv(mycpv)
26144 +
26145 + def _init_iuse(self):
26146 + self._iuse_effective = self._calc_iuse_effective()
26147 + self._iuse_implicit_match = _iuse_implicit_match_cache(self)
26148 +
26149 + @property
26150 + def mygcfg(self):
26151 +        warnings.warn("portage.config.mygcfg is deprecated", DeprecationWarning, stacklevel=3)
26152 + return {}
26153 +
26154 + def _validate_commands(self):
26155 + for k in special_env_vars.validate_commands:
26156 + v = self.get(k)
26157 + if v is not None:
26158 + valid, v_split = validate_cmd_var(v)
26159 +
26160 + if not valid:
26161 + if v_split:
26162 + writemsg_level(
26163 + _("%s setting is invalid: '%s'\n") % (k, v),
26164 + level=logging.ERROR,
26165 + noiselevel=-1,
26166 + )
26167 +
26168 + # before deleting the invalid setting, backup
26169 + # the default value if available
26170 + v = self.configdict["globals"].get(k)
26171 + if v is not None:
26172 + default_valid, v_split = validate_cmd_var(v)
26173 + if not default_valid:
26174 + if v_split:
26175 + writemsg_level(
26176 + _(
26177 +                                    "%s setting from make.globals"
26178 +                                    " is invalid: '%s'\n"
26179 + )
26180 + % (k, v),
26181 + level=logging.ERROR,
26182 + noiselevel=-1,
26183 + )
26184 + # make.globals seems corrupt, so try for
26185 + # a hardcoded default instead
26186 + v = self._default_globals.get(k)
26187 +
26188 + # delete all settings for this key,
26189 + # including the invalid one
26190 + del self[k]
26191 + self.backupenv.pop(k, None)
26192 + if v:
26193 + # restore validated default
26194 + self.configdict["globals"][k] = v
26195 +
26196 + def _init_dirs(self):
26197 + """
26198 + Create a few directories that are critical to portage operation
26199 + """
26200 + if not os.access(self["EROOT"], os.W_OK):
26201 + return
26202 +
26203 + # gid, mode, mask, preserve_perms
26204 + dir_mode_map = {
26205 + "tmp": (-1, 0o1777, 0, True),
26206 + "var/tmp": (-1, 0o1777, 0, True),
26207 + PRIVATE_PATH: (portage_gid, 0o2750, 0o2, False),
26208 + CACHE_PATH: (portage_gid, 0o755, 0o2, False),
26209 + }
26210 +
26211 + for mypath, (gid, mode, modemask, preserve_perms) in dir_mode_map.items():
26212 + mydir = os.path.join(self["EROOT"], mypath)
26213 + if preserve_perms and os.path.isdir(mydir):
26214 + # Only adjust permissions on some directories if
26215 + # they don't exist yet. This gives freedom to the
26216 + # user to adjust permissions to suit their taste.
26217 + continue
26218 + try:
26219 + ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
26220 + except PortageException as e:
26221 + writemsg(
26222 + _("!!! Directory initialization failed: '%s'\n") % mydir,
26223 + noiselevel=-1,
26224 + )
26225 + writemsg("!!! %s\n" % str(e), noiselevel=-1)
26226 +
26227 + @property
26228 + def _keywords_manager(self):
26229 + if self._keywords_manager_obj is None:
26230 + self._keywords_manager_obj = KeywordsManager(
26231 + self._locations_manager.profiles_complex,
26232 + self._locations_manager.abs_user_config,
26233 + self.local_config,
26234 + global_accept_keywords=self.configdict["defaults"].get(
26235 + "ACCEPT_KEYWORDS", ""
26236 + ),
26237 + )
26238 + return self._keywords_manager_obj
26239 +
26240 + @property
26241 + def _mask_manager(self):
26242 + if self._mask_manager_obj is None:
26243 + self._mask_manager_obj = MaskManager(
26244 + self.repositories,
26245 + self._locations_manager.profiles_complex,
26246 + self._locations_manager.abs_user_config,
26247 + user_config=self.local_config,
26248 + strict_umatched_removal=self._unmatched_removal,
26249 + )
26250 + return self._mask_manager_obj
26251 +
26252 + @property
26253 + def _virtuals_manager(self):
26254 + if self._virtuals_manager_obj is None:
26255 + self._virtuals_manager_obj = VirtualsManager(self.profiles)
26256 + return self._virtuals_manager_obj
26257 +
26258 + @property
26259 + def pkeywordsdict(self):
26260 + result = self._keywords_manager.pkeywordsdict.copy()
26261 + for k, v in result.items():
26262 + result[k] = v.copy()
26263 + return result
26264 +
26265 + @property
26266 + def pmaskdict(self):
26267 + return self._mask_manager._pmaskdict.copy()
26268 +
26269 + @property
26270 + def punmaskdict(self):
26271 + return self._mask_manager._punmaskdict.copy()
26272 +
26273 + @property
26274 + def soname_provided(self):
26275 + if self._soname_provided is None:
26276 + d = stack_dictlist(
26277 + (
26278 + grabdict(os.path.join(x, "soname.provided"), recursive=True)
26279 + for x in self.profiles
26280 + ),
26281 + incremental=True,
26282 + )
26283 + self._soname_provided = frozenset(
26284 + SonameAtom(cat, soname)
26285 + for cat, sonames in d.items()
26286 + for soname in sonames
26287 + )
26288 + return self._soname_provided
26289 +
26290 + def expandLicenseTokens(self, tokens):
26291 + """Take a token from ACCEPT_LICENSE or package.license and expand it
26292 + if it's a group token (indicated by @) or just return it if it's not a
26293 + group. If a group is negated then negate all group elements."""
26294 + return self._license_manager.expandLicenseTokens(tokens)
26295 +
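A hedged sketch of that expansion, with a hypothetical group table
(nested groups, which the real LicenseManager also resolves, are
omitted):

    license_groups = {"FSF-APPROVED": ["GPL-2", "GPL-3", "LGPL-2.1"]}

    def expand_license_tokens(tokens):
        result = []
        for token in tokens:
            negate = token.startswith("-")
            name = token.lstrip("-")
            if name.startswith("@"):
                # Group token: expand, negating members if negated.
                members = license_groups.get(name[1:], [])
                result.extend(("-" + m) if negate else m for m in members)
            else:
                result.append(token)
        return result

    print(expand_license_tokens(["@FSF-APPROVED", "-@FSF-APPROVED", "MIT"]))
    # ['GPL-2', 'GPL-3', 'LGPL-2.1', '-GPL-2', '-GPL-3', '-LGPL-2.1', 'MIT']
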
26296 + def validate(self):
26297 + """Validate miscellaneous settings and display warnings if necessary.
26298 + (This code was previously in the global scope of portage.py)"""
26299 +
26300 + groups = self.get("ACCEPT_KEYWORDS", "").split()
26301 + archlist = self.archlist()
26302 + if not archlist:
26303 + writemsg(
26304 + _(
26305 + "--- 'profiles/arch.list' is empty or "
26306 + "not available. Empty ebuild repository?\n"
26307 + ),
26308 + noiselevel=1,
26309 + )
26310 + else:
26311 + for group in groups:
26312 + if (
26313 + group not in archlist
26314 + and not (group.startswith("-") and group[1:] in archlist)
26315 + and group not in ("*", "~*", "**")
26316 + ):
26317 + writemsg(
26318 + _("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group),
26319 + noiselevel=-1,
26320 + )
26321 +
26322 + profile_broken = False
26323 +
26324 + # getmaskingstatus requires ARCH for ACCEPT_KEYWORDS support
26325 + arch = self.get("ARCH")
26326 + if not self.profile_path or not arch:
26327 + profile_broken = True
26328 + else:
26329 + # If any one of these files exists, then
26330 + # the profile is considered valid.
26331 + for x in ("make.defaults", "parent", "packages", "use.force", "use.mask"):
26332 + if exists_raise_eaccess(os.path.join(self.profile_path, x)):
26333 + break
26334 + else:
26335 + profile_broken = True
26336 +
26337 + if profile_broken and not portage._sync_mode:
26338 + abs_profile_path = None
26339 + for x in (PROFILE_PATH, "etc/make.profile"):
26340 + x = os.path.join(self["PORTAGE_CONFIGROOT"], x)
26341 + try:
26342 + os.lstat(x)
26343 + except OSError:
26344 + pass
26345 + else:
26346 + abs_profile_path = x
26347 + break
26348 +
26349 + if abs_profile_path is None:
26350 + abs_profile_path = os.path.join(
26351 + self["PORTAGE_CONFIGROOT"], PROFILE_PATH
26352 + )
26353 +
26354 + writemsg(
26355 + _(
26356 + "\n\n!!! %s is not a symlink and will probably prevent most merges.\n"
26357 + )
26358 + % abs_profile_path,
26359 + noiselevel=-1,
26360 + )
26361 + writemsg(
26362 + _("!!! It should point into a profile within %s/profiles/\n")
26363 + % self["PORTDIR"]
26364 + )
26365 + writemsg(
26366 + _(
26367 + "!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"
26368 + )
26369 + )
26370 +
26371 + abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"], USER_VIRTUALS_FILE)
26372 + if os.path.exists(abs_user_virtuals):
26373 + writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
26374 + writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
26375 + writemsg("!!! this new location.\n\n")
26376 +
26377 + if not sandbox_capable and (
26378 + "sandbox" in self.features or "usersandbox" in self.features
26379 + ):
26380 + if self.profile_path is not None and os.path.realpath(
26381 + self.profile_path
26382 + ) == os.path.realpath(
26383 + os.path.join(self["PORTAGE_CONFIGROOT"], PROFILE_PATH)
26384 + ):
26385 + # Don't show this warning when running repoman and the
26386 + # sandbox feature came from a profile that doesn't belong
26387 + # to the user.
26388 + writemsg(
26389 + colorize(
26390 +                    "BAD", _("!!! Problem with sandbox binary. Disabling...\n\n")
26391 + ),
26392 + noiselevel=-1,
26393 + )
26394 +
26395 + if "fakeroot" in self.features and not fakeroot_capable:
26396 + writemsg(
26397 + _(
26398 + "!!! FEATURES=fakeroot is enabled, but the "
26399 + "fakeroot binary is not installed.\n"
26400 + ),
26401 + noiselevel=-1,
26402 + )
26403 +
26404 + if "webrsync-gpg" in self.features:
26405 + writemsg(
26406 + _(
26407 + "!!! FEATURES=webrsync-gpg is deprecated, see the make.conf(5) man page.\n"
26408 + ),
26409 + noiselevel=-1,
26410 + )
26411 +
26412 + if os.getuid() == 0 and not hasattr(os, "setgroups"):
26413 + warning_shown = False
26414 +
26415 + if "userpriv" in self.features:
26416 + writemsg(
26417 + _(
26418 + "!!! FEATURES=userpriv is enabled, but "
26419 + "os.setgroups is not available.\n"
26420 + ),
26421 + noiselevel=-1,
26422 + )
26423 + warning_shown = True
26424 +
26425 + if "userfetch" in self.features:
26426 + writemsg(
26427 + _(
26428 + "!!! FEATURES=userfetch is enabled, but "
26429 + "os.setgroups is not available.\n"
26430 + ),
26431 + noiselevel=-1,
26432 + )
26433 + warning_shown = True
26434 +
26435 + if warning_shown and platform.python_implementation() == "PyPy":
26436 + writemsg(
26437 + _("!!! See https://bugs.pypy.org/issue833 for details.\n"),
26438 + noiselevel=-1,
26439 + )
26440 +
26441 + binpkg_compression = self.get("BINPKG_COMPRESS")
26442 + if binpkg_compression:
26443 + try:
26444 + compression = _compressors[binpkg_compression]
26445 + except KeyError as e:
26446 + writemsg(
26447 + "!!! BINPKG_COMPRESS contains invalid or "
26448 +                "unsupported compression method: %s\n" % e.args[0],
26449 + noiselevel=-1,
26450 + )
26451 + else:
26452 + try:
26453 + compression_binary = shlex_split(
26454 + portage.util.varexpand(compression["compress"], mydict=self)
26455 + )[0]
26456 + except IndexError as e:
26457 + writemsg(
26458 + "!!! BINPKG_COMPRESS contains invalid or "
26459 +                    "unsupported compression method: %s\n" % e.args[0],
26460 + noiselevel=-1,
26461 + )
26462 + else:
26463 + if portage.process.find_binary(compression_binary) is None:
26464 + missing_package = compression["package"]
26465 + writemsg(
26466 +                        "!!! BINPKG_COMPRESS method %s is unsupported. "
26467 +                        "Missing package: %s\n"
26468 + % (binpkg_compression, missing_package),
26469 + noiselevel=-1,
26470 + )
26471 +
26472 + def load_best_module(self, property_string):
26473 + best_mod = best_from_dict(property_string, self.modules, self.module_priority)
26474 + mod = None
26475 + try:
26476 + mod = load_mod(best_mod)
26477 + except ImportError:
26478 + if best_mod in self._module_aliases:
26479 + mod = load_mod(self._module_aliases[best_mod])
26480 + elif not best_mod.startswith("cache."):
26481 + raise
26482 + else:
26483 + best_mod = "portage." + best_mod
26484 + try:
26485 + mod = load_mod(best_mod)
26486 + except ImportError:
26487 + raise
26488 + return mod
26489 +
26490 + def lock(self):
26491 + self.locked = 1
26492 +
26493 + def unlock(self):
26494 + self.locked = 0
26495 +
26496 + def modifying(self):
26497 + if self.locked:
26498 + raise Exception(_("Configuration is locked."))
26499 +
26500 + def backup_changes(self, key=None):
26501 + self.modifying()
26502 + if key and key in self.configdict["env"]:
26503 + self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
26504 + else:
26505 + raise KeyError(_("No such key defined in environment: %s") % key)
26506 +
26507 + def reset(self, keeping_pkg=0, use_cache=None):
26508 + """
26509 + Restore environment from self.backupenv, call self.regenerate()
26510 + @param keeping_pkg: Should we keep the setcpv() data or delete it.
26511 + @type keeping_pkg: Boolean
26512 +        @rtype: None
26513 + """
26514 +
26515 + if use_cache is not None:
26516 + warnings.warn(
26517 + "The use_cache parameter for config.reset() is deprecated and without effect.",
26518 + DeprecationWarning,
26519 + stacklevel=2,
26520 + )
26521 +
26522 + self.modifying()
26523 + self.configdict["env"].clear()
26524 + self.configdict["env"].update(self.backupenv)
26525 +
26526 + self.modifiedkeys = []
26527 + if not keeping_pkg:
26528 + self.mycpv = None
26529 + self._setcpv_args_hash = None
26530 + self.puse = ""
26531 + del self._penv[:]
26532 + self.configdict["pkg"].clear()
26533 + self.configdict["pkginternal"].clear()
26534 + self.configdict["features"]["USE"] = self._default_features_use
26535 + self.configdict["repo"].clear()
26536 + self.configdict["defaults"]["USE"] = " ".join(self.make_defaults_use)
26537 + self.usemask = self._use_manager.getUseMask()
26538 + self.useforce = self._use_manager.getUseForce()
26539 + self.regenerate()
26540 +
26541 + class _lazy_vars:
26542 +
26543 + __slots__ = ("built_use", "settings", "values")
26544 +
26545 + def __init__(self, built_use, settings):
26546 + self.built_use = built_use
26547 + self.settings = settings
26548 + self.values = None
26549 +
26550 + def __getitem__(self, k):
26551 + if self.values is None:
26552 + self.values = self._init_values()
26553 + return self.values[k]
26554 +
26555 + def _init_values(self):
26556 + values = {}
26557 + settings = self.settings
26558 + use = self.built_use
26559 + if use is None:
26560 + use = frozenset(settings["PORTAGE_USE"].split())
26561 +
26562 + values[
26563 + "ACCEPT_LICENSE"
26564 + ] = settings._license_manager.get_prunned_accept_license(
26565 + settings.mycpv,
26566 + use,
26567 + settings.get("LICENSE", ""),
26568 + settings.get("SLOT"),
26569 + settings.get("PORTAGE_REPO_NAME"),
26570 + )
26571 + values["PORTAGE_PROPERTIES"] = self._flatten("PROPERTIES", use, settings)
26572 + values["PORTAGE_RESTRICT"] = self._flatten("RESTRICT", use, settings)
26573 + return values
26574 +
26575 + def _flatten(self, var, use, settings):
26576 + try:
26577 + restrict = set(
26578 + use_reduce(settings.get(var, ""), uselist=use, flat=True)
26579 + )
26580 + except InvalidDependString:
26581 + restrict = set()
26582 + return " ".join(sorted(restrict))
26583 +
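_flatten() leans on use_reduce() to strip USE conditionals down to a
flat token set. A simplified one-level flattener, assuming no nested
groups (the real use_reduce() handles full dependency syntax):

    def flatten(depstr, use):
        tokens = depstr.split()
        result, i = set(), 0
        while i < len(tokens):
            tok = tokens[i]
            if tok.endswith("?"):  # conditional group: flag? ( items )
                flag = tok[:-1]
                enabled = (flag.lstrip("!") in use) != flag.startswith("!")
                j = tokens.index(")", i)  # assumes no nesting
                if enabled:
                    result.update(tokens[i + 2 : j])
                i = j + 1
            else:
                result.add(tok)
                i += 1
        return " ".join(sorted(result))

    print(flatten("!test? ( test ) mirror", use={"test"}))  # mirror
    print(flatten("!test? ( test ) mirror", use=set()))     # mirror test
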
26584 + class _lazy_use_expand:
26585 + """
26586 + Lazily evaluate USE_EXPAND variables since they are only needed when
26587 +        an ebuild shell is spawned. Variable values are made consistent with
26588 + the previously calculated USE settings.
26589 + """
26590 +
26591 + def __init__(
26592 + self,
26593 + settings,
26594 + unfiltered_use,
26595 + use,
26596 + usemask,
26597 + iuse_effective,
26598 + use_expand_split,
26599 + use_expand_dict,
26600 + ):
26601 + self._settings = settings
26602 + self._unfiltered_use = unfiltered_use
26603 + self._use = use
26604 + self._usemask = usemask
26605 + self._iuse_effective = iuse_effective
26606 + self._use_expand_split = use_expand_split
26607 + self._use_expand_dict = use_expand_dict
26608 +
26609 + def __getitem__(self, key):
26610 + prefix = key.lower() + "_"
26611 + prefix_len = len(prefix)
26612 + expand_flags = set(
26613 + x[prefix_len:] for x in self._use if x[:prefix_len] == prefix
26614 + )
26615 + var_split = self._use_expand_dict.get(key, "").split()
26616 + # Preserve the order of var_split because it can matter for things
26617 + # like LINGUAS.
26618 + var_split = [x for x in var_split if x in expand_flags]
26619 + var_split.extend(expand_flags.difference(var_split))
26620 + has_wildcard = "*" in expand_flags
26621 + if has_wildcard:
26622 + var_split = [x for x in var_split if x != "*"]
26623 + has_iuse = set()
26624 + for x in self._iuse_effective:
26625 + if x[:prefix_len] == prefix:
26626 + has_iuse.add(x[prefix_len:])
26627 + if has_wildcard:
26628 + # * means to enable everything in IUSE that's not masked
26629 + if has_iuse:
26630 + usemask = self._usemask
26631 + for suffix in has_iuse:
26632 + x = prefix + suffix
26633 + if x not in usemask:
26634 + if suffix not in expand_flags:
26635 + var_split.append(suffix)
26636 + else:
26637 + # If there is a wildcard and no matching flags in IUSE then
26638 + # LINGUAS should be unset so that all .mo files are
26639 + # installed.
26640 + var_split = []
26641 + # Make the flags unique and filter them according to IUSE.
26642 + # Also, continue to preserve order for things like LINGUAS
26643 +            # and filter out any duplicates that the variable may contain.
26644 + filtered_var_split = []
26645 + remaining = has_iuse.intersection(var_split)
26646 + for x in var_split:
26647 + if x in remaining:
26648 + remaining.remove(x)
26649 + filtered_var_split.append(x)
26650 + var_split = filtered_var_split
26651 +
26652 + return " ".join(var_split)
26653 +
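For a concrete feel of the derivation in __getitem__ above, a tiny
standalone illustration with hypothetical data, keeping the configured
order first as the comments describe:

    use = {"linguas_de", "linguas_en", "doc"}
    configured = "en fr de".split()  # prior LINGUAS value; order matters

    prefix = "linguas_"
    enabled = {u[len(prefix):] for u in use if u.startswith(prefix)}
    # Configured entries first (preserving order), then extras.
    value = [x for x in configured if x in enabled]
    value.extend(sorted(enabled.difference(value)))

    print("LINGUAS =", " ".join(value))  # LINGUAS = en de
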
26654 + def _setcpv_recursion_gate(f):
26655 + """
26656 + Raise AssertionError for recursive setcpv calls.
26657 + """
26658 +
26659 + def wrapper(self, *args, **kwargs):
26660 + if hasattr(self, "_setcpv_active"):
26661 + raise AssertionError("setcpv recursion detected")
26662 + self._setcpv_active = True
26663 + try:
26664 + return f(self, *args, **kwargs)
26665 + finally:
26666 + del self._setcpv_active
26667 +
26668 + return wrapper
26669 +
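A standalone demo of the same gating idea, using a boolean flag rather
than attribute deletion:

    def recursion_gate(f):
        def wrapper(self, *args, **kwargs):
            if getattr(self, "_active", False):
                raise AssertionError("%s recursion detected" % f.__name__)
            self._active = True
            try:
                return f(self, *args, **kwargs)
            finally:
                self._active = False
        return wrapper

    class Demo:
        @recursion_gate
        def work(self, depth=0):
            if depth == 0:
                self.work(depth + 1)  # re-entrant call trips the gate

    try:
        Demo().work()
    except AssertionError as e:
        print(e)  # work recursion detected
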
26670 + @_setcpv_recursion_gate
26671 + def setcpv(self, mycpv, use_cache=None, mydb=None):
26672 + """
26673 +        Load a particular CPV into the config; this lets us see the
26674 +        default USE flags for a particular ebuild as well as the USE
26675 + flags from package.use.
26676 +
26677 + @param mycpv: A cpv to load
26678 + @type mycpv: string
26679 + @param mydb: a dbapi instance that supports aux_get with the IUSE key.
26680 + @type mydb: dbapi or derivative.
26681 + @rtype: None
26682 + """
26683 +
26684 + if use_cache is not None:
26685 + warnings.warn(
26686 + "The use_cache parameter for config.setcpv() is deprecated and without effect.",
26687 + DeprecationWarning,
26688 + stacklevel=2,
26689 + )
26690 +
26691 + self.modifying()
26692 +
26693 + pkg = None
26694 + built_use = None
26695 + explicit_iuse = None
26696 + if not isinstance(mycpv, str):
26697 + pkg = mycpv
26698 + mycpv = pkg.cpv
26699 + mydb = pkg._metadata
26700 + explicit_iuse = pkg.iuse.all
26701 + args_hash = (mycpv, id(pkg))
26702 + if pkg.built:
26703 + built_use = pkg.use.enabled
26704 + else:
26705 + args_hash = (mycpv, id(mydb))
26706 +
26707 + if args_hash == self._setcpv_args_hash:
26708 + return
26709 + self._setcpv_args_hash = args_hash
26710 +
26711 + has_changed = False
26712 + self.mycpv = mycpv
26713 + cat, pf = catsplit(mycpv)
26714 + cp = cpv_getkey(mycpv)
26715 + cpv_slot = self.mycpv
26716 + pkginternaluse = ""
26717 + pkginternaluse_list = []
26718 + feature_use = []
26719 + iuse = ""
26720 + pkg_configdict = self.configdict["pkg"]
26721 + previous_iuse = pkg_configdict.get("IUSE")
26722 + previous_iuse_effective = pkg_configdict.get("IUSE_EFFECTIVE")
26723 + previous_features = pkg_configdict.get("FEATURES")
26724 + previous_penv = self._penv
26725 +
26726 + aux_keys = self._setcpv_aux_keys
26727 +
26728 + # Discard any existing metadata and package.env settings from
26729 + # the previous package instance.
26730 + pkg_configdict.clear()
26731 +
26732 + pkg_configdict["CATEGORY"] = cat
26733 + pkg_configdict["PF"] = pf
26734 + repository = None
26735 + eapi = None
26736 + if mydb:
26737 + if not hasattr(mydb, "aux_get"):
26738 + for k in aux_keys:
26739 + if k in mydb:
26740 + # Make these lazy, since __getitem__ triggers
26741 + # evaluation of USE conditionals which can't
26742 + # occur until PORTAGE_USE is calculated below.
26743 + pkg_configdict.addLazySingleton(k, mydb.__getitem__, k)
26744 + else:
26745 + # When calling dbapi.aux_get(), grab USE for built/installed
26746 +                # packages since we want to save it in PORTAGE_BUILT_USE for
26747 + # evaluating conditional USE deps in atoms passed via IPC to
26748 + # helpers like has_version and best_version.
26749 + aux_keys = set(aux_keys)
26750 + if hasattr(mydb, "_aux_cache_keys"):
26751 + aux_keys = aux_keys.intersection(mydb._aux_cache_keys)
26752 + aux_keys.add("USE")
26753 + aux_keys = list(aux_keys)
26754 + for k, v in zip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
26755 + pkg_configdict[k] = v
26756 + built_use = frozenset(pkg_configdict.pop("USE").split())
26757 + if not built_use:
26758 + # Empty USE means this dbapi instance does not contain
26759 + # built packages.
26760 + built_use = None
26761 + eapi = pkg_configdict["EAPI"]
26762 +
26763 + repository = pkg_configdict.pop("repository", None)
26764 + if repository is not None:
26765 + pkg_configdict["PORTAGE_REPO_NAME"] = repository
26766 + iuse = pkg_configdict["IUSE"]
26767 + if pkg is None:
26768 + self.mycpv = _pkg_str(
26769 + self.mycpv, metadata=pkg_configdict, settings=self
26770 + )
26771 + cpv_slot = self.mycpv
26772 + else:
26773 + cpv_slot = pkg
26774 + for x in iuse.split():
26775 + if x.startswith("+"):
26776 + pkginternaluse_list.append(x[1:])
26777 + elif x.startswith("-"):
26778 + pkginternaluse_list.append(x)
26779 + pkginternaluse = " ".join(pkginternaluse_list)
26780 +
26781 + eapi_attrs = _get_eapi_attrs(eapi)
26782 +
26783 + if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
26784 + self.configdict["pkginternal"]["USE"] = pkginternaluse
26785 + has_changed = True
26786 +
26787 + repo_env = []
26788 + if repository and repository != Package.UNKNOWN_REPO:
26789 + repos = []
26790 + try:
26791 + repos.extend(
26792 + repo.name for repo in self.repositories[repository].masters
26793 + )
26794 + except KeyError:
26795 + pass
26796 + repos.append(repository)
26797 + for repo in repos:
26798 + d = self._repo_make_defaults.get(repo)
26799 + if d is None:
26800 + d = {}
26801 + else:
26802 + # make a copy, since we might modify it with
26803 + # package.use settings
26804 + d = d.copy()
26805 + cpdict = self._use_manager._repo_puse_dict.get(repo, {}).get(cp)
26806 + if cpdict:
26807 + repo_puse = ordered_by_atom_specificity(cpdict, cpv_slot)
26808 + if repo_puse:
26809 + for x in repo_puse:
26810 + d["USE"] = d.get("USE", "") + " " + " ".join(x)
26811 + if d:
26812 + repo_env.append(d)
26813 +
26814 + if repo_env or self.configdict["repo"]:
26815 + self.configdict["repo"].clear()
26816 + self.configdict["repo"].update(
26817 + stack_dicts(repo_env, incrementals=self.incrementals)
26818 + )
26819 + has_changed = True
26820 +
26821 + defaults = []
26822 + for i, pkgprofileuse_dict in enumerate(self._use_manager._pkgprofileuse):
26823 + if self.make_defaults_use[i]:
26824 + defaults.append(self.make_defaults_use[i])
26825 + cpdict = pkgprofileuse_dict.get(cp)
26826 + if cpdict:
26827 + pkg_defaults = ordered_by_atom_specificity(cpdict, cpv_slot)
26828 + if pkg_defaults:
26829 + defaults.extend(pkg_defaults)
26830 + defaults = " ".join(defaults)
26831 + if defaults != self.configdict["defaults"].get("USE", ""):
26832 + self.configdict["defaults"]["USE"] = defaults
26833 + has_changed = True
26834 +
26835 + useforce = self._use_manager.getUseForce(cpv_slot)
26836 + if useforce != self.useforce:
26837 + self.useforce = useforce
26838 + has_changed = True
26839 +
26840 + usemask = self._use_manager.getUseMask(cpv_slot)
26841 + if usemask != self.usemask:
26842 + self.usemask = usemask
26843 + has_changed = True
26844 +
26845 + oldpuse = self.puse
26846 + self.puse = self._use_manager.getPUSE(cpv_slot)
26847 + if oldpuse != self.puse:
26848 + has_changed = True
26849 + self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
26850 + self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
26851 +
26852 + if previous_features:
26853 + # The package from the previous setcpv call had package.env
26854 + # settings which modified FEATURES. Therefore, trigger a
26855 + # regenerate() call in order to ensure that self.features
26856 + # is accurate.
26857 + has_changed = True
26858 + # Prevent stale features USE from corrupting the evaluation
26859 + # of USE conditional RESTRICT.
26860 + self.configdict["features"]["USE"] = self._default_features_use
26861 +
26862 + self._penv = []
26863 + cpdict = self._penvdict.get(cp)
26864 + if cpdict:
26865 + penv_matches = ordered_by_atom_specificity(cpdict, cpv_slot)
26866 + if penv_matches:
26867 + for x in penv_matches:
26868 + self._penv.extend(x)
26869 +
26870 + bashrc_files = []
26871 +
26872 + for profile, profile_bashrc in zip(
26873 + self._locations_manager.profiles_complex, self._profile_bashrc
26874 + ):
26875 + if profile_bashrc:
26876 + bashrc_files.append(os.path.join(profile.location, "profile.bashrc"))
26877 + if profile in self._pbashrcdict:
26878 + cpdict = self._pbashrcdict[profile].get(cp)
26879 + if cpdict:
26880 + bashrc_matches = ordered_by_atom_specificity(cpdict, cpv_slot)
26881 + for x in bashrc_matches:
26882 + bashrc_files.extend(x)
26883 +
26884 + self._pbashrc = tuple(bashrc_files)
26885 +
26886 + protected_pkg_keys = set(pkg_configdict)
26887 + protected_pkg_keys.discard("USE")
26888 +
26889 + # If there are _any_ package.env settings for this package
26890 + # then it automatically triggers config.reset(), in order
26891 + # to account for possible incremental interaction between
26892 + # package.use, package.env, and overrides from the calling
26893 + # environment (configdict['env']).
26894 + if self._penv:
26895 + has_changed = True
26896 + # USE is special because package.use settings override
26897 + # it. Discard any package.use settings here and they'll
26898 + # be added back later.
26899 + pkg_configdict.pop("USE", None)
26900 + self._grab_pkg_env(
26901 + self._penv, pkg_configdict, protected_keys=protected_pkg_keys
26902 + )
26903 +
26904 + # Now add package.use settings, which override USE from
26905 + # package.env
26906 + if self.puse:
26907 + if "USE" in pkg_configdict:
26908 + pkg_configdict["USE"] = pkg_configdict["USE"] + " " + self.puse
26909 + else:
26910 + pkg_configdict["USE"] = self.puse
26911 +
26912 + elif previous_penv:
26913 + has_changed = True
26914 +
26915 + if not (
26916 + previous_iuse == iuse
26917 +            and (previous_iuse_effective is not None) == eapi_attrs.iuse_effective
26918 + ):
26919 + has_changed = True
26920 +
26921 + if has_changed:
26922 + # This can modify self.features due to package.env settings.
26923 + self.reset(keeping_pkg=1)
26924 +
26925 + if "test" in self.features:
26926 + # This is independent of IUSE and RESTRICT, so that the same
26927 + # value can be shared between packages with different settings,
26928 + # which is important when evaluating USE conditional RESTRICT.
26929 + feature_use.append("test")
26930 +
26931 + feature_use = " ".join(feature_use)
26932 + if feature_use != self.configdict["features"]["USE"]:
26933 + # Regenerate USE for evaluation of conditional RESTRICT.
26934 + self.configdict["features"]["USE"] = feature_use
26935 + self.reset(keeping_pkg=1)
26936 + has_changed = True
26937 +
26938 + if explicit_iuse is None:
26939 + explicit_iuse = frozenset(x.lstrip("+-") for x in iuse.split())
26940 + if eapi_attrs.iuse_effective:
26941 + iuse_implicit_match = self._iuse_effective_match
26942 + else:
26943 + iuse_implicit_match = self._iuse_implicit_match
26944 +
26945 + if pkg is None:
26946 + raw_properties = pkg_configdict.get("PROPERTIES")
26947 + raw_restrict = pkg_configdict.get("RESTRICT")
26948 + else:
26949 + raw_properties = pkg._raw_metadata["PROPERTIES"]
26950 + raw_restrict = pkg._raw_metadata["RESTRICT"]
26951 +
26952 + restrict_test = False
26953 + if raw_restrict:
26954 + try:
26955 + if built_use is not None:
26956 + properties = use_reduce(
26957 + raw_properties, uselist=built_use, flat=True
26958 + )
26959 + restrict = use_reduce(raw_restrict, uselist=built_use, flat=True)
26960 + else:
26961 + properties = use_reduce(
26962 + raw_properties,
26963 + uselist=frozenset(
26964 + x
26965 + for x in self["USE"].split()
26966 + if x in explicit_iuse or iuse_implicit_match(x)
26967 + ),
26968 + flat=True,
26969 + )
26970 + restrict = use_reduce(
26971 + raw_restrict,
26972 + uselist=frozenset(
26973 + x
26974 + for x in self["USE"].split()
26975 + if x in explicit_iuse or iuse_implicit_match(x)
26976 + ),
26977 + flat=True,
26978 + )
26979 + except PortageException:
26980 + pass
26981 + else:
26982 + allow_test = self.get("ALLOW_TEST", "").split()
26983 + restrict_test = (
26984 + "test" in restrict
26985 +                    and "all" not in allow_test
26986 + and not ("test_network" in properties and "network" in allow_test)
26987 + )
26988 +
26989 + if restrict_test and "test" in self.features:
26990 + # Handle it like IUSE="-test", since features USE is
26991 + # independent of RESTRICT.
26992 + pkginternaluse_list.append("-test")
26993 + pkginternaluse = " ".join(pkginternaluse_list)
26994 + self.configdict["pkginternal"]["USE"] = pkginternaluse
26995 + # TODO: can we avoid that?
26996 + self.reset(keeping_pkg=1)
26997 + has_changed = True
26998 +
26999 + env_configdict = self.configdict["env"]
27000 +
27001 + # Ensure that "pkg" values are always preferred over "env" values.
27002 + # This must occur _after_ the above reset() call, since reset()
27003 + # copies values from self.backupenv.
27004 + for k in protected_pkg_keys:
27005 + env_configdict.pop(k, None)
27006 +
27007 + lazy_vars = self._lazy_vars(built_use, self)
27008 + env_configdict.addLazySingleton(
27009 + "ACCEPT_LICENSE", lazy_vars.__getitem__, "ACCEPT_LICENSE"
27010 + )
27011 + env_configdict.addLazySingleton(
27012 + "PORTAGE_PROPERTIES", lazy_vars.__getitem__, "PORTAGE_PROPERTIES"
27013 + )
27014 + env_configdict.addLazySingleton(
27015 + "PORTAGE_RESTRICT", lazy_vars.__getitem__, "PORTAGE_RESTRICT"
27016 + )
27017 +
27018 + if built_use is not None:
27019 + pkg_configdict["PORTAGE_BUILT_USE"] = " ".join(built_use)
27020 +
27021 + # If reset() has not been called, it's safe to return
27022 + # early if IUSE has not changed.
27023 + if not has_changed:
27024 + return
27025 +
27026 + # Filter out USE flags that aren't part of IUSE. This has to
27027 + # be done for every setcpv() call since practically every
27028 + # package has different IUSE.
27029 + use = set(self["USE"].split())
27030 + unfiltered_use = frozenset(use)
27031 +
27032 + if eapi_attrs.iuse_effective:
27033 + portage_iuse = set(self._iuse_effective)
27034 + portage_iuse.update(explicit_iuse)
27035 + if built_use is not None:
27036 + # When the binary package was built, the profile may have
27037 + # had different IUSE_IMPLICIT settings, so any member of
27038 + # the built USE setting is considered to be a member of
27039 + # IUSE_EFFECTIVE (see bug 640318).
27040 + portage_iuse.update(built_use)
27041 + self.configdict["pkg"]["IUSE_EFFECTIVE"] = " ".join(sorted(portage_iuse))
27042 +
27043 + self.configdict["env"]["BASH_FUNC____in_portage_iuse%%"] = (
27044 + "() { "
27045 + "if [[ ${#___PORTAGE_IUSE_HASH[@]} -lt 1 ]]; then "
27046 + " declare -gA ___PORTAGE_IUSE_HASH=(%s); "
27047 + "fi; "
27048 + "[[ -n ${___PORTAGE_IUSE_HASH[$1]} ]]; "
27049 + "}"
27050 + ) % " ".join('["%s"]=1' % x for x in portage_iuse)
27051 + else:
27052 + portage_iuse = self._get_implicit_iuse()
27053 + portage_iuse.update(explicit_iuse)
27054 +
27055 +            # _get_implicit_iuse() returns patterns containing regular
27056 +            # expressions, so we can't use the (faster) hash map. Fall
27057 +            # back to implementing ___in_portage_iuse() the older/slower way.
27058 +
27059 + # PORTAGE_IUSE is not always needed so it's lazily evaluated.
27060 + self.configdict["env"].addLazySingleton(
27061 + "PORTAGE_IUSE", _lazy_iuse_regex, portage_iuse
27062 + )
27063 + self.configdict["env"][
27064 + "BASH_FUNC____in_portage_iuse%%"
27065 + ] = "() { [[ $1 =~ ${PORTAGE_IUSE} ]]; }"
27066 +
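The exported helper above seeds a bash associative array from the
effective IUSE, so flag-membership tests are O(1) inside the ebuild
environment. A sketch of how the function body string is assembled
(hypothetical flag set):

    portage_iuse = {"test", "doc", "linguas_en"}

    bash_func = (
        "() { "
        "if [[ ${#___PORTAGE_IUSE_HASH[@]} -lt 1 ]]; then "
        " declare -gA ___PORTAGE_IUSE_HASH=(%s); "
        "fi; "
        "[[ -n ${___PORTAGE_IUSE_HASH[$1]} ]]; "
        "}"
    ) % " ".join('["%s"]=1' % x for x in sorted(portage_iuse))

    # Exported under the BASH_FUNC____in_portage_iuse%% key so bash
    # imports it as a shell function when the environment is spawned.
    print(bash_func)
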
27067 + ebuild_force_test = not restrict_test and self.get("EBUILD_FORCE_TEST") == "1"
27068 +
27069 + if "test" in explicit_iuse or iuse_implicit_match("test"):
27070 + if "test" in self.features:
27071 + if ebuild_force_test and "test" in self.usemask:
27072 + self.usemask = frozenset(x for x in self.usemask if x != "test")
27073 + if restrict_test or ("test" in self.usemask and not ebuild_force_test):
27074 + # "test" is in IUSE and USE=test is masked, so execution
27075 + # of src_test() probably is not reliable. Therefore,
27076 + # temporarily disable FEATURES=test just for this package.
27077 + self["FEATURES"] = " ".join(x for x in self.features if x != "test")
27078 +
27079 + # Allow _* flags from USE_EXPAND wildcards to pass through here.
27080 + use.difference_update(
27081 + [
27082 + x
27083 + for x in use
27084 + if (x not in explicit_iuse and not iuse_implicit_match(x))
27085 + and x[-2:] != "_*"
27086 + ]
27087 + )
27088 +
27089 + # Use the calculated USE flags to regenerate the USE_EXPAND flags so
27090 + # that they are consistent. For optimal performance, use slice
27091 + # comparison instead of startswith().
27092 + use_expand_split = set(x.lower() for x in self.get("USE_EXPAND", "").split())
27093 + lazy_use_expand = self._lazy_use_expand(
27094 + self,
27095 + unfiltered_use,
27096 + use,
27097 + self.usemask,
27098 + portage_iuse,
27099 + use_expand_split,
27100 + self._use_expand_dict,
27101 + )
27102 +
27103 +        use_expand_iuses = {k: set() for k in use_expand_split}
27104 + for x in portage_iuse:
27105 + x_split = x.split("_")
27106 + if len(x_split) == 1:
27107 + continue
27108 + for i in range(len(x_split) - 1):
27109 + k = "_".join(x_split[: i + 1])
27110 + if k in use_expand_split:
27111 + use_expand_iuses[k].add(x)
27112 + break
27113 +
27114 + for k, use_expand_iuse in use_expand_iuses.items():
27115 + if k + "_*" in use:
27116 + use.update(x for x in use_expand_iuse if x not in usemask)
27117 + k = k.upper()
27118 + self.configdict["env"].addLazySingleton(k, lazy_use_expand.__getitem__, k)
27119 +
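The grouping loop above assigns each IUSE flag to the first matching
USE_EXPAND variable by testing successively longer "_"-joined
prefixes. A reduced sketch with hypothetical data:

    use_expand_split = ["linguas", "video_cards"]
    portage_iuse = ["linguas_en", "video_cards_radeon", "doc"]

    use_expand_iuses = {k: set() for k in use_expand_split}
    for flag in portage_iuse:
        parts = flag.split("_")
        for i in range(len(parts) - 1):
            prefix = "_".join(parts[: i + 1])
            if prefix in use_expand_split:
                use_expand_iuses[prefix].add(flag)
                break  # shortest matching prefix wins

    print(use_expand_iuses)
    # {'linguas': {'linguas_en'}, 'video_cards': {'video_cards_radeon'}}
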
27120 + for k in self.get("USE_EXPAND_UNPREFIXED", "").split():
27121 + var_split = self.get(k, "").split()
27122 + var_split = [x for x in var_split if x in use]
27123 + if var_split:
27124 + self.configlist[-1][k] = " ".join(var_split)
27125 + elif k in self:
27126 + self.configlist[-1][k] = ""
27127 +
27128 + # Filtered for the ebuild environment. Store this in a separate
27129 + # attribute since we still want to be able to see global USE
27130 + # settings for things like emerge --info.
27131 +
27132 + self.configdict["env"]["PORTAGE_USE"] = " ".join(
27133 + sorted(x for x in use if x[-2:] != "_*")
27134 + )
27135 +
27136 + # Clear the eapi cache here rather than in the constructor, since
27137 + # setcpv triggers lazy instantiation of things like _use_manager.
27138 + _eapi_cache.clear()
27139 +
27140 + def _grab_pkg_env(self, penv, container, protected_keys=None):
27141 + if protected_keys is None:
27142 + protected_keys = ()
27143 + abs_user_config = os.path.join(self["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
27144 + non_user_variables = self._non_user_variables
27145 + # Make a copy since we don't want per-package settings
27146 + # to pollute the global expand_map.
27147 + expand_map = self._expand_map.copy()
27148 + incrementals = self.incrementals
27149 + for envname in penv:
27150 + penvfile = os.path.join(abs_user_config, "env", envname)
27151 + penvconfig = getconfig(
27152 + penvfile,
27153 + tolerant=self._tolerant,
27154 + allow_sourcing=True,
27155 + expand=expand_map,
27156 + )
27157 + if penvconfig is None:
27158 + writemsg(
27159 + "!!! %s references non-existent file: %s\n"
27160 + % (os.path.join(abs_user_config, "package.env"), penvfile),
27161 + noiselevel=-1,
27162 + )
27163 + else:
27164 + for k, v in penvconfig.items():
27165 + if k in protected_keys or k in non_user_variables:
27166 + writemsg(
27167 + "!!! Illegal variable "
27168 + + "'%s' assigned in '%s'\n" % (k, penvfile),
27169 + noiselevel=-1,
27170 + )
27171 + elif k in incrementals:
27172 + if k in container:
27173 + container[k] = container[k] + " " + v
27174 + else:
27175 + container[k] = v
27176 + else:
27177 + container[k] = v
27178 +
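A reduced model of the incremental merge at the end of
_grab_pkg_env(): incremental variables append, everything else
overwrites. The variable names here are hypothetical:

    incrementals = {"USE", "FEATURES"}

    def merge_pkg_env(container, penvconfig):
        for k, v in penvconfig.items():
            if k in incrementals and k in container:
                container[k] = container[k] + " " + v
            else:
                container[k] = v

    pkg = {"USE": "doc", "CFLAGS": "-O2"}
    merge_pkg_env(pkg, {"USE": "-doc test", "CFLAGS": "-O3"})
    print(pkg)  # {'USE': 'doc -doc test', 'CFLAGS': '-O3'}
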
27179 + def _iuse_effective_match(self, flag):
27180 + return flag in self._iuse_effective
27181 +
27182 + def _calc_iuse_effective(self):
27183 + """
27184 + Beginning with EAPI 5, IUSE_EFFECTIVE is defined by PMS.
27185 + """
27186 + iuse_effective = []
27187 + iuse_effective.extend(self.get("IUSE_IMPLICIT", "").split())
27188 +
27189 + # USE_EXPAND_IMPLICIT should contain things like ARCH, ELIBC,
27190 + # KERNEL, and USERLAND.
27191 + use_expand_implicit = frozenset(self.get("USE_EXPAND_IMPLICIT", "").split())
27192 +
27193 + # USE_EXPAND_UNPREFIXED should contain at least ARCH, and
27194 + # USE_EXPAND_VALUES_ARCH should contain all valid ARCH flags.
27195 + for v in self.get("USE_EXPAND_UNPREFIXED", "").split():
27196 + if v not in use_expand_implicit:
27197 + continue
27198 + iuse_effective.extend(self.get("USE_EXPAND_VALUES_" + v, "").split())
27199 +
27200 + use_expand = frozenset(self.get("USE_EXPAND", "").split())
27201 + for v in use_expand_implicit:
27202 + if v not in use_expand:
27203 + continue
27204 + lower_v = v.lower()
27205 + for x in self.get("USE_EXPAND_VALUES_" + v, "").split():
27206 + iuse_effective.append(lower_v + "_" + x)
27207 +
27208 + return frozenset(iuse_effective)
27209 +
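A worked example of the computation above, with hypothetical profile
variable values:

    profile = {
        "IUSE_IMPLICIT": "prefix test",
        "USE_EXPAND_IMPLICIT": "ARCH ELIBC",
        "USE_EXPAND_UNPREFIXED": "ARCH",
        "USE_EXPAND": "ELIBC",
        "USE_EXPAND_VALUES_ARCH": "amd64 arm64",
        "USE_EXPAND_VALUES_ELIBC": "glibc musl",
    }

    iuse_effective = profile["IUSE_IMPLICIT"].split()
    implicit = set(profile["USE_EXPAND_IMPLICIT"].split())

    # Unprefixed implicit variables (ARCH) contribute values as-is.
    for v in profile["USE_EXPAND_UNPREFIXED"].split():
        if v in implicit:
            iuse_effective.extend(
                profile.get("USE_EXPAND_VALUES_" + v, "").split()
            )

    # Prefixed implicit variables (ELIBC) contribute lowercased,
    # prefixed flags such as elibc_glibc.
    use_expand = set(profile["USE_EXPAND"].split())
    for v in implicit:
        if v in use_expand:
            for x in profile.get("USE_EXPAND_VALUES_" + v, "").split():
                iuse_effective.append(v.lower() + "_" + x)

    print(sorted(frozenset(iuse_effective)))
    # ['amd64', 'arm64', 'elibc_glibc', 'elibc_musl', 'prefix', 'test']
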
27210 + def _get_implicit_iuse(self):
27211 + """
27212 + Prior to EAPI 5, these flags are considered to
27213 + be implicit members of IUSE:
27214 + * Flags derived from ARCH
27215 + * Flags derived from USE_EXPAND_HIDDEN variables
27216 + * Masked flags, such as those from {,package}use.mask
27217 + * Forced flags, such as those from {,package}use.force
27218 + * build and bootstrap flags used by bootstrap.sh
27219 + """
27220 + iuse_implicit = set()
27221 + # Flags derived from ARCH.
27222 + arch = self.configdict["defaults"].get("ARCH")
27223 + if arch:
27224 + iuse_implicit.add(arch)
27225 + iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
27226 +
27227 + # Flags derived from USE_EXPAND_HIDDEN variables
27228 + # such as ELIBC, KERNEL, and USERLAND.
27229 + use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
27230 + for x in use_expand_hidden:
27231 + iuse_implicit.add(x.lower() + "_.*")
27232 +
27233 + # Flags that have been masked or forced.
27234 + iuse_implicit.update(self.usemask)
27235 + iuse_implicit.update(self.useforce)
27236 +
27237 + # build and bootstrap flags used by bootstrap.sh
27238 + iuse_implicit.add("build")
27239 + iuse_implicit.add("bootstrap")
27240 +
27241 + return iuse_implicit
27242 +
27243 + def _getUseMask(self, pkg, stable=None):
27244 + return self._use_manager.getUseMask(pkg, stable=stable)
27245 +
27246 + def _getUseForce(self, pkg, stable=None):
27247 + return self._use_manager.getUseForce(pkg, stable=stable)
27248 +
27249 + def _getMaskAtom(self, cpv, metadata):
27250 + """
27251 + Take a package and return a matching package.mask atom, or None if no
27252 + such atom exists or it has been cancelled by package.unmask.
27253 +
27254 + @param cpv: The package name
27255 + @type cpv: String
27256 + @param metadata: A dictionary of raw package metadata
27257 + @type metadata: dict
27258 + @rtype: String
27259 + @return: A matching atom string or None if one is not found.
27260 + """
27261 + return self._mask_manager.getMaskAtom(
27262 + cpv, metadata["SLOT"], metadata.get("repository")
27263 + )
27264 +
27265 + def _getRawMaskAtom(self, cpv, metadata):
27266 + """
27267 + Take a package and return a matching package.mask atom, or None if no
27268 + such atom exists or it has been cancelled by package.unmask.
27269 +
27270 + @param cpv: The package name
27271 + @type cpv: String
27272 + @param metadata: A dictionary of raw package metadata
27273 + @type metadata: dict
27274 + @rtype: String
27275 + @return: A matching atom string or None if one is not found.
27276 + """
27277 + return self._mask_manager.getRawMaskAtom(
27278 + cpv, metadata["SLOT"], metadata.get("repository")
27279 + )
27280 +
27281 + def _getProfileMaskAtom(self, cpv, metadata):
27282 + """
27283 + Take a package and return a matching profile atom, or None if no
27284 + such atom exists. Note that a profile atom may or may not have a "*"
27285 + prefix.
27286 +
27287 + @param cpv: The package name
27288 + @type cpv: String
27289 + @param metadata: A dictionary of raw package metadata
27290 + @type metadata: dict
27291 + @rtype: String
27292 + @return: A matching profile atom string or None if one is not found.
27293 + """
27294 +
27295 + warnings.warn(
27296 + "The config._getProfileMaskAtom() method is deprecated.",
27297 + DeprecationWarning,
27298 + stacklevel=2,
27299 + )
27300 +
27301 + cp = cpv_getkey(cpv)
27302 + profile_atoms = self.prevmaskdict.get(cp)
27303 + if profile_atoms:
27304 + pkg = "".join((cpv, _slot_separator, metadata["SLOT"]))
27305 + repo = metadata.get("repository")
27306 + if repo and repo != Package.UNKNOWN_REPO:
27307 + pkg = "".join((pkg, _repo_separator, repo))
27308 + pkg_list = [pkg]
27309 + for x in profile_atoms:
27310 + if match_from_list(x, pkg_list):
27311 + continue
27312 + return x
27313 + return None
27314 +
27315 + def _isStable(self, pkg):
27316 + return self._keywords_manager.isStable(
27317 + pkg,
27318 + self.get("ACCEPT_KEYWORDS", ""),
27319 + self.configdict["backupenv"].get("ACCEPT_KEYWORDS", ""),
27320 + )
27321 +
27322 + def _getKeywords(self, cpv, metadata):
27323 + return self._keywords_manager.getKeywords(
27324 + cpv,
27325 + metadata["SLOT"],
27326 + metadata.get("KEYWORDS", ""),
27327 + metadata.get("repository"),
27328 + )
27329 +
27330 + def _getMissingKeywords(self, cpv, metadata):
27331 + """
27332 + Take a package and return a list of any KEYWORDS that the user may
27333 + need to accept for the given package. If the KEYWORDS are empty
27334 + and the ** keyword has not been accepted, the returned list will
27335 + contain ** alone (in order to distinguish from the case of "none
27336 + missing").
27337 +
27338 + @param cpv: The package name (for package.keywords support)
27339 + @type cpv: String
27340 + @param metadata: A dictionary of raw package metadata
27341 + @type metadata: dict
27342 + @rtype: List
27343 + @return: A list of KEYWORDS that have not been accepted.
27344 + """
27345 +
27346 + # Hack: Need to check the env directly here, since otherwise stacking
27347 + # doesn't work properly because negative values are lost in the config
27348 + # object (bug #139600)
27349 + backuped_accept_keywords = self.configdict["backupenv"].get(
27350 + "ACCEPT_KEYWORDS", ""
27351 + )
27352 + global_accept_keywords = self.get("ACCEPT_KEYWORDS", "")
27353 +
27354 + return self._keywords_manager.getMissingKeywords(
27355 + cpv,
27356 + metadata["SLOT"],
27357 + metadata.get("KEYWORDS", ""),
27358 + metadata.get("repository"),
27359 + global_accept_keywords,
27360 + backuped_accept_keywords,
27361 + )
27362 +
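# Illustrative sketch (not from the commit): a simplified reading of the
# "**" rule documented above. With empty KEYWORDS and no accepted "**",
# the missing list is ["**"], distinguishing it from "none missing".
# This approximates KeywordsManager behavior; acceptance of "*" and "~*"
# is not modeled here.
def missing_keywords(keywords, accepted):
    if not keywords:
        return [] if "**" in accepted else ["**"]
    return [k for k in keywords if k not in accepted]

assert missing_keywords([], {"amd64"}) == ["**"]
assert missing_keywords(["~amd64"], {"amd64"}) == ["~amd64"]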
27363 + def _getRawMissingKeywords(self, cpv, metadata):
27364 + """
27365 + Take a package and return a list of any KEYWORDS that the user may
27366 + need to accept for the given package. If the KEYWORDS are empty,
27367 + the returned list will contain ** alone (in order to distinguish
27368 + from the case of "none missing"). This DOES NOT apply any user config
27369 + package.accept_keywords acceptance.
27370 +
27371 + @param cpv: The package name (for package.keywords support)
27372 + @type cpv: String
27373 + @param metadata: A dictionary of raw package metadata
27374 + @type metadata: dict
27375 + @rtype: List
27376 + @return: lists of the KEYWORDS that have not been accepted
27377 + and of the keywords it looked for.
27378 + """
27379 + return self._keywords_manager.getRawMissingKeywords(
27380 + cpv,
27381 + metadata["SLOT"],
27382 + metadata.get("KEYWORDS", ""),
27383 + metadata.get("repository"),
27384 + self.get("ACCEPT_KEYWORDS", ""),
27385 + )
27386 +
27387 + def _getPKeywords(self, cpv, metadata):
27388 + global_accept_keywords = self.get("ACCEPT_KEYWORDS", "")
27389 +
27390 + return self._keywords_manager.getPKeywords(
27391 + cpv, metadata["SLOT"], metadata.get("repository"), global_accept_keywords
27392 + )
27393 +
27394 + def _getMissingLicenses(self, cpv, metadata):
27395 + """
27396 + Take a LICENSE string and return a list of any licenses that the user
27397 + may need to accept for the given package. The returned list will not
27398 + contain any licenses that have already been accepted. This method
27399 + can throw an InvalidDependString exception.
27400 +
27401 + @param cpv: The package name (for package.license support)
27402 + @type cpv: String
27403 + @param metadata: A dictionary of raw package metadata
27404 + @type metadata: dict
27405 + @rtype: List
27406 + @return: A list of licenses that have not been accepted.
27407 + """
27408 + return self._license_manager.getMissingLicenses(
27409 + cpv,
27410 + metadata["USE"],
27411 + metadata["LICENSE"],
27412 + metadata["SLOT"],
27413 + metadata.get("repository"),
27414 + )
27415 +
27416 + def _getMissingProperties(self, cpv, metadata):
27417 + """
27418 + Take a PROPERTIES string and return a list of any properties the user
27419 + may need to accept for the given package. The returned list will not
27420 + contain any properties that have already been accepted. This method
27421 + can throw an InvalidDependString exception.
27422 +
27423 + @param cpv: The package name (for package.properties support)
27424 + @type cpv: String
27425 + @param metadata: A dictionary of raw package metadata
27426 + @type metadata: dict
27427 + @rtype: List
27428 + @return: A list of properties that have not been accepted.
27429 + """
27430 + accept_properties = self._accept_properties
27431 + try:
27432 + cpv.slot
27433 + except AttributeError:
27434 + cpv = _pkg_str(cpv, metadata=metadata, settings=self)
27435 + cp = cpv_getkey(cpv)
27436 + cpdict = self._ppropertiesdict.get(cp)
27437 + if cpdict:
27438 + pproperties_list = ordered_by_atom_specificity(cpdict, cpv)
27439 + if pproperties_list:
27440 + accept_properties = list(self._accept_properties)
27441 + for x in pproperties_list:
27442 + accept_properties.extend(x)
27443 +
27444 + properties_str = metadata.get("PROPERTIES", "")
27445 + properties = set(use_reduce(properties_str, matchall=1, flat=True))
27446 +
27447 + acceptable_properties = set()
27448 + for x in accept_properties:
27449 + if x == "*":
27450 + acceptable_properties.update(properties)
27451 + elif x == "-*":
27452 + acceptable_properties.clear()
27453 + elif x[:1] == "-":
27454 + acceptable_properties.discard(x[1:])
27455 + else:
27456 + acceptable_properties.add(x)
27457 +
27458 + if "?" in properties_str:
27459 + use = metadata["USE"].split()
27460 + else:
27461 + use = []
27462 +
27463 + return [
27464 + x
27465 + for x in use_reduce(properties_str, uselist=use, flat=True)
27466 + if x not in acceptable_properties
27467 + ]
27468 +
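# Illustrative sketch (not from the commit): the incremental reduction
# used above for ACCEPT_PROPERTIES (and identically for ACCEPT_RESTRICT
# in _getMissingRestrict below), extracted as a standalone helper.
def reduce_accept_list(accept_tokens, all_values):
    acceptable = set()
    for x in accept_tokens:
        if x == "*":            # accept everything seen so far
            acceptable.update(all_values)
        elif x == "-*":         # reset the accumulated set
            acceptable.clear()
        elif x[:1] == "-":      # revoke a single token
            acceptable.discard(x[1:])
        else:                   # accept a single token
            acceptable.add(x)
    return acceptable

assert reduce_accept_list(["*", "-interactive"], {"interactive", "live"}) == {"live"}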
27469 + def _getMissingRestrict(self, cpv, metadata):
27470 + """
27471 + Take a RESTRICT string and return a list of any tokens the user
27472 + may need to accept for the given package. The returned list will not
27473 + contain any tokens that have already been accepted. This method
27474 + can throw an InvalidDependString exception.
27475 +
27476 + @param cpv: The package name (for package.accept_restrict support)
27477 + @type cpv: String
27478 + @param metadata: A dictionary of raw package metadata
27479 + @type metadata: dict
27480 + @rtype: List
27481 + @return: A list of tokens that have not been accepted.
27482 + """
27483 + accept_restrict = self._accept_restrict
27484 + try:
27485 + cpv.slot
27486 + except AttributeError:
27487 + cpv = _pkg_str(cpv, metadata=metadata, settings=self)
27488 + cp = cpv_getkey(cpv)
27489 + cpdict = self._paccept_restrict.get(cp)
27490 + if cpdict:
27491 + paccept_restrict_list = ordered_by_atom_specificity(cpdict, cpv)
27492 + if paccept_restrict_list:
27493 + accept_restrict = list(self._accept_restrict)
27494 + for x in paccept_restrict_list:
27495 + accept_restrict.extend(x)
27496 +
27497 + restrict_str = metadata.get("RESTRICT", "")
27498 + all_restricts = set(use_reduce(restrict_str, matchall=1, flat=True))
27499 +
27500 + acceptable_restricts = set()
27501 + for x in accept_restrict:
27502 + if x == "*":
27503 + acceptable_restricts.update(all_restricts)
27504 + elif x == "-*":
27505 + acceptable_restricts.clear()
27506 + elif x[:1] == "-":
27507 + acceptable_restricts.discard(x[1:])
27508 + else:
27509 + acceptable_restricts.add(x)
27510 +
27511 + if "?" in restrict_str:
27512 + use = metadata["USE"].split()
27513 + else:
27514 + use = []
27515 +
27516 + return [
27517 + x
27518 + for x in use_reduce(restrict_str, uselist=use, flat=True)
27519 + if x not in acceptable_restricts
27520 + ]
27521 +
27522 + def _accept_chost(self, cpv, metadata):
27523 + """
27524 + @return: True if pkg CHOST is accepted, False otherwise.
27525 + """
27526 + if self._accept_chost_re is None:
27527 + accept_chost = self.get("ACCEPT_CHOSTS", "").split()
27528 + if not accept_chost:
27529 + chost = self.get("CHOST")
27530 + if chost:
27531 + accept_chost.append(chost)
27532 + if not accept_chost:
27533 + self._accept_chost_re = re.compile(".*")
27534 + elif len(accept_chost) == 1:
27535 + try:
27536 + self._accept_chost_re = re.compile(r"^%s$" % accept_chost[0])
27537 + except re.error as e:
27538 + writemsg(
27539 + _("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n")
27540 + % (accept_chost[0], e),
27541 + noiselevel=-1,
27542 + )
27543 + self._accept_chost_re = re.compile("^$")
27544 + else:
27545 + try:
27546 + self._accept_chost_re = re.compile(
27547 + r"^(%s)$" % "|".join(accept_chost)
27548 + )
27549 + except re.error as e:
27550 + writemsg(
27551 + _("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n")
27552 + % (" ".join(accept_chost), e),
27553 + noiselevel=-1,
27554 + )
27555 + self._accept_chost_re = re.compile("^$")
27556 +
27557 + pkg_chost = metadata.get("CHOST", "")
27558 + return not pkg_chost or self._accept_chost_re.match(pkg_chost) is not None
27559 +
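# Illustrative sketch (not from the commit): building the multi-value
# ACCEPT_CHOSTS matcher as above, with hypothetical CHOST values.
import re

accept_chost = ["x86_64-pc-linux-gnu", "i686-pc-linux-gnu"]
accept_chost_re = re.compile(r"^(%s)$" % "|".join(accept_chost))
assert accept_chost_re.match("x86_64-pc-linux-gnu")
assert accept_chost_re.match("aarch64-unknown-linux-gnu") is None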
27560 + def setinst(self, mycpv, mydbapi):
27561 + """This used to update the preferences for old-style virtuals.
27562 + It is a no-op now."""
27563 + pass
27564 +
27565 + def reload(self):
27566 + """Reload things like /etc/profile.env that can change during runtime."""
27567 + env_d_filename = os.path.join(self["EROOT"], "etc", "profile.env")
27568 + self.configdict["env.d"].clear()
27569 + env_d = getconfig(env_d_filename, tolerant=self._tolerant, expand=False)
27570 + if env_d:
27571 + # env_d will be None if profile.env doesn't exist.
27572 + for k in self._env_d_blacklist:
27573 + env_d.pop(k, None)
27574 + self.configdict["env.d"].update(env_d)
27575 +
27576 + def regenerate(self, useonly=0, use_cache=None):
27577 + """
27578 + Regenerate settings.
27579 + This involves regenerating valid USE flags, re-expanding USE_EXPAND flags,
27580 + re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
27581 + variables. This also updates the env.d configdict; useful in case an ebuild
27582 + changes the environment.
27583 +
27584 + If FEATURES has already been stacked, it is not stacked twice.
27585 +
27586 + @param useonly: Only regenerate USE flags (not any other incrementals)
27587 + @type useonly: Boolean
27588 + @rtype: None
27589 + """
27590 +
27591 + if use_cache is not None:
27592 + warnings.warn(
27593 + "The use_cache parameter for config.regenerate() is deprecated and without effect.",
27594 + DeprecationWarning,
27595 + stacklevel=2,
27596 + )
27597 +
27598 + self.modifying()
27599 +
27600 + if useonly:
27601 + myincrementals = ["USE"]
27602 + else:
27603 + myincrementals = self.incrementals
27604 + myincrementals = set(myincrementals)
27605 +
27606 + # Process USE last because it depends on USE_EXPAND which is also
27607 + # an incremental!
27608 + myincrementals.discard("USE")
27609 +
27610 + mydbs = self.configlist[:-1]
27611 + mydbs.append(self.backupenv)
27612 +
27613 + # ACCEPT_LICENSE is a lazily evaluated incremental, so that * can be
27614 + # used to match all licenses without ever having to explicitly expand
27615 + # it to all licenses.
27616 + if self.local_config:
27617 + mysplit = []
27618 + for curdb in mydbs:
27619 + mysplit.extend(curdb.get("ACCEPT_LICENSE", "").split())
27620 + mysplit = prune_incremental(mysplit)
27621 + accept_license_str = " ".join(mysplit) or "* -@EULA"
27622 + self.configlist[-1]["ACCEPT_LICENSE"] = accept_license_str
27623 + self._license_manager.set_accept_license_str(accept_license_str)
27624 + else:
27625 + # repoman will accept any license
27626 + self._license_manager.set_accept_license_str("*")
27627 +
27628 + # ACCEPT_PROPERTIES works like ACCEPT_LICENSE, without groups
27629 + if self.local_config:
27630 + mysplit = []
27631 + for curdb in mydbs:
27632 + mysplit.extend(curdb.get("ACCEPT_PROPERTIES", "").split())
27633 + mysplit = prune_incremental(mysplit)
27634 + self.configlist[-1]["ACCEPT_PROPERTIES"] = " ".join(mysplit)
27635 + if tuple(mysplit) != self._accept_properties:
27636 + self._accept_properties = tuple(mysplit)
27637 + else:
27638 + # repoman will accept any property
27639 + self._accept_properties = ("*",)
27640 +
27641 + if self.local_config:
27642 + mysplit = []
27643 + for curdb in mydbs:
27644 + mysplit.extend(curdb.get("ACCEPT_RESTRICT", "").split())
27645 + mysplit = prune_incremental(mysplit)
27646 + self.configlist[-1]["ACCEPT_RESTRICT"] = " ".join(mysplit)
27647 + if tuple(mysplit) != self._accept_restrict:
27648 + self._accept_restrict = tuple(mysplit)
27649 + else:
27650 + # repoman will accept any restriction
27651 + self._accept_restrict = ("*",)
27652 +
27653 + increment_lists = {}
27654 + for k in myincrementals:
27655 + incremental_list = []
27656 + increment_lists[k] = incremental_list
27657 + for curdb in mydbs:
27658 + v = curdb.get(k)
27659 + if v is not None:
27660 + incremental_list.append(v.split())
27661 +
27662 + if "FEATURES" in increment_lists:
27663 + increment_lists["FEATURES"].append(self._features_overrides)
27664 +
27665 + myflags = set()
27666 + for mykey, incremental_list in increment_lists.items():
27667 +
27668 + myflags.clear()
27669 + for mysplit in incremental_list:
27670 +
27671 + for x in mysplit:
27672 + if x == "-*":
27673 + # "-*" is a special "minus" var that means "unset all settings".
27674 + # so USE="-* gnome" will have *just* gnome enabled.
27675 + myflags.clear()
27676 + continue
27677 +
27678 + if x[0] == "+":
27679 + # Not legal. People assume too much. Complain.
27680 + writemsg(
27681 + colorize(
27682 + "BAD",
27683 + _("%s values should not start with a '+': %s")
27684 + % (mykey, x),
27685 + )
27686 + + "\n",
27687 + noiselevel=-1,
27688 + )
27689 + x = x[1:]
27690 + if not x:
27691 + continue
27692 +
27693 + if x[0] == "-":
27694 + myflags.discard(x[1:])
27695 + continue
27696 +
27697 + # We got here, so add it now.
27698 + myflags.add(x)
27699 +
27700 + # store setting in last element of configlist, the original environment:
27701 + if myflags or mykey in self:
27702 + self.configlist[-1][mykey] = " ".join(sorted(myflags))
27703 +
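# Illustrative sketch (not from the commit): the generic incremental
# stacking performed above, as a standalone function. "-*" clears all
# accumulated flags, a (discouraged) "+" prefix is stripped, and a "-"
# prefix removes a single flag.
def stack_incremental(layers):
    flags = set()
    for layer in layers:
        for x in layer.split():
            if x == "-*":
                flags.clear()
            elif x.startswith("-"):
                flags.discard(x[1:])
            else:
                flags.add(x.lstrip("+"))
    return flags

assert stack_incremental(["gtk qt5", "-* gnome"]) == {"gnome"}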
27704 + # Do the USE calculation last because it depends on USE_EXPAND.
27705 + use_expand = self.get("USE_EXPAND", "").split()
27706 + use_expand_dict = self._use_expand_dict
27707 + use_expand_dict.clear()
27708 + for k in use_expand:
27709 + v = self.get(k)
27710 + if v is not None:
27711 + use_expand_dict[k] = v
27712 +
27713 + use_expand_unprefixed = self.get("USE_EXPAND_UNPREFIXED", "").split()
27714 +
27715 + # In order to best accommodate the long-standing practice of
27716 + # setting default USE_EXPAND variables in the profile's
27717 + # make.defaults, we translate these variables into their
27718 + # equivalent USE flags so that useful incremental behavior
27719 + # is enabled (for sub-profiles).
27720 + configdict_defaults = self.configdict["defaults"]
27721 + if self._make_defaults is not None:
27722 + for i, cfg in enumerate(self._make_defaults):
27723 + if not cfg:
27724 + self.make_defaults_use.append("")
27725 + continue
27726 + use = cfg.get("USE", "")
27727 + expand_use = []
27728 +
27729 + for k in use_expand_unprefixed:
27730 + v = cfg.get(k)
27731 + if v is not None:
27732 + expand_use.extend(v.split())
27733 +
27734 + for k in use_expand_dict:
27735 + v = cfg.get(k)
27736 + if v is None:
27737 + continue
27738 + prefix = k.lower() + "_"
27739 + for x in v.split():
27740 + if x[:1] == "-":
27741 + expand_use.append("-" + prefix + x[1:])
27742 + else:
27743 + expand_use.append(prefix + x)
27744 +
27745 + if expand_use:
27746 + expand_use.append(use)
27747 + use = " ".join(expand_use)
27748 + self.make_defaults_use.append(use)
27749 + self.make_defaults_use = tuple(self.make_defaults_use)
27750 + # Preserve both positive and negative flags here, since
27751 + # negative flags may later interact with other flags pulled
27752 + # in via USE_ORDER.
27753 + configdict_defaults["USE"] = " ".join(filter(None, self.make_defaults_use))
27754 + # Set to None so this code only runs once.
27755 + self._make_defaults = None
27756 +
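# Illustrative sketch (not from the commit): translating a make.defaults
# USE_EXPAND variable into prefixed USE flags, as the loop above does,
# using a hypothetical PYTHON_TARGETS value.
def expand_to_use(var, value):
    prefix = var.lower() + "_"
    out = []
    for x in value.split():
        if x[:1] == "-":
            out.append("-" + prefix + x[1:])
        else:
            out.append(prefix + x)
    return out

assert expand_to_use("PYTHON_TARGETS", "python3_10 -python3_9") == [
    "python_targets_python3_10",
    "-python_targets_python3_9",
]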
27757 + if not self.uvlist:
27758 + for x in self["USE_ORDER"].split(":"):
27759 + if x in self.configdict:
27760 + self.uvlist.append(self.configdict[x])
27761 + self.uvlist.reverse()
27762 +
27763 + # For optimal performance, use slice
27764 + # comparison instead of startswith().
27765 + iuse = self.configdict["pkg"].get("IUSE")
27766 + if iuse is not None:
27767 + iuse = [x.lstrip("+-") for x in iuse.split()]
27768 + myflags = set()
27769 + for curdb in self.uvlist:
27770 +
27771 + for k in use_expand_unprefixed:
27772 + v = curdb.get(k)
27773 + if v is None:
27774 + continue
27775 + for x in v.split():
27776 + if x[:1] == "-":
27777 + myflags.discard(x[1:])
27778 + else:
27779 + myflags.add(x)
27780 +
27781 + cur_use_expand = [x for x in use_expand if x in curdb]
27782 + mysplit = curdb.get("USE", "").split()
27783 + if not mysplit and not cur_use_expand:
27784 + continue
27785 + for x in mysplit:
27786 + if x == "-*":
27787 + myflags.clear()
27788 + continue
27789 +
27790 + if x[0] == "+":
27791 + writemsg(
27792 + colorize(
27793 + "BAD",
27794 + _("USE flags should not start " "with a '+': %s\n") % x,
27795 + ),
27796 + noiselevel=-1,
27797 + )
27798 + x = x[1:]
27799 + if not x:
27800 + continue
27801 +
27802 + if x[0] == "-":
27803 + if x[-2:] == "_*":
27804 + prefix = x[1:-1]
27805 + prefix_len = len(prefix)
27806 + myflags.difference_update(
27807 + [y for y in myflags if y[:prefix_len] == prefix]
27808 + )
27809 + myflags.discard(x[1:])
27810 + continue
27811 +
27812 + if iuse is not None and x[-2:] == "_*":
27813 + # Expand wildcards here, so that cases like
27814 + # USE="linguas_* -linguas_en_US" work correctly.
27815 + prefix = x[:-1]
27816 + prefix_len = len(prefix)
27817 + has_iuse = False
27818 + for y in iuse:
27819 + if y[:prefix_len] == prefix:
27820 + has_iuse = True
27821 + myflags.add(y)
27822 + if not has_iuse:
27823 + # There are no matching IUSE, so allow the
27824 + # wildcard to pass through. This allows
27825 + # linguas_* to trigger unset LINGUAS in
27826 + # cases when no linguas_ flags are in IUSE.
27827 + myflags.add(x)
27828 + else:
27829 + myflags.add(x)
27830 +
27831 + if curdb is configdict_defaults:
27832 + # USE_EXPAND flags from make.defaults are handled
27833 + # earlier, in order to provide useful incremental
27834 + # behavior (for sub-profiles).
27835 + continue
27836 +
27837 + for var in cur_use_expand:
27838 + var_lower = var.lower()
27839 + is_not_incremental = var not in myincrementals
27840 + if is_not_incremental:
27841 + prefix = var_lower + "_"
27842 + prefix_len = len(prefix)
27843 + for x in list(myflags):
27844 + if x[:prefix_len] == prefix:
27845 + myflags.remove(x)
27846 + for x in curdb[var].split():
27847 + if x[0] == "+":
27848 + if is_not_incremental:
27849 + writemsg(
27850 + colorize(
27851 + "BAD",
27852 + _(
27853 + "Invalid '+' "
27854 + "operator in non-incremental variable "
27855 + "'%s': '%s'\n"
27856 + )
27857 + % (var, x),
27858 + ),
27859 + noiselevel=-1,
27860 + )
27861 + continue
27862 + else:
27863 + writemsg(
27864 + colorize(
27865 + "BAD",
27866 + _(
27867 + "Invalid '+' "
27868 + "operator in incremental variable "
27869 + "'%s': '%s'\n"
27870 + )
27871 + % (var, x),
27872 + ),
27873 + noiselevel=-1,
27874 + )
27875 + x = x[1:]
27876 + if x[0] == "-":
27877 + if is_not_incremental:
27878 + writemsg(
27879 + colorize(
27880 + "BAD",
27881 + _(
27882 + "Invalid '-' "
27883 + "operator in non-incremental variable "
27884 + "'%s': '%s'\n"
27885 + )
27886 + % (var, x),
27887 + ),
27888 + noiselevel=-1,
27889 + )
27890 + continue
27891 + myflags.discard(var_lower + "_" + x[1:])
27892 + continue
27893 + myflags.add(var_lower + "_" + x)
27894 +
27895 + if hasattr(self, "features"):
27896 + self.features._features.clear()
27897 + else:
27898 + self.features = features_set(self)
27899 + self.features._features.update(self.get("FEATURES", "").split())
27900 + self.features._sync_env_var()
27901 + self.features._validate()
27902 +
27903 + myflags.update(self.useforce)
27904 + arch = self.configdict["defaults"].get("ARCH")
27905 + if arch:
27906 + myflags.add(arch)
27907 +
27908 + myflags.difference_update(self.usemask)
27909 + self.configlist[-1]["USE"] = " ".join(sorted(myflags))
27910 +
27911 + if self.mycpv is None:
27912 + # Generate global USE_EXPAND variables settings that are
27913 + # consistent with USE, for display by emerge --info. For
27914 + # package instances, these are instead generated via
27915 + # setcpv().
27916 + for k in use_expand:
27917 + prefix = k.lower() + "_"
27918 + prefix_len = len(prefix)
27919 + expand_flags = set(
27920 + x[prefix_len:] for x in myflags if x[:prefix_len] == prefix
27921 + )
27922 + var_split = use_expand_dict.get(k, "").split()
27923 + var_split = [x for x in var_split if x in expand_flags]
27924 + var_split.extend(sorted(expand_flags.difference(var_split)))
27925 + if var_split:
27926 + self.configlist[-1][k] = " ".join(var_split)
27927 + elif k in self:
27928 + self.configlist[-1][k] = ""
27929 +
27930 + for k in use_expand_unprefixed:
27931 + var_split = self.get(k, "").split()
27932 + var_split = [x for x in var_split if x in myflags]
27933 + if var_split:
27934 + self.configlist[-1][k] = " ".join(var_split)
27935 + elif k in self:
27936 + self.configlist[-1][k] = ""
27937 +
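# Illustrative sketch (not from the commit): regenerating a USE_EXPAND
# variable for display from the final USE flags, as the mycpv-is-None
# branch above does, with hypothetical flag values.
use_flags = {"python_targets_python3_10", "python_targets_python3_11", "gtk"}
prefix = "python_targets_"
expand_flags = {x[len(prefix):] for x in use_flags if x.startswith(prefix)}
assert expand_flags == {"python3_10", "python3_11"}
# " ".join(sorted(expand_flags)) would become the displayed PYTHON_TARGETS.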
27938 + @property
27939 + def virts_p(self):
27940 + warnings.warn(
27941 + "portage config.virts_p attribute "
27942 + + "is deprecated, use config.get_virts_p()",
27943 + DeprecationWarning,
27944 + stacklevel=2,
27945 + )
27946 + return self.get_virts_p()
27947 +
27948 + @property
27949 + def virtuals(self):
27950 + warnings.warn(
27951 + "portage config.virtuals attribute "
27952 + + "is deprecated, use config.getvirtuals()",
27953 + DeprecationWarning,
27954 + stacklevel=2,
27955 + )
27956 + return self.getvirtuals()
27957 +
27958 + def get_virts_p(self):
27959 + # Ensure that we don't trigger the _treeVirtuals
27960 + # assertion in VirtualsManager._compile_virtuals().
27961 + self.getvirtuals()
27962 + return self._virtuals_manager.get_virts_p()
27963 +
27964 + def getvirtuals(self):
27965 + if self._virtuals_manager._treeVirtuals is None:
27966 + # Hack around the fact that VirtualsManager needs a vartree
27967 + # and vartree needs a config instance.
27968 + # This code should be part of VirtualsManager.getvirtuals().
27969 + if self.local_config:
27970 + temp_vartree = vartree(settings=self)
27971 + self._virtuals_manager._populate_treeVirtuals(temp_vartree)
27972 + else:
27973 + self._virtuals_manager._treeVirtuals = {}
27974 +
27975 + return self._virtuals_manager.getvirtuals()
27976 +
27977 + def _populate_treeVirtuals_if_needed(self, vartree):
27978 + """Reduce the provides into a list by CP."""
27979 + if self._virtuals_manager._treeVirtuals is None:
27980 + if self.local_config:
27981 + self._virtuals_manager._populate_treeVirtuals(vartree)
27982 + else:
27983 + self._virtuals_manager._treeVirtuals = {}
27984 +
27985 + def __delitem__(self, mykey):
27986 + self.pop(mykey)
27987 +
27988 + def __getitem__(self, key):
27989 + try:
27990 + return self._getitem(key)
27991 + except KeyError:
27992 + if portage._internal_caller:
27993 + stack = (
27994 + traceback.format_stack()[:-1]
27995 + + traceback.format_exception(*sys.exc_info())[1:]
27996 + )
27997 + try:
27998 + # Ensure that output is written to terminal.
27999 + with open("/dev/tty", "w") as f:
28000 + f.write("=" * 96 + "\n")
28001 + f.write(
28002 + "=" * 8
28003 + + " Traceback for invalid call to portage.package.ebuild.config.config.__getitem__ "
28004 + + "=" * 8
28005 + + "\n"
28006 + )
28007 + f.writelines(stack)
28008 + f.write("=" * 96 + "\n")
28009 + except Exception:
28010 + pass
28011 + raise
28012 + else:
28013 + warnings.warn(
28014 + _("Passing nonexistent key %r to %s is deprecated. Use %s instead.")
28015 + % (
28016 + key,
28017 + "portage.package.ebuild.config.config.__getitem__",
28018 + "portage.package.ebuild.config.config.get",
28019 + ),
28020 + DeprecationWarning,
28021 + stacklevel=2,
28022 + )
28023 + return ""
28024 +
28025 + def _getitem(self, mykey):
28026 +
28027 + if mykey in self._constant_keys:
28028 + # These two point to temporary values when
28029 + # portage plans to update itself.
28030 + if mykey == "PORTAGE_BIN_PATH":
28031 + return portage._bin_path
28032 + if mykey == "PORTAGE_PYM_PATH":
28033 + return portage._pym_path
28034 +
28035 + if mykey == "PORTAGE_PYTHONPATH":
28036 + value = [
28037 + x for x in self.backupenv.get("PYTHONPATH", "").split(":") if x
28038 + ]
28039 + need_pym_path = True
28040 + if value:
28041 + try:
28042 + need_pym_path = not os.path.samefile(
28043 + value[0], portage._pym_path
28044 + )
28045 + except OSError:
28046 + pass
28047 + if need_pym_path:
28048 + value.insert(0, portage._pym_path)
28049 + return ":".join(value)
28050 +
28051 + if mykey == "PORTAGE_GID":
28052 + return "%s" % portage_gid
28053 +
28054 + for d in self.lookuplist:
28055 + try:
28056 + return d[mykey]
28057 + except KeyError:
28058 + pass
28059 +
28060 + deprecated_key = self._deprecated_keys.get(mykey)
28061 + if deprecated_key is not None:
28062 + value = self._getitem(deprecated_key)
28063 + # warnings.warn(_("Key %s has been renamed to %s. Please ",
28064 + # "update your configuration") % (deprecated_key, mykey),
28065 + # UserWarning)
28066 + return value
28067 +
28068 + raise KeyError(mykey)
28069 +
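# Illustrative sketch (not from the commit): the lookuplist resolution in
# _getitem() above. Earlier dictionaries shadow later ones, and a miss in
# every layer raises KeyError.
def layered_get(lookuplist, key):
    for d in lookuplist:
        try:
            return d[key]
        except KeyError:
            pass
    raise KeyError(key)

layers = [{"USE": "gtk"}, {"USE": "qt5", "ARCH": "amd64"}]
assert layered_get(layers, "USE") == "gtk"     # first layer wins
assert layered_get(layers, "ARCH") == "amd64"  # falls through to later layer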
28070 + def get(self, k, x=None):
28071 + try:
28072 + return self._getitem(k)
28073 + except KeyError:
28074 + return x
28075 +
28076 + def pop(self, key, *args):
28077 + self.modifying()
28078 + if len(args) > 1:
28079 + raise TypeError(
28080 + "pop expected at most 2 arguments, got " + repr(1 + len(args))
28081 + )
28082 + v = self
28083 + for d in reversed(self.lookuplist):
28084 + v = d.pop(key, v)
28085 + if v is self:
28086 + if args:
28087 + return args[0]
28088 + raise KeyError(key)
28089 + return v
28090 +
28091 + def __contains__(self, mykey):
28092 + """Called to implement membership test operators (in and not in)."""
28093 + try:
28094 + self._getitem(mykey)
28095 + except KeyError:
28096 + return False
28097 + else:
28098 + return True
28099 +
28100 + def setdefault(self, k, x=None):
28101 + v = self.get(k)
28102 + if v is not None:
28103 + return v
28104 + self[k] = x
28105 + return x
28106 +
28107 + def __iter__(self):
28108 + keys = set()
28109 + keys.update(self._constant_keys)
28110 + for d in self.lookuplist:
28111 + keys.update(d)
28112 + return iter(keys)
28113 +
28114 + def iterkeys(self):
28115 + return iter(self)
28116 +
28117 + def iteritems(self):
28118 + for k in self:
28119 + yield (k, self._getitem(k))
28120 +
28121 + def __setitem__(self, mykey, myvalue):
28122 + "set a value; will be thrown away at reset() time"
28123 + if not isinstance(myvalue, str):
28124 + raise ValueError(
28125 + "Invalid type being used as a value: '%s': '%s'"
28126 + % (str(mykey), str(myvalue))
28127 + )
28128 +
28129 + # Avoid potential UnicodeDecodeError exceptions later.
28130 + mykey = _unicode_decode(mykey)
28131 + myvalue = _unicode_decode(myvalue)
28132 +
28133 + self.modifying()
28134 + self.modifiedkeys.append(mykey)
28135 + self.configdict["env"][mykey] = myvalue
28136 +
28137 + def environ(self):
28138 + "return our locally-maintained environment"
28139 + mydict = {}
28140 + environ_filter = self._environ_filter
28141 +
28142 + eapi = self.get("EAPI")
28143 + eapi_attrs = _get_eapi_attrs(eapi)
28144 + phase = self.get("EBUILD_PHASE")
28145 + emerge_from = self.get("EMERGE_FROM")
28146 + filter_calling_env = False
28147 + if (
28148 + self.mycpv is not None
28149 + and not (emerge_from == "ebuild" and phase == "setup")
28150 + and phase not in ("clean", "cleanrm", "depend", "fetch")
28151 + ):
28152 + temp_dir = self.get("T")
28153 + if temp_dir is not None and os.path.exists(
28154 + os.path.join(temp_dir, "environment")
28155 + ):
28156 + filter_calling_env = True
28157 +
28158 + environ_whitelist = self._environ_whitelist
28159 + for x, myvalue in self.iteritems():
28160 + if x in environ_filter:
28161 + continue
28162 + if not isinstance(myvalue, str):
28163 + writemsg(
28164 + _("!!! Non-string value in config: %s=%s\n") % (x, myvalue),
28165 + noiselevel=-1,
28166 + )
28167 + continue
28168 + if (
28169 + filter_calling_env
28170 + and x not in environ_whitelist
28171 + and not self._environ_whitelist_re.match(x)
28172 + ):
28173 + # Do not allow anything to leak into the ebuild
28174 + # environment unless it is explicitly whitelisted.
28175 + # This ensures that variables unset by the ebuild
28176 + # remain unset (bug #189417).
28177 + continue
28178 + mydict[x] = myvalue
28179 + if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
28180 + writemsg("*** HOME not set. Setting to " + mydict["BUILD_PREFIX"] + "\n")
28181 + mydict["HOME"] = mydict["BUILD_PREFIX"][:]
28182 +
28183 + if filter_calling_env:
28184 + if phase:
28185 + whitelist = []
28186 + if "rpm" == phase:
28187 + whitelist.append("RPMDIR")
28188 + for k in whitelist:
28189 + v = self.get(k)
28190 + if v is not None:
28191 + mydict[k] = v
28192 +
28193 + # At some point we may want to stop exporting FEATURES to the ebuild
28194 + # environment, in order to prevent ebuilds from abusing it. In
28195 + # preparation for that, export it as PORTAGE_FEATURES so that bashrc
28196 + # users will be able to migrate any FEATURES conditional code to
28197 + # use this alternative variable.
28198 + mydict["PORTAGE_FEATURES"] = self["FEATURES"]
28199 +
28200 + # Filtered by IUSE and implicit IUSE.
28201 + mydict["USE"] = self.get("PORTAGE_USE", "")
28202 +
28203 + # Don't export AA to the ebuild environment in EAPIs that forbid it
28204 + if not eapi_exports_AA(eapi):
28205 + mydict.pop("AA", None)
28206 +
28207 + if not eapi_exports_merge_type(eapi):
28208 + mydict.pop("MERGE_TYPE", None)
28209 +
28210 + src_like_phase = phase == "setup" or _phase_func_map.get(phase, "").startswith(
28211 + "src_"
28212 + )
28213 +
28214 + if not (src_like_phase and eapi_attrs.sysroot):
28215 + mydict.pop("ESYSROOT", None)
28216 +
28217 + if not (src_like_phase and eapi_attrs.broot):
28218 + mydict.pop("BROOT", None)
28219 +
28220 + # Prefix variables are supported beginning with EAPI 3, or when
28221 + # force-prefix is in FEATURES, since older EAPIs would otherwise be
28222 + # useless with prefix configurations. This brings compatibility with
28223 + # the prefix branch of portage, which also supports EPREFIX for all
28224 + # EAPIs (for obvious reasons).
28225 + if phase == "depend" or (
28226 + "force-prefix" not in self.features
28227 + and eapi is not None
28228 + and not eapi_supports_prefix(eapi)
28229 + ):
28230 + mydict.pop("ED", None)
28231 + mydict.pop("EPREFIX", None)
28232 + mydict.pop("EROOT", None)
28233 + mydict.pop("ESYSROOT", None)
28234 +
28235 + if (
28236 + phase
28237 + not in (
28238 + "pretend",
28239 + "setup",
28240 + "preinst",
28241 + "postinst",
28242 + )
28243 + or not eapi_exports_replace_vars(eapi)
28244 + ):
28245 + mydict.pop("REPLACING_VERSIONS", None)
28246 +
28247 + if phase not in ("prerm", "postrm") or not eapi_exports_replace_vars(eapi):
28248 + mydict.pop("REPLACED_BY_VERSION", None)
28249 +
28250 + if phase is not None and eapi_attrs.exports_EBUILD_PHASE_FUNC:
28251 + phase_func = _phase_func_map.get(phase)
28252 + if phase_func is not None:
28253 + mydict["EBUILD_PHASE_FUNC"] = phase_func
28254 +
28255 + if eapi_attrs.posixish_locale:
28256 + split_LC_ALL(mydict)
28257 + mydict["LC_COLLATE"] = "C"
28258 + # check_locale() returns None when the check cannot be executed.
28259 + if check_locale(silent=True, env=mydict) is False:
28260 + # try another locale
28261 + for l in ("C.UTF-8", "en_US.UTF-8", "en_GB.UTF-8", "C"):
28262 + mydict["LC_CTYPE"] = l
28263 + if check_locale(silent=True, env=mydict):
28264 + # TODO: output the following only once
28265 + # writemsg(_("!!! LC_CTYPE unsupported, using %s instead\n")
28266 + # % mydict["LC_CTYPE"])
28267 + break
28268 + else:
28269 + raise AssertionError("C locale did not pass the test!")
28270 +
28271 + if not eapi_attrs.exports_PORTDIR:
28272 + mydict.pop("PORTDIR", None)
28273 + if not eapi_attrs.exports_ECLASSDIR:
28274 + mydict.pop("ECLASSDIR", None)
28275 +
28276 + if not eapi_attrs.path_variables_end_with_trailing_slash:
28277 + for v in ("D", "ED", "ROOT", "EROOT", "ESYSROOT", "BROOT"):
28278 + if v in mydict:
28279 + mydict[v] = mydict[v].rstrip(os.path.sep)
28280 +
28281 + # Since SYSROOT=/ interacts badly with autotools.eclass (bug 654600),
28282 + # and no EAPI expects SYSROOT to have a trailing slash, always strip
28283 + # the trailing slash from SYSROOT.
28284 + if "SYSROOT" in mydict:
28285 + mydict["SYSROOT"] = mydict["SYSROOT"].rstrip(os.sep)
28286 +
28287 + try:
28288 + builddir = mydict["PORTAGE_BUILDDIR"]
28289 + distdir = mydict["DISTDIR"]
28290 + except KeyError:
28291 + pass
28292 + else:
28293 + mydict["PORTAGE_ACTUAL_DISTDIR"] = distdir
28294 + mydict["DISTDIR"] = os.path.join(builddir, "distdir")
28295 +
28296 + return mydict
28297 +
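# Illustrative sketch (not from the commit): the EAPI-conditional trailing
# slash stripping applied above to D, ED, ROOT, EROOT, ESYSROOT and BROOT,
# with a hypothetical environment dict (POSIX os.path.sep assumed).
import os

mydict = {"D": "/var/tmp/portage/cat/pkg-1.0/image/", "ROOT": "/"}
for v in ("D", "ED", "ROOT", "EROOT", "ESYSROOT", "BROOT"):
    if v in mydict:
        mydict[v] = mydict[v].rstrip(os.path.sep)
assert mydict["D"] == "/var/tmp/portage/cat/pkg-1.0/image"
assert mydict["ROOT"] == ""  # "/" strips down to the empty string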
28298 + def thirdpartymirrors(self):
28299 + if getattr(self, "_thirdpartymirrors", None) is None:
28300 + thirdparty_lists = []
28301 + for repo_name in reversed(self.repositories.prepos_order):
28302 + thirdparty_lists.append(
28303 + grabdict(
28304 + os.path.join(
28305 + self.repositories[repo_name].location,
28306 + "profiles",
28307 + "thirdpartymirrors",
28308 + )
28309 + )
28310 + )
28311 + self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
28312 + return self._thirdpartymirrors
28313 +
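# Illustrative sketch (not from the commit): incremental stacking of
# thirdpartymirrors entries across repos, approximating what grabdict()
# plus stack_dictlist(..., incremental=True) produce. Mirror names and
# URIs here are hypothetical; later entries extend the earlier list.
def stack_mirror_dicts(dictlist):
    stacked = {}
    for d in dictlist:
        for name, uris in d.items():
            stacked.setdefault(name, []).extend(uris)
    return stacked

mirrors = stack_mirror_dicts(
    [{"gnu": ["https://ftpmirror.gnu.org"]}, {"gnu": ["https://mirror.example/gnu"]}]
)
assert mirrors["gnu"][0] == "https://ftpmirror.gnu.org"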
28314 + def archlist(self):
28315 + _archlist = []
28316 + for myarch in self["PORTAGE_ARCHLIST"].split():
28317 + _archlist.append(myarch)
28318 + _archlist.append("~" + myarch)
28319 + return _archlist
28320 +
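# Illustrative sketch (not from the commit): archlist() output for a
# hypothetical PORTAGE_ARCHLIST value, pairing each arch with its
# ~testing form.
archlist = []
for myarch in "amd64 arm64".split():
    archlist.append(myarch)
    archlist.append("~" + myarch)
assert archlist == ["amd64", "~amd64", "arm64", "~arm64"]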
28321 + def selinux_enabled(self):
28322 + if getattr(self, "_selinux_enabled", None) is None:
28323 + self._selinux_enabled = 0
28324 + if "selinux" in self["USE"].split():
28325 + if selinux:
28326 + if selinux.is_selinux_enabled() == 1:
28327 + self._selinux_enabled = 1
28328 + else:
28329 + self._selinux_enabled = 0
28330 + else:
28331 + writemsg(
28332 + _(
28333 + "!!! SELinux module not found. Please verify that it was installed.\n"
28334 + ),
28335 + noiselevel=-1,
28336 + )
28337 + self._selinux_enabled = 0
28338 +
28339 + return self._selinux_enabled
28340 +
28341 + keys = __iter__
28342 + items = iteritems
28343 ++>>>>>>> origin/master
28344 diff --cc lib/portage/package/ebuild/doebuild.py
28345 index 69132e651,ac627f555..af8845f34
28346 --- a/lib/portage/package/ebuild/doebuild.py
28347 +++ b/lib/portage/package/ebuild/doebuild.py
28348 @@@ -22,48 -22,81 +22,86 @@@ from textwrap import wra
28349 import time
28350 import warnings
28351 import zlib
28352 +import platform
28353
28354 import portage
28355 - portage.proxy.lazyimport.lazyimport(globals(),
28356 - 'portage.package.ebuild.config:check_config_instance',
28357 - 'portage.package.ebuild.digestcheck:digestcheck',
28358 - 'portage.package.ebuild.digestgen:digestgen',
28359 - 'portage.package.ebuild.fetch:_drop_privs_userfetch,_want_userfetch,fetch',
28360 - 'portage.package.ebuild.prepare_build_dirs:_prepare_fake_distdir',
28361 - 'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
28362 - 'portage.dep._slot_operator:evaluate_slot_operator_equal_deps',
28363 - 'portage.package.ebuild._spawn_nofetch:spawn_nofetch',
28364 - 'portage.util.elf.header:ELFHeader',
28365 - 'portage.dep.soname.multilib_category:compute_multilib_category',
28366 - 'portage.util._desktop_entry:validate_desktop_entry',
28367 - 'portage.util._dyn_libs.NeededEntry:NeededEntry',
28368 - 'portage.util._dyn_libs.soname_deps:SonameDepsProcessor',
28369 - 'portage.util._async.SchedulerInterface:SchedulerInterface',
28370 - 'portage.util._eventloop.global_event_loop:global_event_loop',
28371 - 'portage.util.ExtractKernelVersion:ExtractKernelVersion'
28372 +
28373 + portage.proxy.lazyimport.lazyimport(
28374 + globals(),
28375 + "portage.package.ebuild.config:check_config_instance",
28376 + "portage.package.ebuild.digestcheck:digestcheck",
28377 + "portage.package.ebuild.digestgen:digestgen",
28378 + "portage.package.ebuild.fetch:_drop_privs_userfetch,_want_userfetch,fetch",
28379 + "portage.package.ebuild.prepare_build_dirs:_prepare_fake_distdir",
28380 + "portage.package.ebuild._ipc.QueryCommand:QueryCommand",
28381 + "portage.dep._slot_operator:evaluate_slot_operator_equal_deps",
28382 + "portage.package.ebuild._spawn_nofetch:spawn_nofetch",
28383 + "portage.util.elf.header:ELFHeader",
28384 + "portage.dep.soname.multilib_category:compute_multilib_category",
28385 + "portage.util._desktop_entry:validate_desktop_entry",
28386 + "portage.util._dyn_libs.NeededEntry:NeededEntry",
28387 + "portage.util._dyn_libs.soname_deps:SonameDepsProcessor",
28388 + "portage.util._async.SchedulerInterface:SchedulerInterface",
28389 + "portage.util._eventloop.global_event_loop:global_event_loop",
28390 + "portage.util.ExtractKernelVersion:ExtractKernelVersion",
28391 )
28392
28393 - from portage import bsd_chflags, \
28394 - eapi_is_supported, merge, os, selinux, shutil, \
28395 - unmerge, _encodings, _os_merge, \
28396 - _shell_quote, _unicode_decode, _unicode_encode
28397 - from portage.const import EBUILD_SH_ENV_FILE, EBUILD_SH_ENV_DIR, \
28398 - EBUILD_SH_BINARY, INVALID_ENV_FILE, MISC_SH_BINARY, PORTAGE_PYM_PACKAGES, EPREFIX, MACOSSANDBOX_PROFILE
28399 - from portage.data import portage_gid, portage_uid, secpass, \
28400 - uid, userpriv_groups
28401 + from portage import (
28402 + bsd_chflags,
28403 + eapi_is_supported,
28404 + merge,
28405 + os,
28406 + selinux,
28407 + shutil,
28408 + unmerge,
28409 + _encodings,
28410 + _os_merge,
28411 + _shell_quote,
28412 + _unicode_decode,
28413 + _unicode_encode,
28414 + )
28415 + from portage.const import (
28416 + EBUILD_SH_ENV_FILE,
28417 + EBUILD_SH_ENV_DIR,
28418 + EBUILD_SH_BINARY,
28419 + INVALID_ENV_FILE,
28420 + MISC_SH_BINARY,
28421 + PORTAGE_PYM_PACKAGES,
28422 ++ # BEGIN PREFIX LOCAL
28423 ++ EPREFIX,
28424 ++ MACOSSANDBOX_PROFILE,
28425 ++ # END PREFIX LOCAL
28426 + )
28427 + from portage.data import portage_gid, portage_uid, secpass, uid, userpriv_groups
28428 from portage.dbapi.porttree import _parse_uri_map
28429 - from portage.dep import Atom, check_required_use, \
28430 - human_readable_required_use, paren_enclose, use_reduce
28431 - from portage.eapi import (eapi_exports_KV, eapi_exports_merge_type,
28432 - eapi_exports_replace_vars, eapi_exports_REPOSITORY,
28433 - eapi_has_required_use, eapi_has_src_prepare_and_src_configure,
28434 - eapi_has_pkg_pretend, _get_eapi_attrs)
28435 + from portage.dep import (
28436 + Atom,
28437 + check_required_use,
28438 + human_readable_required_use,
28439 + paren_enclose,
28440 + use_reduce,
28441 + )
28442 + from portage.eapi import (
28443 + eapi_exports_KV,
28444 + eapi_exports_merge_type,
28445 + eapi_exports_replace_vars,
28446 + eapi_exports_REPOSITORY,
28447 + eapi_has_required_use,
28448 + eapi_has_src_prepare_and_src_configure,
28449 + eapi_has_pkg_pretend,
28450 + _get_eapi_attrs,
28451 + )
28452 from portage.elog import elog_process, _preload_elog_modules
28453 from portage.elog.messages import eerror, eqawarn
28454 - from portage.exception import (DigestException, FileNotFound,
28455 - IncorrectParameter, InvalidData, InvalidDependString,
28456 - PermissionDenied, UnsupportedAPIException)
28457 + from portage.exception import (
28458 + DigestException,
28459 + FileNotFound,
28460 + IncorrectParameter,
28461 + InvalidData,
28462 + InvalidDependString,
28463 + PermissionDenied,
28464 + UnsupportedAPIException,
28465 + )
28466 from portage.localization import _
28467 from portage.output import colormap
28468 from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
28469 @@@ -92,1326 -127,1660 +132,1667 @@@ from _emerge.EbuildSpawnProcess import
28470 from _emerge.Package import Package
28471 from _emerge.RootConfig import RootConfig
28472
28473 -
28474 - _unsandboxed_phases = frozenset([
28475 - "clean", "cleanrm", "config",
28476 - "help", "info", "postinst",
28477 - "preinst", "pretend", "postrm",
28478 - "prerm", "setup"
28479 - ])
28480 + _unsandboxed_phases = frozenset(
28481 + [
28482 + "clean",
28483 + "cleanrm",
28484 + "config",
28485 + "help",
28486 + "info",
28487 + "postinst",
28488 + "preinst",
28489 + "pretend",
28490 + "postrm",
28491 + "prerm",
28492 + "setup",
28493 + ]
28494 + )
28495
28496 # phases in which IPC with host is allowed
28497 - _ipc_phases = frozenset([
28498 - "setup", "pretend", "config", "info",
28499 - "preinst", "postinst", "prerm", "postrm",
28500 - ])
28501 + _ipc_phases = frozenset(
28502 + [
28503 + "setup",
28504 + "pretend",
28505 + "config",
28506 + "info",
28507 + "preinst",
28508 + "postinst",
28509 + "prerm",
28510 + "postrm",
28511 + ]
28512 + )
28513
28514 # phases which execute in the global PID namespace
28515 - _global_pid_phases = frozenset([
28516 - 'config', 'depend', 'preinst', 'prerm', 'postinst', 'postrm'])
28517 + _global_pid_phases = frozenset(
28518 + ["config", "depend", "preinst", "prerm", "postinst", "postrm"]
28519 + )
28520
28521 _phase_func_map = {
28522 - "config": "pkg_config",
28523 - "setup": "pkg_setup",
28524 - "nofetch": "pkg_nofetch",
28525 - "unpack": "src_unpack",
28526 - "prepare": "src_prepare",
28527 - "configure": "src_configure",
28528 - "compile": "src_compile",
28529 - "test": "src_test",
28530 - "install": "src_install",
28531 - "preinst": "pkg_preinst",
28532 - "postinst": "pkg_postinst",
28533 - "prerm": "pkg_prerm",
28534 - "postrm": "pkg_postrm",
28535 - "info": "pkg_info",
28536 - "pretend": "pkg_pretend",
28537 + "config": "pkg_config",
28538 + "setup": "pkg_setup",
28539 + "nofetch": "pkg_nofetch",
28540 + "unpack": "src_unpack",
28541 + "prepare": "src_prepare",
28542 + "configure": "src_configure",
28543 + "compile": "src_compile",
28544 + "test": "src_test",
28545 + "install": "src_install",
28546 + "preinst": "pkg_preinst",
28547 + "postinst": "pkg_postinst",
28548 + "prerm": "pkg_prerm",
28549 + "postrm": "pkg_postrm",
28550 + "info": "pkg_info",
28551 + "pretend": "pkg_pretend",
28552 }
28553
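# Illustrative sketch (not from the commit): how _phase_func_map feeds the
# EBUILD_PHASE_FUNC export and the src_-phase test seen in config.environ()
# earlier in this diff (only a subset of the map is shown).
_phase_func_map = {"compile": "src_compile", "preinst": "pkg_preinst"}
phase = "compile"
src_like_phase = phase == "setup" or _phase_func_map.get(phase, "").startswith("src_")
assert src_like_phase and _phase_func_map["compile"] == "src_compile"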
28554 - _vdb_use_conditional_keys = Package._dep_keys + \
28555 - ('LICENSE', 'PROPERTIES', 'RESTRICT',)
28556 + _vdb_use_conditional_keys = Package._dep_keys + (
28557 + "LICENSE",
28558 + "PROPERTIES",
28559 + "RESTRICT",
28560 + )
28561 +
28562
28563 def _doebuild_spawn(phase, settings, actionmap=None, **kwargs):
28564 - """
28565 - All proper ebuild phases which execute ebuild.sh are spawned
28566 - via this function. No exceptions.
28567 - """
28568 -
28569 - if phase in _unsandboxed_phases:
28570 - kwargs['free'] = True
28571 -
28572 - kwargs['ipc'] = 'ipc-sandbox' not in settings.features or \
28573 - phase in _ipc_phases
28574 - kwargs['mountns'] = 'mount-sandbox' in settings.features
28575 - kwargs['networked'] = (
28576 - 'network-sandbox' not in settings.features or
28577 - (phase == 'unpack' and
28578 - 'live' in settings['PORTAGE_PROPERTIES'].split()) or
28579 - (phase == 'test' and
28580 - 'test_network' in settings['PORTAGE_PROPERTIES'].split()) or
28581 - phase in _ipc_phases or
28582 - 'network-sandbox' in settings['PORTAGE_RESTRICT'].split())
28583 - kwargs['pidns'] = ('pid-sandbox' in settings.features and
28584 - phase not in _global_pid_phases)
28585 -
28586 - if phase == 'depend':
28587 - kwargs['droppriv'] = 'userpriv' in settings.features
28588 - # It's not necessary to close_fds for this phase, since
28589 - # it should not spawn any daemons, and close_fds is
28590 - # best avoided since it can interact badly with some
28591 - # garbage collectors (see _setup_pipes docstring).
28592 - kwargs['close_fds'] = False
28593 -
28594 - if actionmap is not None and phase in actionmap:
28595 - kwargs.update(actionmap[phase]["args"])
28596 - cmd = actionmap[phase]["cmd"] % phase
28597 - else:
28598 - if phase == 'cleanrm':
28599 - ebuild_sh_arg = 'clean'
28600 - else:
28601 - ebuild_sh_arg = phase
28602 -
28603 - cmd = "%s %s" % (_shell_quote(
28604 - os.path.join(settings["PORTAGE_BIN_PATH"],
28605 - os.path.basename(EBUILD_SH_BINARY))),
28606 - ebuild_sh_arg)
28607 -
28608 - settings['EBUILD_PHASE'] = phase
28609 - try:
28610 - return spawn(cmd, settings, **kwargs)
28611 - finally:
28612 - settings.pop('EBUILD_PHASE', None)
28613 -
28614 - def _spawn_phase(phase, settings, actionmap=None, returnpid=False,
28615 - logfile=None, **kwargs):
28616 -
28617 - if returnpid:
28618 - return _doebuild_spawn(phase, settings, actionmap=actionmap,
28619 - returnpid=returnpid, logfile=logfile, **kwargs)
28620 -
28621 - # The logfile argument is unused here, since EbuildPhase uses
28622 - # the PORTAGE_LOG_FILE variable if set.
28623 - ebuild_phase = EbuildPhase(actionmap=actionmap, background=False,
28624 - phase=phase, scheduler=SchedulerInterface(asyncio._safe_loop()),
28625 - settings=settings, **kwargs)
28626 -
28627 - ebuild_phase.start()
28628 - ebuild_phase.wait()
28629 - return ebuild_phase.returncode
28630 + """
28631 + All proper ebuild phases which execute ebuild.sh are spawned
28632 + via this function. No exceptions.
28633 + """
28634 +
28635 + if phase in _unsandboxed_phases:
28636 + kwargs["free"] = True
28637 +
28638 + kwargs["ipc"] = "ipc-sandbox" not in settings.features or phase in _ipc_phases
28639 + kwargs["mountns"] = "mount-sandbox" in settings.features
28640 + kwargs["networked"] = (
28641 + "network-sandbox" not in settings.features
28642 + or (phase == "unpack" and "live" in settings["PORTAGE_PROPERTIES"].split())
28643 + or (
28644 + phase == "test" and "test_network" in settings["PORTAGE_PROPERTIES"].split()
28645 + )
28646 + or phase in _ipc_phases
28647 + or "network-sandbox" in settings["PORTAGE_RESTRICT"].split()
28648 + )
28649 + kwargs["pidns"] = (
28650 + "pid-sandbox" in settings.features and phase not in _global_pid_phases
28651 + )
28652 +
28653 + if phase == "depend":
28654 + kwargs["droppriv"] = "userpriv" in settings.features
28655 + # It's not necessary to close_fds for this phase, since
28656 + # it should not spawn any daemons, and close_fds is
28657 + # best avoided since it can interact badly with some
28658 + # garbage collectors (see _setup_pipes docstring).
28659 + kwargs["close_fds"] = False
28660 +
28661 + if actionmap is not None and phase in actionmap:
28662 + kwargs.update(actionmap[phase]["args"])
28663 + cmd = actionmap[phase]["cmd"] % phase
28664 + else:
28665 + if phase == "cleanrm":
28666 + ebuild_sh_arg = "clean"
28667 + else:
28668 + ebuild_sh_arg = phase
28669 +
28670 + cmd = "%s %s" % (
28671 + _shell_quote(
28672 + os.path.join(
28673 + settings["PORTAGE_BIN_PATH"], os.path.basename(EBUILD_SH_BINARY)
28674 + )
28675 + ),
28676 + ebuild_sh_arg,
28677 + )
28678 +
28679 + settings["EBUILD_PHASE"] = phase
28680 + try:
28681 + return spawn(cmd, settings, **kwargs)
28682 + finally:
28683 + settings.pop("EBUILD_PHASE", None)
28684 +
28685 +
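# Illustrative sketch (not from the commit): the "networked" decision from
# _doebuild_spawn() above, reduced to a standalone predicate with
# hypothetical feature/property values.
def phase_is_networked(phase, features, properties, restrict, ipc_phases):
    return (
        "network-sandbox" not in features
        or (phase == "unpack" and "live" in properties)
        or (phase == "test" and "test_network" in properties)
        or phase in ipc_phases
        or "network-sandbox" in restrict
    )

assert phase_is_networked("unpack", {"network-sandbox"}, {"live"}, set(), set())
assert not phase_is_networked("compile", {"network-sandbox"}, set(), set(), set())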
28686 + def _spawn_phase(
28687 + phase, settings, actionmap=None, returnpid=False, logfile=None, **kwargs
28688 + ):
28689 +
28690 + if returnpid:
28691 + return _doebuild_spawn(
28692 + phase,
28693 + settings,
28694 + actionmap=actionmap,
28695 + returnpid=returnpid,
28696 + logfile=logfile,
28697 + **kwargs
28698 + )
28699 +
28700 + # The logfile argument is unused here, since EbuildPhase uses
28701 + # the PORTAGE_LOG_FILE variable if set.
28702 + ebuild_phase = EbuildPhase(
28703 + actionmap=actionmap,
28704 + background=False,
28705 + phase=phase,
28706 + scheduler=SchedulerInterface(asyncio._safe_loop()),
28707 + settings=settings,
28708 + **kwargs
28709 + )
28710 +
28711 + ebuild_phase.start()
28712 + ebuild_phase.wait()
28713 + return ebuild_phase.returncode
28714 +
28715
28716 def _doebuild_path(settings, eapi=None):
28717 - """
28718 - Generate the PATH variable.
28719 - """
28720 -
28721 - # Note: PORTAGE_BIN_PATH may differ from the global constant
28722 - # when portage is reinstalling itself.
28723 - portage_bin_path = [settings["PORTAGE_BIN_PATH"]]
28724 - if portage_bin_path[0] != portage.const.PORTAGE_BIN_PATH:
28725 - # Add a fallback path for restarting failed builds (bug 547086)
28726 - portage_bin_path.append(portage.const.PORTAGE_BIN_PATH)
28727 - prerootpath = [x for x in settings.get("PREROOTPATH", "").split(":") if x]
28728 - rootpath = [x for x in settings.get("ROOTPATH", "").split(":") if x]
28729 - rootpath_set = frozenset(rootpath)
28730 - overrides = [x for x in settings.get(
28731 - "__PORTAGE_TEST_PATH_OVERRIDE", "").split(":") if x]
28732 -
28733 - prefixes = []
28734 - # settings["EPREFIX"] should take priority over portage.const.EPREFIX
28735 - if portage.const.EPREFIX != settings["EPREFIX"] and settings["ROOT"] == os.sep:
28736 - prefixes.append(settings["EPREFIX"])
28737 - prefixes.append(portage.const.EPREFIX)
28738 -
28739 - path = overrides
28740 -
28741 - if "xattr" in settings.features:
28742 - for x in portage_bin_path:
28743 - path.append(os.path.join(x, "ebuild-helpers", "xattr"))
28744 -
28745 - if uid != 0 and \
28746 - "unprivileged" in settings.features and \
28747 - "fakeroot" not in settings.features:
28748 - for x in portage_bin_path:
28749 - path.append(os.path.join(x,
28750 - "ebuild-helpers", "unprivileged"))
28751 -
28752 - if settings.get("USERLAND", "GNU") != "GNU":
28753 - for x in portage_bin_path:
28754 - path.append(os.path.join(x, "ebuild-helpers", "bsd"))
28755 -
28756 - for x in portage_bin_path:
28757 - path.append(os.path.join(x, "ebuild-helpers"))
28758 - path.extend(prerootpath)
28759 -
28760 - for prefix in prefixes:
28761 - prefix = prefix if prefix else "/"
28762 - for x in ("usr/local/sbin", "usr/local/bin", "usr/sbin", "usr/bin", "sbin", "bin"):
28763 - # Respect order defined in ROOTPATH
28764 - x_abs = os.path.join(prefix, x)
28765 - if x_abs not in rootpath_set:
28766 - path.append(x_abs)
28767 -
28768 - path.extend(rootpath)
28769 -
28770 - # PREFIX LOCAL: append EXTRA_PATH from make.globals
28771 - extrapath = [x for x in settings.get("EXTRA_PATH", "").split(":") if x]
28772 - path.extend(extrapath)
28773 - # END PREFIX LOCAL
28774 -
28775 - settings["PATH"] = ":".join(path)
28776 -
28777 - def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
28778 - debug=False, use_cache=None, db=None):
28779 - """
28780 - Create and store environment variable in the config instance
28781 - that's passed in as the "settings" parameter. This will raise
28782 - UnsupportedAPIException if the given ebuild has an unsupported
28783 - EAPI. All EAPI dependent code comes last, so that essential
28784 - variables like PORTAGE_BUILDDIR are still initialized even in
28785 - cases when UnsupportedAPIException needs to be raised, which
28786 - can be useful when uninstalling a package that has corrupt
28787 - EAPI metadata.
28788 - The myroot and use_cache parameters are unused.
28789 - """
28790 -
28791 - if settings is None:
28792 - raise TypeError("settings argument is required")
28793 -
28794 - if db is None:
28795 - raise TypeError("db argument is required")
28796 -
28797 - mysettings = settings
28798 - mydbapi = db
28799 - ebuild_path = os.path.abspath(myebuild)
28800 - pkg_dir = os.path.dirname(ebuild_path)
28801 - mytree = os.path.dirname(os.path.dirname(pkg_dir))
28802 - mypv = os.path.basename(ebuild_path)[:-7]
28803 - mysplit = _pkgsplit(mypv, eapi=mysettings.configdict["pkg"].get("EAPI"))
28804 - if mysplit is None:
28805 - raise IncorrectParameter(
28806 - _("Invalid ebuild path: '%s'") % myebuild)
28807 -
28808 - if mysettings.mycpv is not None and \
28809 - mysettings.configdict["pkg"].get("PF") == mypv and \
28810 - "CATEGORY" in mysettings.configdict["pkg"]:
28811 - # Assume that PF is enough to assume that we've got
28812 - # the correct CATEGORY, though this is not really
28813 - # a solid assumption since it's possible (though
28814 - # unlikely) that two packages in different
28815 - # categories have the same PF. Callers should call
28816 - # setcpv or create a clean clone of a locked config
28817 - # instance in order to ensure that this assumption
28818 - # does not fail like in bug #408817.
28819 - cat = mysettings.configdict["pkg"]["CATEGORY"]
28820 - mycpv = mysettings.mycpv
28821 - elif os.path.basename(pkg_dir) in (mysplit[0], mypv):
28822 - # portdbapi or vardbapi
28823 - cat = os.path.basename(os.path.dirname(pkg_dir))
28824 - mycpv = cat + "/" + mypv
28825 - else:
28826 - raise AssertionError("unable to determine CATEGORY")
28827 -
28828 - # Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
28829 - # so that the caller can override it.
28830 - tmpdir = mysettings["PORTAGE_TMPDIR"]
28831 -
28832 - if mydo == 'depend':
28833 - if mycpv != mysettings.mycpv:
28834 - # Don't pass in mydbapi here since the resulting aux_get
28835 - # call would lead to infinite 'depend' phase recursion.
28836 - mysettings.setcpv(mycpv)
28837 - else:
28838 - # If EAPI isn't in configdict["pkg"], it means that setcpv()
28839 - # hasn't been called with the mydb argument, so we have to
28840 - # call it here (portage code always calls setcpv properly,
28841 - # but api consumers might not).
28842 - if mycpv != mysettings.mycpv or \
28843 - "EAPI" not in mysettings.configdict["pkg"]:
28844 - # Reload env.d variables and reset any previous settings.
28845 - mysettings.reload()
28846 - mysettings.reset()
28847 - mysettings.setcpv(mycpv, mydb=mydbapi)
28848 -
28849 - # config.reset() might have reverted a change made by the caller,
28850 - # so restore it to its original value. Sandbox needs canonical
28851 - # paths, so realpath it.
28852 - mysettings["PORTAGE_TMPDIR"] = os.path.realpath(tmpdir)
28853 -
28854 - mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
28855 - mysettings["EBUILD_PHASE"] = mydo
28856 -
28857 - # Set requested Python interpreter for Portage helpers.
28858 - mysettings['PORTAGE_PYTHON'] = portage._python_interpreter
28859 -
28860 - # This is used by assert_sigpipe_ok() that's used by the ebuild
28861 - # unpack() helper. SIGPIPE is typically 13, but its better not
28862 - # to assume that.
28863 - mysettings['PORTAGE_SIGPIPE_STATUS'] = str(128 + signal.SIGPIPE)
28864 -
28865 - # We are disabling user-specific bashrc files.
28866 - mysettings["BASH_ENV"] = INVALID_ENV_FILE
28867 -
28868 - if debug: # Otherwise it overrides emerge's settings.
28869 - # We have no other way to set debug... debug can't be passed in
28870 - # due to how it's coded... Don't overwrite this so we can use it.
28871 - mysettings["PORTAGE_DEBUG"] = "1"
28872 -
28873 - mysettings["EBUILD"] = ebuild_path
28874 - mysettings["O"] = pkg_dir
28875 - mysettings.configdict["pkg"]["CATEGORY"] = cat
28876 - mysettings["PF"] = mypv
28877 -
28878 - if hasattr(mydbapi, 'repositories'):
28879 - repo = mydbapi.repositories.get_repo_for_location(mytree)
28880 - mysettings['PORTDIR'] = repo.eclass_db.porttrees[0]
28881 - mysettings['PORTAGE_ECLASS_LOCATIONS'] = repo.eclass_db.eclass_locations_string
28882 - mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"] = repo.name
28883 -
28884 - mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
28885 - mysettings.pop("PORTDIR_OVERLAY", None)
28886 - mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
28887 - mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])
28888 -
28889 - mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
28890 -
28891 - mysettings["PORTAGE_BASHRC_FILES"] = "\n".join(mysettings._pbashrc)
28892 -
28893 - mysettings["P"] = mysplit[0]+"-"+mysplit[1]
28894 - mysettings["PN"] = mysplit[0]
28895 - mysettings["PV"] = mysplit[1]
28896 - mysettings["PR"] = mysplit[2]
28897 -
28898 - if noiselimit < 0:
28899 - mysettings["PORTAGE_QUIET"] = "1"
28900 -
28901 - if mysplit[2] == "r0":
28902 - mysettings["PVR"]=mysplit[1]
28903 - else:
28904 - mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
28905 -
28906 - # All temporary directories should be subdirectories of
28907 - # $PORTAGE_TMPDIR/portage, since it's common for /tmp and /var/tmp
28908 - # to be mounted with the "noexec" option (see bug #346899).
28909 - mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
28910 - mysettings["PKG_TMPDIR"] = mysettings["BUILD_PREFIX"]+"/._unmerge_"
28911 -
28912 - # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
28913 - # locations in order to prevent interference.
28914 - if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
28915 - mysettings["PORTAGE_BUILDDIR"] = os.path.join(
28916 - mysettings["PKG_TMPDIR"],
28917 - mysettings["CATEGORY"], mysettings["PF"])
28918 - else:
28919 - mysettings["PORTAGE_BUILDDIR"] = os.path.join(
28920 - mysettings["BUILD_PREFIX"],
28921 - mysettings["CATEGORY"], mysettings["PF"])
28922 -
28923 - mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
28924 - mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
28925 - mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
28926 - mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
28927 - mysettings["SANDBOX_LOG"] = os.path.join(mysettings["T"], "sandbox.log")
28928 - mysettings["FILESDIR"] = os.path.join(settings["PORTAGE_BUILDDIR"], "files")
28929 -
28930 - # Prefix forward compatibility
28931 - eprefix_lstrip = mysettings["EPREFIX"].lstrip(os.sep)
28932 - mysettings["ED"] = os.path.join(
28933 - mysettings["D"], eprefix_lstrip).rstrip(os.sep) + os.sep
28934 -
28935 - mysettings["PORTAGE_BASHRC"] = os.path.join(
28936 - mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE)
28937 - mysettings["PM_EBUILD_HOOK_DIR"] = os.path.join(
28938 - mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_DIR)
28939 -
28940 - # Allow color.map to control colors associated with einfo, ewarn, etc...
28941 - mysettings["PORTAGE_COLORMAP"] = colormap()
28942 -
28943 - if "COLUMNS" not in mysettings:
28944 - # Set COLUMNS, in order to prevent unnecessary stty calls
28945 - # inside the set_colors function of isolated-functions.sh.
28946 - # We cache the result in os.environ, in order to avoid
28947 - # multiple stty calls in cases when get_term_size() falls
28948 - # back to stty due to a missing or broken curses module.
28949 - columns = os.environ.get("COLUMNS")
28950 - if columns is None:
28951 - rows, columns = portage.output.get_term_size()
28952 - if columns < 1:
28953 - # Force a sane value for COLUMNS, so that tools
28954 - # like ls don't complain (see bug #394091).
28955 - columns = 80
28956 - columns = str(columns)
28957 - os.environ["COLUMNS"] = columns
28958 - mysettings["COLUMNS"] = columns
28959 -
28960 - # EAPI is always known here, even for the "depend" phase, because
28961 - # EbuildMetadataPhase gets it from _parse_eapi_ebuild_head().
28962 - eapi = mysettings.configdict['pkg']['EAPI']
28963 - _doebuild_path(mysettings, eapi=eapi)
28964 -
28965 - # All EAPI dependent code comes last, so that essential variables like
28966 - # PATH and PORTAGE_BUILDDIR are still initialized even in cases when
28967 - # UnsupportedAPIException needs to be raised, which can be useful
28968 - # when uninstalling a package that has corrupt EAPI metadata.
28969 - if not eapi_is_supported(eapi):
28970 - raise UnsupportedAPIException(mycpv, eapi)
28971 -
28972 - if eapi_exports_REPOSITORY(eapi) and "PORTAGE_REPO_NAME" in mysettings.configdict["pkg"]:
28973 - mysettings.configdict["pkg"]["REPOSITORY"] = mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"]
28974 -
28975 - if mydo != "depend":
28976 - if hasattr(mydbapi, "getFetchMap") and \
28977 - ("A" not in mysettings.configdict["pkg"] or \
28978 - "AA" not in mysettings.configdict["pkg"]):
28979 - src_uri = mysettings.configdict["pkg"].get("SRC_URI")
28980 - if src_uri is None:
28981 - src_uri, = mydbapi.aux_get(mysettings.mycpv,
28982 - ["SRC_URI"], mytree=mytree)
28983 - metadata = {
28984 - "EAPI" : eapi,
28985 - "SRC_URI" : src_uri,
28986 - }
28987 - use = frozenset(mysettings["PORTAGE_USE"].split())
28988 - try:
28989 - uri_map = _parse_uri_map(mysettings.mycpv, metadata, use=use)
28990 - except InvalidDependString:
28991 - mysettings.configdict["pkg"]["A"] = ""
28992 - else:
28993 - mysettings.configdict["pkg"]["A"] = " ".join(uri_map)
28994 -
28995 - try:
28996 - uri_map = _parse_uri_map(mysettings.mycpv, metadata)
28997 - except InvalidDependString:
28998 - mysettings.configdict["pkg"]["AA"] = ""
28999 - else:
29000 - mysettings.configdict["pkg"]["AA"] = " ".join(uri_map)
29001 -
29002 - ccache = "ccache" in mysettings.features
29003 - distcc = "distcc" in mysettings.features
29004 - icecream = "icecream" in mysettings.features
29005 -
29006 - if ccache or distcc or icecream:
29007 - libdir = None
29008 - default_abi = mysettings.get("DEFAULT_ABI")
29009 - if default_abi:
29010 - libdir = mysettings.get("LIBDIR_" + default_abi)
29011 - if not libdir:
29012 - libdir = "lib"
29013 -
29014 - # The installation locations used to vary between versions...
29015 - # Safer to look them up than to assume.
29016 - possible_libexecdirs = (libdir, "lib", "libexec")
29017 - masquerades = []
29018 - if distcc:
29019 - masquerades.append(("distcc", "distcc"))
29020 - if icecream:
29021 - masquerades.append(("icecream", "icecc"))
29022 - if ccache:
29023 - masquerades.append(("ccache", "ccache"))
29024 -
29025 - for feature, m in masquerades:
29026 - for l in possible_libexecdirs:
29027 - p = os.path.join(os.sep, eprefix_lstrip,
29028 - "usr", l, m, "bin")
29029 - if os.path.isdir(p):
29030 - mysettings["PATH"] = p + ":" + mysettings["PATH"]
29031 - break
29032 - else:
29033 - writemsg(("Warning: %s requested but no masquerade dir "
29034 - "can be found in /usr/lib*/%s/bin\n") % (m, m))
29035 - mysettings.features.remove(feature)
29036 -
29037 - if 'MAKEOPTS' not in mysettings:
29038 - nproc = get_cpu_count()
29039 - if nproc:
29040 - mysettings['MAKEOPTS'] = '-j%d' % (nproc)
29041 -
29042 - if not eapi_exports_KV(eapi):
29043 - # Discard KV for EAPIs that don't support it. Cached KV is restored
29044 - # from the backupenv whenever config.reset() is called.
29045 - mysettings.pop('KV', None)
29046 - elif 'KV' not in mysettings and \
29047 - mydo in ('compile', 'config', 'configure', 'info',
29048 - 'install', 'nofetch', 'postinst', 'postrm', 'preinst',
29049 - 'prepare', 'prerm', 'setup', 'test', 'unpack'):
29050 - mykv, err1 = ExtractKernelVersion(
29051 - os.path.join(mysettings['EROOT'], "usr/src/linux"))
29052 - if mykv:
29053 - # Regular source tree
29054 - mysettings["KV"] = mykv
29055 - else:
29056 - mysettings["KV"] = ""
29057 - mysettings.backup_changes("KV")
29058 -
29059 - binpkg_compression = mysettings.get("BINPKG_COMPRESS", "bzip2")
29060 - try:
29061 - compression = _compressors[binpkg_compression]
29062 - except KeyError as e:
29063 - if binpkg_compression:
29064 - writemsg("Warning: Invalid or unsupported compression method: %s\n" % e.args[0])
29065 - else:
29066 - # Empty BINPKG_COMPRESS disables compression.
29067 - mysettings['PORTAGE_COMPRESSION_COMMAND'] = 'cat'
29068 - else:
29069 - try:
29070 - compression_binary = shlex_split(varexpand(compression["compress"], mydict=settings))[0]
29071 - except IndexError as e:
29072 - writemsg("Warning: Invalid or unsupported compression method: %s\n" % e.args[0])
29073 - else:
29074 - if find_binary(compression_binary) is None:
29075 - missing_package = compression["package"]
29076 - writemsg("Warning: File compression unsupported %s. Missing package: %s\n" % (binpkg_compression, missing_package))
29077 - else:
29078 - cmd = [varexpand(x, mydict=settings) for x in shlex_split(compression["compress"])]
29079 - # Filter empty elements
29080 - cmd = [x for x in cmd if x != ""]
29081 - mysettings['PORTAGE_COMPRESSION_COMMAND'] = ' '.join(cmd)
29082 + """
29083 + Generate the PATH variable.
29084 + """
29085 +
29086 + # Note: PORTAGE_BIN_PATH may differ from the global constant
29087 + # when portage is reinstalling itself.
29088 + portage_bin_path = [settings["PORTAGE_BIN_PATH"]]
29089 + if portage_bin_path[0] != portage.const.PORTAGE_BIN_PATH:
29090 + # Add a fallback path for restarting failed builds (bug 547086)
29091 + portage_bin_path.append(portage.const.PORTAGE_BIN_PATH)
29092 + prerootpath = [x for x in settings.get("PREROOTPATH", "").split(":") if x]
29093 + rootpath = [x for x in settings.get("ROOTPATH", "").split(":") if x]
29094 + rootpath_set = frozenset(rootpath)
29095 + overrides = [
29096 + x for x in settings.get("__PORTAGE_TEST_PATH_OVERRIDE", "").split(":") if x
29097 + ]
29098 +
29099 + prefixes = []
29100 + # settings["EPREFIX"] should take priority over portage.const.EPREFIX
29101 + if portage.const.EPREFIX != settings["EPREFIX"] and settings["ROOT"] == os.sep:
29102 + prefixes.append(settings["EPREFIX"])
29103 + prefixes.append(portage.const.EPREFIX)
29104 +
29105 + path = overrides
29106 +
29107 + if "xattr" in settings.features:
29108 + for x in portage_bin_path:
29109 + path.append(os.path.join(x, "ebuild-helpers", "xattr"))
29110 +
29111 + if (
29112 + uid != 0
29113 + and "unprivileged" in settings.features
29114 + and "fakeroot" not in settings.features
29115 + ):
29116 + for x in portage_bin_path:
29117 + path.append(os.path.join(x, "ebuild-helpers", "unprivileged"))
29118 +
29119 + if settings.get("USERLAND", "GNU") != "GNU":
29120 + for x in portage_bin_path:
29121 + path.append(os.path.join(x, "ebuild-helpers", "bsd"))
29122 +
29123 + for x in portage_bin_path:
29124 + path.append(os.path.join(x, "ebuild-helpers"))
29125 + path.extend(prerootpath)
29126 +
29127 + for prefix in prefixes:
29128 + prefix = prefix if prefix else "/"
29129 + for x in (
29130 + "usr/local/sbin",
29131 + "usr/local/bin",
29132 + "usr/sbin",
29133 + "usr/bin",
29134 + "sbin",
29135 + "bin",
29136 + ):
29137 + # Respect order defined in ROOTPATH
29138 + x_abs = os.path.join(prefix, x)
29139 + if x_abs not in rootpath_set:
29140 + path.append(x_abs)
29141 +
29142 + path.extend(rootpath)
29143 ++
29144 ++ # BEGIN PREFIX LOCAL: append EXTRA_PATH from make.globals
29145 ++ extrapath = [x for x in settings.get("EXTRA_PATH", "").split(":") if x]
29146 ++ path.extend(extrapath)
29147 ++ # END PREFIX LOCAL
29148 ++
29149 + settings["PATH"] = ":".join(path)
29150 +
29151 +
29152 + def doebuild_environment(
29153 + myebuild, mydo, myroot=None, settings=None, debug=False, use_cache=None, db=None
29154 + ):
29155 + """
29156 + Create and store environment variables in the config instance
29157 + that's passed in as the "settings" parameter. This will raise
29158 + UnsupportedAPIException if the given ebuild has an unsupported
29159 + EAPI. All EAPI dependent code comes last, so that essential
29160 + variables like PORTAGE_BUILDDIR are still initialized even in
29161 + cases when UnsupportedAPIException needs to be raised, which
29162 + can be useful when uninstalling a package that has corrupt
29163 + EAPI metadata.
29164 + The myroot and use_cache parameters are unused.
29165 + """
29166 +
29167 + if settings is None:
29168 + raise TypeError("settings argument is required")
29169 +
29170 + if db is None:
29171 + raise TypeError("db argument is required")
29172 +
29173 + mysettings = settings
29174 + mydbapi = db
29175 + ebuild_path = os.path.abspath(myebuild)
29176 + pkg_dir = os.path.dirname(ebuild_path)
29177 + mytree = os.path.dirname(os.path.dirname(pkg_dir))
29178 + mypv = os.path.basename(ebuild_path)[:-7]
29179 + mysplit = _pkgsplit(mypv, eapi=mysettings.configdict["pkg"].get("EAPI"))
29180 + if mysplit is None:
29181 + raise IncorrectParameter(_("Invalid ebuild path: '%s'") % myebuild)
29182 +
29183 + if (
29184 + mysettings.mycpv is not None
29185 + and mysettings.configdict["pkg"].get("PF") == mypv
29186 + and "CATEGORY" in mysettings.configdict["pkg"]
29187 + ):
29188 + # Assume that PF is enough to infer that we've got
29189 + # the correct CATEGORY, though this is not really
29190 + # a solid assumption since it's possible (though
29191 + # unlikely) that two packages in different
29192 + # categories have the same PF. Callers should call
29193 + # setcpv or create a clean clone of a locked config
29194 + # instance in order to ensure that this assumption
29195 + # does not fail like in bug #408817.
29196 + cat = mysettings.configdict["pkg"]["CATEGORY"]
29197 + mycpv = mysettings.mycpv
29198 + elif os.path.basename(pkg_dir) in (mysplit[0], mypv):
29199 + # portdbapi or vardbapi
29200 + cat = os.path.basename(os.path.dirname(pkg_dir))
29201 + mycpv = cat + "/" + mypv
29202 + else:
29203 + raise AssertionError("unable to determine CATEGORY")
29204 +
29205 + # Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
29206 + # so that the caller can override it.
29207 + tmpdir = mysettings["PORTAGE_TMPDIR"]
29208 +
29209 + if mydo == "depend":
29210 + if mycpv != mysettings.mycpv:
29211 + # Don't pass in mydbapi here since the resulting aux_get
29212 + # call would lead to infinite 'depend' phase recursion.
29213 + mysettings.setcpv(mycpv)
29214 + else:
29215 + # If EAPI isn't in configdict["pkg"], it means that setcpv()
29216 + # hasn't been called with the mydb argument, so we have to
29217 + # call it here (portage code always calls setcpv properly,
29218 + # but api consumers might not).
29219 + if mycpv != mysettings.mycpv or "EAPI" not in mysettings.configdict["pkg"]:
29220 + # Reload env.d variables and reset any previous settings.
29221 + mysettings.reload()
29222 + mysettings.reset()
29223 + mysettings.setcpv(mycpv, mydb=mydbapi)
29224 +
29225 + # config.reset() might have reverted a change made by the caller,
29226 + # so restore it to its original value. Sandbox needs canonical
29227 + # paths, so realpath it.
29228 + mysettings["PORTAGE_TMPDIR"] = os.path.realpath(tmpdir)
29229 +
29230 + mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
29231 + mysettings["EBUILD_PHASE"] = mydo
29232 +
29233 + # Set requested Python interpreter for Portage helpers.
29234 + mysettings["PORTAGE_PYTHON"] = portage._python_interpreter
29235 +
29236 + # This is used by assert_sigpipe_ok() that's used by the ebuild
29237 + # unpack() helper. SIGPIPE is typically 13, but it's better not
29238 + # to assume that.
29239 + mysettings["PORTAGE_SIGPIPE_STATUS"] = str(128 + signal.SIGPIPE)
29240 +
29241 + # We are disabling user-specific bashrc files.
29242 + mysettings["BASH_ENV"] = INVALID_ENV_FILE
29243 +
29244 + if debug: # Otherwise it overrides emerge's settings.
29245 + # We have no other way to set debug... debug can't be passed in
29246 + # due to how it's coded... Don't overwrite this so we can use it.
29247 + mysettings["PORTAGE_DEBUG"] = "1"
29248 +
29249 + mysettings["EBUILD"] = ebuild_path
29250 + mysettings["O"] = pkg_dir
29251 + mysettings.configdict["pkg"]["CATEGORY"] = cat
29252 + mysettings["PF"] = mypv
29253 +
29254 + if hasattr(mydbapi, "repositories"):
29255 + repo = mydbapi.repositories.get_repo_for_location(mytree)
29256 + mysettings["PORTDIR"] = repo.eclass_db.porttrees[0]
29257 + mysettings["PORTAGE_ECLASS_LOCATIONS"] = repo.eclass_db.eclass_locations_string
29258 + mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"] = repo.name
29259 +
29260 + mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
29261 + mysettings.pop("PORTDIR_OVERLAY", None)
29262 + mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
29263 + mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])
29264 +
29265 + mysettings["ECLASSDIR"] = mysettings["PORTDIR"] + "/eclass"
29266 +
29267 + mysettings["PORTAGE_BASHRC_FILES"] = "\n".join(mysettings._pbashrc)
29268 +
29269 + mysettings["P"] = mysplit[0] + "-" + mysplit[1]
29270 + mysettings["PN"] = mysplit[0]
29271 + mysettings["PV"] = mysplit[1]
29272 + mysettings["PR"] = mysplit[2]
29273 +
29274 + if noiselimit < 0:
29275 + mysettings["PORTAGE_QUIET"] = "1"
29276 +
29277 + if mysplit[2] == "r0":
29278 + mysettings["PVR"] = mysplit[1]
29279 + else:
29280 + mysettings["PVR"] = mysplit[1] + "-" + mysplit[2]
29281 +
29282 + # All temporary directories should be subdirectories of
29283 + # $PORTAGE_TMPDIR/portage, since it's common for /tmp and /var/tmp
29284 + # to be mounted with the "noexec" option (see bug #346899).
29285 + mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"] + "/portage"
29286 + mysettings["PKG_TMPDIR"] = mysettings["BUILD_PREFIX"] + "/._unmerge_"
29287 +
29288 + # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
29289 + # locations in order to prevent interference.
29290 + if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
29291 + mysettings["PORTAGE_BUILDDIR"] = os.path.join(
29292 + mysettings["PKG_TMPDIR"], mysettings["CATEGORY"], mysettings["PF"]
29293 + )
29294 + else:
29295 + mysettings["PORTAGE_BUILDDIR"] = os.path.join(
29296 + mysettings["BUILD_PREFIX"], mysettings["CATEGORY"], mysettings["PF"]
29297 + )
29298 +
29299 + mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
29300 + mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
29301 + mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
29302 + mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
29303 + mysettings["SANDBOX_LOG"] = os.path.join(mysettings["T"], "sandbox.log")
29304 + mysettings["FILESDIR"] = os.path.join(settings["PORTAGE_BUILDDIR"], "files")
29305 +
29306 + # Prefix forward compatibility
29307 + eprefix_lstrip = mysettings["EPREFIX"].lstrip(os.sep)
29308 + mysettings["ED"] = (
29309 + os.path.join(mysettings["D"], eprefix_lstrip).rstrip(os.sep) + os.sep
29310 + )
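
For a hypothetical PORTAGE_TMPDIR of /var/tmp, a package cat/pkg-1.0, and an EPREFIX of /opt/prefix, the assignments above yield the layout below; with an empty EPREFIX the lstrip/join/rstrip dance leaves ED equal to D:

    BUILD_PREFIX      /var/tmp/portage
    PORTAGE_BUILDDIR  /var/tmp/portage/cat/pkg-1.0
    HOME              /var/tmp/portage/cat/pkg-1.0/homedir
    WORKDIR           /var/tmp/portage/cat/pkg-1.0/work
    D                 /var/tmp/portage/cat/pkg-1.0/image/
    T                 /var/tmp/portage/cat/pkg-1.0/temp
    FILESDIR          /var/tmp/portage/cat/pkg-1.0/files
    ED                /var/tmp/portage/cat/pkg-1.0/image/opt/prefix/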
29311 +
29312 + mysettings["PORTAGE_BASHRC"] = os.path.join(
29313 + mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE
29314 + )
29315 + mysettings["PM_EBUILD_HOOK_DIR"] = os.path.join(
29316 + mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_DIR
29317 + )
29318 +
29319 + # Allow color.map to control colors associated with einfo, ewarn, etc...
29320 + mysettings["PORTAGE_COLORMAP"] = colormap()
29321 +
29322 + if "COLUMNS" not in mysettings:
29323 + # Set COLUMNS, in order to prevent unnecessary stty calls
29324 + # inside the set_colors function of isolated-functions.sh.
29325 + # We cache the result in os.environ, in order to avoid
29326 + # multiple stty calls in cases when get_term_size() falls
29327 + # back to stty due to a missing or broken curses module.
29328 + columns = os.environ.get("COLUMNS")
29329 + if columns is None:
29330 + rows, columns = portage.output.get_term_size()
29331 + if columns < 1:
29332 + # Force a sane value for COLUMNS, so that tools
29333 + # like ls don't complain (see bug #394091).
29334 + columns = 80
29335 + columns = str(columns)
29336 + os.environ["COLUMNS"] = columns
29337 + mysettings["COLUMNS"] = columns
29338 +
29339 + # EAPI is always known here, even for the "depend" phase, because
29340 + # EbuildMetadataPhase gets it from _parse_eapi_ebuild_head().
29341 + eapi = mysettings.configdict["pkg"]["EAPI"]
29342 + _doebuild_path(mysettings, eapi=eapi)
29343 +
29344 + # All EAPI dependent code comes last, so that essential variables like
29345 + # PATH and PORTAGE_BUILDDIR are still initialized even in cases when
29346 + # UnsupportedAPIException needs to be raised, which can be useful
29347 + # when uninstalling a package that has corrupt EAPI metadata.
29348 + if not eapi_is_supported(eapi):
29349 + raise UnsupportedAPIException(mycpv, eapi)
29350 +
29351 + if (
29352 + eapi_exports_REPOSITORY(eapi)
29353 + and "PORTAGE_REPO_NAME" in mysettings.configdict["pkg"]
29354 + ):
29355 + mysettings.configdict["pkg"]["REPOSITORY"] = mysettings.configdict["pkg"][
29356 + "PORTAGE_REPO_NAME"
29357 + ]
29358 +
29359 + if mydo != "depend":
29360 + if hasattr(mydbapi, "getFetchMap") and (
29361 + "A" not in mysettings.configdict["pkg"]
29362 + or "AA" not in mysettings.configdict["pkg"]
29363 + ):
29364 + src_uri = mysettings.configdict["pkg"].get("SRC_URI")
29365 + if src_uri is None:
29366 + (src_uri,) = mydbapi.aux_get(
29367 + mysettings.mycpv, ["SRC_URI"], mytree=mytree
29368 + )
29369 + metadata = {
29370 + "EAPI": eapi,
29371 + "SRC_URI": src_uri,
29372 + }
29373 + use = frozenset(mysettings["PORTAGE_USE"].split())
29374 + try:
29375 + uri_map = _parse_uri_map(mysettings.mycpv, metadata, use=use)
29376 + except InvalidDependString:
29377 + mysettings.configdict["pkg"]["A"] = ""
29378 + else:
29379 + mysettings.configdict["pkg"]["A"] = " ".join(uri_map)
29380 +
29381 + try:
29382 + uri_map = _parse_uri_map(mysettings.mycpv, metadata)
29383 + except InvalidDependString:
29384 + mysettings.configdict["pkg"]["AA"] = ""
29385 + else:
29386 + mysettings.configdict["pkg"]["AA"] = " ".join(uri_map)
29387 +
29388 + ccache = "ccache" in mysettings.features
29389 + distcc = "distcc" in mysettings.features
29390 + icecream = "icecream" in mysettings.features
29391 +
29392 + if ccache or distcc or icecream:
29393 + libdir = None
29394 + default_abi = mysettings.get("DEFAULT_ABI")
29395 + if default_abi:
29396 + libdir = mysettings.get("LIBDIR_" + default_abi)
29397 + if not libdir:
29398 + libdir = "lib"
29399 +
29400 + # The installation locations used to vary between versions...
29401 + # Safer to look them up than to assume.
29402 + possible_libexecdirs = (libdir, "lib", "libexec")
29403 + masquerades = []
29404 + if distcc:
29405 + masquerades.append(("distcc", "distcc"))
29406 + if icecream:
29407 + masquerades.append(("icecream", "icecc"))
29408 + if ccache:
29409 + masquerades.append(("ccache", "ccache"))
29410 +
29411 + for feature, m in masquerades:
29412 + for l in possible_libexecdirs:
29413 + p = os.path.join(os.sep, eprefix_lstrip, "usr", l, m, "bin")
29414 + if os.path.isdir(p):
29415 + mysettings["PATH"] = p + ":" + mysettings["PATH"]
29416 + break
29417 + else:
29418 + writemsg(
29419 + (
29420 + "Warning: %s requested but no masquerade dir "
29421 + "can be found in /usr/lib*/%s/bin\n"
29422 + )
29423 + % (m, m)
29424 + )
29425 + mysettings.features.remove(feature)
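
The masquerade lookup leans on Python's for/else: the else branch runs only when the loop exhausts its candidates without a break, so a single warning covers the nothing-found case. In miniature:

    import os

    for libexec in ("lib64", "lib", "libexec"):
        candidate = os.path.join("/usr", libexec, "ccache", "bin")
        if os.path.isdir(candidate):
            break  # found a masquerade dir; prepend it to PATH
    else:
        print("no masquerade dir found; dropping the feature")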
29426 +
29427 + if "MAKEOPTS" not in mysettings:
29428 + nproc = get_cpu_count()
29429 + if nproc:
29430 + mysettings["MAKEOPTS"] = "-j%d" % (nproc)
29431 +
29432 + if not eapi_exports_KV(eapi):
29433 + # Discard KV for EAPIs that don't support it. Cached KV is restored
29434 + # from the backupenv whenever config.reset() is called.
29435 + mysettings.pop("KV", None)
29436 + elif "KV" not in mysettings and mydo in (
29437 + "compile",
29438 + "config",
29439 + "configure",
29440 + "info",
29441 + "install",
29442 + "nofetch",
29443 + "postinst",
29444 + "postrm",
29445 + "preinst",
29446 + "prepare",
29447 + "prerm",
29448 + "setup",
29449 + "test",
29450 + "unpack",
29451 + ):
29452 + mykv, err1 = ExtractKernelVersion(
29453 + os.path.join(mysettings["EROOT"], "usr/src/linux")
29454 + )
29455 + if mykv:
29456 + # Regular source tree
29457 + mysettings["KV"] = mykv
29458 + else:
29459 + mysettings["KV"] = ""
29460 + mysettings.backup_changes("KV")
29461 +
29462 + binpkg_compression = mysettings.get("BINPKG_COMPRESS", "bzip2")
29463 + try:
29464 + compression = _compressors[binpkg_compression]
29465 + except KeyError as e:
29466 + if binpkg_compression:
29467 + writemsg(
29468 + "Warning: Invalid or unsupported compression method: %s\n"
29469 + % e.args[0]
29470 + )
29471 + else:
29472 + # Empty BINPKG_COMPRESS disables compression.
29473 + mysettings["PORTAGE_COMPRESSION_COMMAND"] = "cat"
29474 + else:
29475 + try:
29476 + compression_binary = shlex_split(
29477 + varexpand(compression["compress"], mydict=settings)
29478 + )[0]
29479 + except IndexError as e:
29480 + writemsg(
29481 + "Warning: Invalid or unsupported compression method: %s\n"
29482 + % e.args[0]
29483 + )
29484 + else:
29485 + if find_binary(compression_binary) is None:
29486 + missing_package = compression["package"]
29487 + writemsg(
29488 + "Warning: File compression unsupported %s. Missing package: %s\n"
29489 + % (binpkg_compression, missing_package)
29490 + )
29491 + else:
29492 + cmd = [
29493 + varexpand(x, mydict=settings)
29494 + for x in shlex_split(compression["compress"])
29495 + ]
29496 + # Filter empty elements
29497 + cmd = [x for x in cmd if x != ""]
29498 + mysettings["PORTAGE_COMPRESSION_COMMAND"] = " ".join(cmd)
29499 +
29500
29501 _doebuild_manifest_cache = None
29502 _doebuild_broken_ebuilds = set()
29503 _doebuild_broken_manifests = set()
29504 _doebuild_commands_without_builddir = (
29505 - 'clean', 'cleanrm', 'depend', 'digest',
29506 - 'fetch', 'fetchall', 'help', 'manifest'
29507 + "clean",
29508 + "cleanrm",
29509 + "depend",
29510 + "digest",
29511 + "fetch",
29512 + "fetchall",
29513 + "help",
29514 + "manifest",
29515 )
29516
29517 - def doebuild(myebuild, mydo, _unused=DeprecationWarning, settings=None, debug=0, listonly=0,
29518 - fetchonly=0, cleanup=0, dbkey=DeprecationWarning, use_cache=1, fetchall=0, tree=None,
29519 - mydbapi=None, vartree=None, prev_mtimes=None,
29520 - fd_pipes=None, returnpid=False):
29521 - """
29522 - Wrapper function that invokes specific ebuild phases through the spawning
29523 - of ebuild.sh
29524 -
29525 - @param myebuild: name of the ebuild to invoke the phase on (CPV)
29526 - @type myebuild: String
29527 - @param mydo: Phase to run
29528 - @type mydo: String
29529 - @param _unused: Deprecated (use settings["EROOT"] instead)
29530 - @type _unused: String
29531 - @param settings: Portage Configuration
29532 - @type settings: instance of portage.config
29533 - @param debug: Turns on various debug information (eg, debug for spawn)
29534 - @type debug: Boolean
29535 - @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
29536 - @type listonly: Boolean
29537 - @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
29538 - @type fetchonly: Boolean
29539 - @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
29540 - @type cleanup: Boolean
29541 - @param dbkey: A file path where metadata generated by the 'depend' phase
29542 - will be written.
29543 - @type dbkey: String
29544 - @param use_cache: Enables the cache
29545 - @type use_cache: Boolean
29546 - @param fetchall: Used to wrap fetch(), fetches all URIs (even ones invalid due to USE conditionals)
29547 - @type fetchall: Boolean
29548 - @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
29549 - @type tree: String
29550 - @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
29551 - @type mydbapi: portdbapi instance
29552 - @param vartree: An instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
29553 - @type vartree: vartree instance
29554 - @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
29555 - @type prev_mtimes: dictionary
29556 - @param fd_pipes: A dict of mappings for pipes, { '0': stdin, '1': stdout }
29557 - for example.
29558 - @type fd_pipes: Dictionary
29559 - @param returnpid: Return a list of process IDs for a successful spawn, or
29560 - an integer value if spawn is unsuccessful. NOTE: This requires the
29561 - caller to clean up all returned PIDs.
29562 - @type returnpid: Boolean
29563 - @rtype: Boolean
29564 - @return:
29565 - 1. 0 for success
29566 - 2. 1 for error
29567 -
29568 - Most errors have an accompanying error message.
29569 -
29570 - listonly and fetchonly are only really necessary for operations involving 'fetch';
29571 - prev_mtimes are only necessary for merge operations.
29572 - Other variables may not be strictly required; many have defaults that are set inside of doebuild.
29573 -
29574 - """
29575 -
29576 - if settings is None:
29577 - raise TypeError("settings parameter is required")
29578 - mysettings = settings
29579 - myroot = settings['EROOT']
29580 -
29581 - if _unused is not DeprecationWarning:
29582 - warnings.warn("The third parameter of the "
29583 - "portage.doebuild() is deprecated. Instead "
29584 - "settings['EROOT'] is used.",
29585 - DeprecationWarning, stacklevel=2)
29586 -
29587 - if dbkey is not DeprecationWarning:
29588 - warnings.warn("portage.doebuild() called "
29589 - "with deprecated dbkey argument.",
29590 - DeprecationWarning, stacklevel=2)
29591 -
29592 - if not tree:
29593 - writemsg("Warning: tree not specified to doebuild\n")
29594 - tree = "porttree"
29595 -
29596 - # chunked out deps for each phase, so that the ebuild binary can use it
29597 - # to collapse targets down.
29598 - actionmap_deps={
29599 - "pretend" : [],
29600 - "setup": ["pretend"],
29601 - "unpack": ["setup"],
29602 - "prepare": ["unpack"],
29603 - "configure": ["prepare"],
29604 - "compile":["configure"],
29605 - "test": ["compile"],
29606 - "install":["test"],
29607 - "instprep":["install"],
29608 - "rpm": ["install"],
29609 - "package":["install"],
29610 - "merge" :["install"],
29611 - }
29612 -
29613 - if mydbapi is None:
29614 - mydbapi = portage.db[myroot][tree].dbapi
29615 -
29616 - if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
29617 - vartree = portage.db[myroot]["vartree"]
29618 -
29619 - features = mysettings.features
29620 -
29621 - clean_phases = ("clean", "cleanrm")
29622 - validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
29623 - "config", "info", "setup", "depend", "pretend",
29624 - "fetch", "fetchall", "digest",
29625 - "unpack", "prepare", "configure", "compile", "test",
29626 - "install", "instprep", "rpm", "qmerge", "merge",
29627 - "package", "unmerge", "manifest", "nofetch"]
29628 -
29629 - if mydo not in validcommands:
29630 - validcommands.sort()
29631 - writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
29632 - noiselevel=-1)
29633 - for vcount in range(len(validcommands)):
29634 - if vcount%6 == 0:
29635 - writemsg("\n!!! ", noiselevel=-1)
29636 - writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
29637 - writemsg("\n", noiselevel=-1)
29638 - return 1
29639 -
29640 - if returnpid and mydo != 'depend':
29641 - # This case is not supported, since it bypasses the EbuildPhase class
29642 - # which implements important functionality (including post phase hooks
29643 - # and IPC for things like best/has_version and die).
29644 - warnings.warn("portage.doebuild() called "
29645 - "with returnpid parameter enabled. This usage will "
29646 - "not be supported in the future.",
29647 - DeprecationWarning, stacklevel=2)
29648 -
29649 - if mydo == "fetchall":
29650 - fetchall = 1
29651 - mydo = "fetch"
29652 -
29653 - if mydo not in clean_phases and not os.path.exists(myebuild):
29654 - writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
29655 - noiselevel=-1)
29656 - return 1
29657 -
29658 - global _doebuild_manifest_cache
29659 - pkgdir = os.path.dirname(myebuild)
29660 - manifest_path = os.path.join(pkgdir, "Manifest")
29661 - if tree == "porttree":
29662 - repo_config = mysettings.repositories.get_repo_for_location(
29663 - os.path.dirname(os.path.dirname(pkgdir)))
29664 - else:
29665 - repo_config = None
29666 -
29667 - mf = None
29668 - if "strict" in features and \
29669 - "digest" not in features and \
29670 - tree == "porttree" and \
29671 - not repo_config.thin_manifest and \
29672 - mydo not in ("digest", "manifest", "help") and \
29673 - not portage._doebuild_manifest_exempt_depend and \
29674 - not (repo_config.allow_missing_manifest and not os.path.exists(manifest_path)):
29675 - # Always verify the ebuild checksums before executing it.
29676 - global _doebuild_broken_ebuilds
29677 -
29678 - if myebuild in _doebuild_broken_ebuilds:
29679 - return 1
29680 -
29681 - # Avoid checking the same Manifest several times in a row during a
29682 - # regen with an empty cache.
29683 - if _doebuild_manifest_cache is None or \
29684 - _doebuild_manifest_cache.getFullname() != manifest_path:
29685 - _doebuild_manifest_cache = None
29686 - if not os.path.exists(manifest_path):
29687 - out = portage.output.EOutput()
29688 - out.eerror(_("Manifest not found for '%s'") % (myebuild,))
29689 - _doebuild_broken_ebuilds.add(myebuild)
29690 - return 1
29691 - mf = repo_config.load_manifest(pkgdir, mysettings["DISTDIR"])
29692 -
29693 - else:
29694 - mf = _doebuild_manifest_cache
29695 -
29696 - try:
29697 - mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
29698 - except KeyError:
29699 - if not (mf.allow_missing and
29700 - os.path.basename(myebuild) not in mf.fhashdict["EBUILD"]):
29701 - out = portage.output.EOutput()
29702 - out.eerror(_("Missing digest for '%s'") % (myebuild,))
29703 - _doebuild_broken_ebuilds.add(myebuild)
29704 - return 1
29705 - except FileNotFound:
29706 - out = portage.output.EOutput()
29707 - out.eerror(_("A file listed in the Manifest "
29708 - "could not be found: '%s'") % (myebuild,))
29709 - _doebuild_broken_ebuilds.add(myebuild)
29710 - return 1
29711 - except DigestException as e:
29712 - out = portage.output.EOutput()
29713 - out.eerror(_("Digest verification failed:"))
29714 - out.eerror("%s" % e.value[0])
29715 - out.eerror(_("Reason: %s") % e.value[1])
29716 - out.eerror(_("Got: %s") % e.value[2])
29717 - out.eerror(_("Expected: %s") % e.value[3])
29718 - _doebuild_broken_ebuilds.add(myebuild)
29719 - return 1
29720 -
29721 - if mf.getFullname() in _doebuild_broken_manifests:
29722 - return 1
29723 -
29724 - if mf is not _doebuild_manifest_cache and not mf.allow_missing:
29725 -
29726 - # Make sure that all of the ebuilds are
29727 - # actually listed in the Manifest.
29728 - for f in os.listdir(pkgdir):
29729 - pf = None
29730 - if f[-7:] == '.ebuild':
29731 - pf = f[:-7]
29732 - if pf is not None and not mf.hasFile("EBUILD", f):
29733 - f = os.path.join(pkgdir, f)
29734 - if f not in _doebuild_broken_ebuilds:
29735 - out = portage.output.EOutput()
29736 - out.eerror(_("A file is not listed in the "
29737 - "Manifest: '%s'") % (f,))
29738 - _doebuild_broken_manifests.add(manifest_path)
29739 - return 1
29740 -
29741 - # We cache it only after all above checks succeed.
29742 - _doebuild_manifest_cache = mf
29743 -
29744 - logfile=None
29745 - builddir_lock = None
29746 - tmpdir = None
29747 - tmpdir_orig = None
29748 -
29749 - try:
29750 - if mydo in ("digest", "manifest", "help"):
29751 - # Temporarily exempt the depend phase from manifest checks, in case
29752 - # aux_get calls trigger cache generation.
29753 - portage._doebuild_manifest_exempt_depend += 1
29754 -
29755 - # If we don't need much space and we don't need a constant location,
29756 - # we can temporarily override PORTAGE_TMPDIR with a random temp dir
29757 - # so that there's no need for locking and it can be used even if the
29758 - # user isn't in the portage group.
29759 - if not returnpid and mydo in ("info",):
29760 - tmpdir = tempfile.mkdtemp()
29761 - tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
29762 - mysettings["PORTAGE_TMPDIR"] = tmpdir
29763 -
29764 - doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
29765 - use_cache, mydbapi)
29766 -
29767 - if mydo in clean_phases:
29768 - builddir_lock = None
29769 - if not returnpid and \
29770 - 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
29771 - builddir_lock = EbuildBuildDir(
29772 - scheduler=asyncio._safe_loop(),
29773 - settings=mysettings)
29774 - builddir_lock.scheduler.run_until_complete(
29775 - builddir_lock.async_lock())
29776 - try:
29777 - return _spawn_phase(mydo, mysettings,
29778 - fd_pipes=fd_pipes, returnpid=returnpid)
29779 - finally:
29780 - if builddir_lock is not None:
29781 - builddir_lock.scheduler.run_until_complete(
29782 - builddir_lock.async_unlock())
29783 -
29784 - # get possible slot information from the deps file
29785 - if mydo == "depend":
29786 - writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
29787 - if returnpid:
29788 - return _spawn_phase(mydo, mysettings,
29789 - fd_pipes=fd_pipes, returnpid=returnpid)
29790 - if dbkey and dbkey is not DeprecationWarning:
29791 - mysettings["dbkey"] = dbkey
29792 - else:
29793 - mysettings["dbkey"] = \
29794 - os.path.join(mysettings.depcachedir, "aux_db_key_temp")
29795 -
29796 - return _spawn_phase(mydo, mysettings,
29797 - fd_pipes=fd_pipes, returnpid=returnpid)
29798 -
29799 - if mydo == "nofetch":
29800 -
29801 - if returnpid:
29802 - writemsg("!!! doebuild: %s\n" %
29803 - _("returnpid is not supported for phase '%s'\n" % mydo),
29804 - noiselevel=-1)
29805 -
29806 - return spawn_nofetch(mydbapi, myebuild, settings=mysettings,
29807 - fd_pipes=fd_pipes)
29808 -
29809 - if tree == "porttree":
29810 -
29811 - if not returnpid:
29812 - # Validate dependency metadata here to ensure that ebuilds with
29813 - # invalid data are never installed via the ebuild command. Skip
29814 - # this when returnpid is True (assume the caller handled it).
29815 - rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
29816 - if rval != os.EX_OK:
29817 - return rval
29818 -
29819 - else:
29820 - # FEATURES=noauto only makes sense for porttree, and we don't want
29821 - # it to trigger redundant sourcing of the ebuild for API consumers
29822 - # that are using binary packages
29823 - if "noauto" in mysettings.features:
29824 - mysettings.features.discard("noauto")
29825 -
29826 - # If we are not using a private temp dir, then check access
29827 - # to the global temp dir.
29828 - if tmpdir is None and \
29829 - mydo not in _doebuild_commands_without_builddir:
29830 - rval = _check_temp_dir(mysettings)
29831 - if rval != os.EX_OK:
29832 - return rval
29833 -
29834 - if mydo == "unmerge":
29835 - if returnpid:
29836 - writemsg("!!! doebuild: %s\n" %
29837 - _("returnpid is not supported for phase '%s'\n" % mydo),
29838 - noiselevel=-1)
29839 - return unmerge(mysettings["CATEGORY"],
29840 - mysettings["PF"], myroot, mysettings, vartree=vartree)
29841 -
29842 - phases_to_run = set()
29843 - if returnpid or \
29844 - "noauto" in mysettings.features or \
29845 - mydo not in actionmap_deps:
29846 - phases_to_run.add(mydo)
29847 - else:
29848 - phase_stack = [mydo]
29849 - while phase_stack:
29850 - x = phase_stack.pop()
29851 - if x in phases_to_run:
29852 - continue
29853 - phases_to_run.add(x)
29854 - phase_stack.extend(actionmap_deps.get(x, []))
29855 - del phase_stack
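
Given the actionmap_deps table above, this stack walk computes the transitive closure of phases, so asking for merge pulls in everything back to pretend. A self-contained rendering with an abridged table:

    deps = {
        "pretend": [], "setup": ["pretend"], "unpack": ["setup"],
        "prepare": ["unpack"], "configure": ["prepare"],
        "compile": ["configure"], "test": ["compile"],
        "install": ["test"], "merge": ["install"],
    }

    def phases_for(mydo):
        phases, stack = set(), [mydo]
        while stack:
            x = stack.pop()
            if x not in phases:
                phases.add(x)
                stack.extend(deps.get(x, []))
        return phases

    # phases_for("merge") == {"merge", "install", "test", "compile",
    #                         "configure", "prepare", "unpack",
    #                         "setup", "pretend"}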
29856 -
29857 - alist = set(mysettings.configdict["pkg"].get("A", "").split())
29858 -
29859 - unpacked = False
29860 - if tree != "porttree" or \
29861 - mydo in _doebuild_commands_without_builddir:
29862 - pass
29863 - elif "unpack" not in phases_to_run:
29864 - unpacked = os.path.exists(os.path.join(
29865 - mysettings["PORTAGE_BUILDDIR"], ".unpacked"))
29866 - else:
29867 - try:
29868 - workdir_st = os.stat(mysettings["WORKDIR"])
29869 - except OSError:
29870 - pass
29871 - else:
29872 - newstuff = False
29873 - if not os.path.exists(os.path.join(
29874 - mysettings["PORTAGE_BUILDDIR"], ".unpacked")):
29875 - writemsg_stdout(_(
29876 - ">>> Not marked as unpacked; recreating WORKDIR...\n"))
29877 - newstuff = True
29878 - else:
29879 - for x in alist:
29880 - writemsg_stdout(">>> Checking %s's mtime...\n" % x)
29881 - try:
29882 - x_st = os.stat(os.path.join(
29883 - mysettings["DISTDIR"], x))
29884 - except OSError:
29885 - # file deleted
29886 - x_st = None
29887 -
29888 - if x_st is not None and x_st.st_mtime > workdir_st.st_mtime:
29889 - writemsg_stdout(_(">>> Timestamp of "
29890 - "%s has changed; recreating WORKDIR...\n") % x)
29891 - newstuff = True
29892 - break
29893 -
29894 - if newstuff:
29895 - if builddir_lock is None and \
29896 - 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
29897 - builddir_lock = EbuildBuildDir(
29898 - scheduler=asyncio._safe_loop(),
29899 - settings=mysettings)
29900 - builddir_lock.scheduler.run_until_complete(
29901 - builddir_lock.async_lock())
29902 - try:
29903 - _spawn_phase("clean", mysettings)
29904 - finally:
29905 - if builddir_lock is not None:
29906 - builddir_lock.scheduler.run_until_complete(
29907 - builddir_lock.async_unlock())
29908 - builddir_lock = None
29909 - else:
29910 - writemsg_stdout(_(">>> WORKDIR is up-to-date, keeping...\n"))
29911 - unpacked = True
29912 -
29913 - # Build directory creation isn't required for any of these.
29914 - # In the fetch phase, the directory is needed only for RESTRICT=fetch
29915 - # in order to satisfy the sane $PWD requirement (from bug #239560)
29916 - # when pkg_nofetch is spawned.
29917 - have_build_dirs = False
29918 - if mydo not in ('digest', 'fetch', 'help', 'manifest'):
29919 - if not returnpid and \
29920 - 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
29921 - builddir_lock = EbuildBuildDir(
29922 - scheduler=asyncio._safe_loop(),
29923 - settings=mysettings)
29924 - builddir_lock.scheduler.run_until_complete(
29925 - builddir_lock.async_lock())
29926 - mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
29927 - if mystatus:
29928 - return mystatus
29929 - have_build_dirs = True
29930 -
29931 - # emerge handles logging externally
29932 - if not returnpid:
29933 - # PORTAGE_LOG_FILE is set by the
29934 - # above prepare_build_dirs() call.
29935 - logfile = mysettings.get("PORTAGE_LOG_FILE")
29936 -
29937 - if have_build_dirs:
29938 - rval = _prepare_env_file(mysettings)
29939 - if rval != os.EX_OK:
29940 - return rval
29941 -
29942 - if eapi_exports_merge_type(mysettings["EAPI"]) and \
29943 - "MERGE_TYPE" not in mysettings.configdict["pkg"]:
29944 - if tree == "porttree":
29945 - mysettings.configdict["pkg"]["MERGE_TYPE"] = "source"
29946 - elif tree == "bintree":
29947 - mysettings.configdict["pkg"]["MERGE_TYPE"] = "binary"
29948 -
29949 - if tree == "porttree":
29950 - mysettings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
29951 - elif tree == "bintree":
29952 - mysettings.configdict["pkg"]["EMERGE_FROM"] = "binary"
29953 -
29954 - # NOTE: It's not possible to set REPLACED_BY_VERSION for prerm
29955 - # and postrm here, since we don't necessarily know what
29956 - # versions are being installed. This could be a problem
29957 - # for API consumers if they don't use dblink.treewalk()
29958 - # to execute prerm and postrm.
29959 - if eapi_exports_replace_vars(mysettings["EAPI"]) and \
29960 - (mydo in ("postinst", "preinst", "pretend", "setup") or \
29961 - ("noauto" not in features and not returnpid and \
29962 - (mydo in actionmap_deps or mydo in ("merge", "package", "qmerge")))):
29963 - if not vartree:
29964 - writemsg("Warning: vartree not given to doebuild. " + \
29965 - "Cannot set REPLACING_VERSIONS in pkg_{pretend,setup}\n")
29966 - else:
29967 - vardb = vartree.dbapi
29968 - cpv = mysettings.mycpv
29969 - cpv_slot = "%s%s%s" % \
29970 - (cpv.cp, portage.dep._slot_separator, cpv.slot)
29971 - mysettings["REPLACING_VERSIONS"] = " ".join(
29972 - set(portage.versions.cpv_getversion(match) \
29973 - for match in vardb.match(cpv_slot) + \
29974 - vardb.match('='+cpv)))
29975 -
29976 - # if any of these are being called, handle them -- running them out of
29977 - # the sandbox -- and stop now.
29978 - if mydo in ("config", "help", "info", "postinst",
29979 - "preinst", "pretend", "postrm", "prerm"):
29980 - if mydo in ("preinst", "postinst"):
29981 - env_file = os.path.join(os.path.dirname(mysettings["EBUILD"]),
29982 - "environment.bz2")
29983 - if os.path.isfile(env_file):
29984 - mysettings["PORTAGE_UPDATE_ENV"] = env_file
29985 - try:
29986 - return _spawn_phase(mydo, mysettings,
29987 - fd_pipes=fd_pipes, logfile=logfile, returnpid=returnpid)
29988 - finally:
29989 - mysettings.pop("PORTAGE_UPDATE_ENV", None)
29990 -
29991 - mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
29992 -
29993 - # Only try to fetch the files if we are going to need them ...
29994 - # otherwise, if the user has FEATURES=noauto and runs `ebuild clean
29995 - # unpack compile install`, we will try to fetch 4 times :/
29996 - need_distfiles = tree == "porttree" and not unpacked and \
29997 - (mydo in ("fetch", "unpack") or \
29998 - mydo not in ("digest", "manifest") and "noauto" not in features)
29999 - if need_distfiles:
30000 -
30001 - src_uri = mysettings.configdict["pkg"].get("SRC_URI")
30002 - if src_uri is None:
30003 - src_uri, = mydbapi.aux_get(mysettings.mycpv,
30004 - ["SRC_URI"], mytree=os.path.dirname(os.path.dirname(
30005 - os.path.dirname(myebuild))))
30006 - metadata = {
30007 - "EAPI" : mysettings["EAPI"],
30008 - "SRC_URI" : src_uri,
30009 - }
30010 - use = frozenset(mysettings["PORTAGE_USE"].split())
30011 - try:
30012 - alist = _parse_uri_map(mysettings.mycpv, metadata, use=use)
30013 - aalist = _parse_uri_map(mysettings.mycpv, metadata)
30014 - except InvalidDependString as e:
30015 - writemsg("!!! %s\n" % str(e), noiselevel=-1)
30016 - writemsg(_("!!! Invalid SRC_URI for '%s'.\n") % mycpv,
30017 - noiselevel=-1)
30018 - del e
30019 - return 1
30020 -
30021 - if "mirror" in features or fetchall:
30022 - fetchme = aalist
30023 - else:
30024 - fetchme = alist
30025 -
30026 - dist_digests = None
30027 - if mf is not None:
30028 - dist_digests = mf.getTypeDigests("DIST")
30029 -
30030 - def _fetch_subprocess(fetchme, mysettings, listonly, dist_digests):
30031 - # For userfetch, drop privileges for the entire fetch call, in
30032 - # order to handle DISTDIR on NFS with root_squash for bug 601252.
30033 - if _want_userfetch(mysettings):
30034 - _drop_privs_userfetch(mysettings)
30035 -
30036 - return fetch(fetchme, mysettings, listonly=listonly,
30037 - fetchonly=fetchonly, allow_missing_digests=False,
30038 - digests=dist_digests)
30039 -
30040 - loop = asyncio._safe_loop()
30041 - if loop.is_running():
30042 - # Called by EbuildFetchonly for emerge --pretend --fetchonly.
30043 - success = fetch(fetchme, mysettings, listonly=listonly,
30044 - fetchonly=fetchonly, allow_missing_digests=False,
30045 - digests=dist_digests)
30046 - else:
30047 - success = loop.run_until_complete(
30048 - loop.run_in_executor(ForkExecutor(loop=loop),
30049 - _fetch_subprocess, fetchme, mysettings, listonly, dist_digests))
30050 - if not success:
30051 - # Since listonly mode is called by emerge --pretend in an
30052 - # asynchronous context, spawn_nofetch would trigger event loop
30053 - # recursion here, therefore delegate execution of pkg_nofetch
30054 - # to the caller (bug 657360).
30055 - if not listonly:
30056 - spawn_nofetch(mydbapi, myebuild, settings=mysettings,
30057 - fd_pipes=fd_pipes)
30058 - return 1
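
The executor dance exists because fetch() blocks (and, for FEATURES=userfetch, drops privileges for the whole call), so outside a running event loop it is pushed into a forked child. A minimal shape of the pattern, with do_fetch standing in for the real fetch call and ProcessPoolExecutor for Portage's ForkExecutor:

    import asyncio
    from concurrent.futures import ProcessPoolExecutor

    def do_fetch():
        # ... drop privileges, download, verify digests ...
        return True

    loop = asyncio.new_event_loop()
    with ProcessPoolExecutor() as pool:
        success = loop.run_until_complete(
            loop.run_in_executor(pool, do_fetch))
    loop.close()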
30059 -
30060 - if need_distfiles:
30061 - # Files are already checked inside fetch(),
30062 - # so do not check them again.
30063 - checkme = []
30064 - elif unpacked:
30065 - # The unpack phase is marked as complete, so it
30066 - # would be wasteful to check distfiles again.
30067 - checkme = []
30068 - else:
30069 - checkme = alist
30070 -
30071 - if mydo == "fetch" and listonly:
30072 - return 0
30073 -
30074 - try:
30075 - if mydo == "manifest":
30076 - mf = None
30077 - _doebuild_manifest_cache = None
30078 - return not digestgen(mysettings=mysettings, myportdb=mydbapi)
30079 - if mydo == "digest":
30080 - mf = None
30081 - _doebuild_manifest_cache = None
30082 - return not digestgen(mysettings=mysettings, myportdb=mydbapi)
30083 - if "digest" in mysettings.features:
30084 - mf = None
30085 - _doebuild_manifest_cache = None
30086 - digestgen(mysettings=mysettings, myportdb=mydbapi)
30087 - except PermissionDenied as e:
30088 - writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
30089 - if mydo in ("digest", "manifest"):
30090 - return 1
30091 -
30092 - if mydo == "fetch":
30093 - # Return after digestgen for FEATURES=digest support.
30094 - # Return before digestcheck, since fetch() already
30095 - # checked any relevant digests.
30096 - return 0
30097 -
30098 - # See above comment about fetching only when needed
30099 - if tree == 'porttree' and \
30100 - not digestcheck(checkme, mysettings, "strict" in features, mf=mf):
30101 - return 1
30102 -
30103 - # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
30104 - if tree == 'porttree' and \
30105 - ((mydo != "setup" and "noauto" not in features) \
30106 - or mydo in ("install", "unpack")):
30107 - _prepare_fake_distdir(mysettings, alist)
30108 -
30109 - #initial dep checks complete; time to process main commands
30110 - actionmap = _spawn_actionmap(mysettings)
30111 -
30112 - # merge the deps in so we again have a 'full' actionmap
30113 - # be glad when this can die.
30114 - for x in actionmap:
30115 - if len(actionmap_deps.get(x, [])):
30116 - actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
30117 -
30118 - regular_actionmap_phase = mydo in actionmap
30119 -
30120 - if regular_actionmap_phase:
30121 - bintree = None
30122 - if mydo == "package":
30123 - # Make sure the package directory exists before executing
30124 - # this phase. This can raise PermissionDenied if
30125 - # the current user doesn't have write access to $PKGDIR.
30126 - if hasattr(portage, 'db'):
30127 - bintree = portage.db[mysettings['EROOT']]['bintree']
30128 - binpkg_tmpfile_dir = os.path.join(bintree.pkgdir, mysettings["CATEGORY"])
30129 - bintree._ensure_dir(binpkg_tmpfile_dir)
30130 - with tempfile.NamedTemporaryFile(
30131 - prefix=mysettings["PF"],
30132 - suffix=".tbz2." + str(portage.getpid()),
30133 - dir=binpkg_tmpfile_dir,
30134 - delete=False) as binpkg_tmpfile:
30135 - mysettings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile.name
30136 - else:
30137 - parent_dir = os.path.join(mysettings["PKGDIR"],
30138 - mysettings["CATEGORY"])
30139 - portage.util.ensure_dirs(parent_dir)
30140 - if not os.access(parent_dir, os.W_OK):
30141 - raise PermissionDenied(
30142 - "access('%s', os.W_OK)" % parent_dir)
30143 - retval = spawnebuild(mydo,
30144 - actionmap, mysettings, debug, logfile=logfile,
30145 - fd_pipes=fd_pipes, returnpid=returnpid)
30146 -
30147 - if returnpid and isinstance(retval, list):
30148 - return retval
30149 -
30150 - if retval == os.EX_OK:
30151 - if mydo == "package" and bintree is not None:
30152 - pkg = bintree.inject(mysettings.mycpv,
30153 - filename=mysettings["PORTAGE_BINPKG_TMPFILE"])
30154 - if pkg is not None:
30155 - infoloc = os.path.join(
30156 - mysettings["PORTAGE_BUILDDIR"], "build-info")
30157 - build_info = {
30158 - "BINPKGMD5": "%s\n" % pkg._metadata["MD5"],
30159 - }
30160 - if pkg.build_id is not None:
30161 - build_info["BUILD_ID"] = "%s\n" % pkg.build_id
30162 - for k, v in build_info.items():
30163 - with io.open(_unicode_encode(
30164 - os.path.join(infoloc, k),
30165 - encoding=_encodings['fs'], errors='strict'),
30166 - mode='w', encoding=_encodings['repo.content'],
30167 - errors='strict') as f:
30168 - f.write(v)
30169 - else:
30170 - if "PORTAGE_BINPKG_TMPFILE" in mysettings:
30171 - try:
30172 - os.unlink(mysettings["PORTAGE_BINPKG_TMPFILE"])
30173 - except OSError:
30174 - pass
30175 -
30176 - elif returnpid:
30177 - writemsg("!!! doebuild: %s\n" %
30178 - _("returnpid is not supported for phase '%s'\n" % mydo),
30179 - noiselevel=-1)
30180 -
30181 - if regular_actionmap_phase:
30182 - # handled above
30183 - pass
30184 - elif mydo == "qmerge":
30185 - # check to ensure install was run. this *only* pops up when users
30186 - # forget it and are using ebuild
30187 - if not os.path.exists(
30188 - os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
30189 - writemsg(_("!!! mydo=qmerge, but the install phase has not been run\n"),
30190 - noiselevel=-1)
30191 - return 1
30192 - # qmerge is a special phase that implies noclean.
30193 - if "noclean" not in mysettings.features:
30194 - mysettings.features.add("noclean")
30195 - _handle_self_update(mysettings, vartree.dbapi)
30196 - #qmerge is specifically not supposed to do a runtime dep check
30197 - retval = merge(
30198 - mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
30199 - os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
30200 - myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
30201 - mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes,
30202 - fd_pipes=fd_pipes)
30203 - elif mydo=="merge":
30204 - retval = spawnebuild("install", actionmap, mysettings, debug,
30205 - alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
30206 - returnpid=returnpid)
30207 - if retval != os.EX_OK:
30208 - # The merge phase handles this already. Callers don't know how
30209 - # far this function got, so we have to call elog_process() here
30210 - # so that it's only called once.
30211 - elog_process(mysettings.mycpv, mysettings)
30212 - if retval == os.EX_OK:
30213 - _handle_self_update(mysettings, vartree.dbapi)
30214 - retval = merge(mysettings["CATEGORY"], mysettings["PF"],
30215 - mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
30216 - "build-info"), myroot, mysettings,
30217 - myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
30218 - vartree=vartree, prev_mtimes=prev_mtimes,
30219 - fd_pipes=fd_pipes)
30220 -
30221 - else:
30222 - writemsg_stdout(_("!!! Unknown mydo: %s\n") % mydo, noiselevel=-1)
30223 - return 1
30224 -
30225 - return retval
30226 -
30227 - finally:
30228 -
30229 - if builddir_lock is not None:
30230 - builddir_lock.scheduler.run_until_complete(
30231 - builddir_lock.async_unlock())
30232 - if tmpdir:
30233 - mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
30234 - shutil.rmtree(tmpdir)
30235 -
30236 - mysettings.pop("REPLACING_VERSIONS", None)
30237 -
30238 - if logfile and not returnpid:
30239 - try:
30240 - if os.stat(logfile).st_size == 0:
30241 - os.unlink(logfile)
30242 - except OSError:
30243 - pass
30244 -
30245 - if mydo in ("digest", "manifest", "help"):
30246 - # If necessary, depend phase has been triggered by aux_get calls
30247 - # and the exemption is no longer needed.
30248 - portage._doebuild_manifest_exempt_depend -= 1
30249 +
30250 + def doebuild(
30251 + myebuild,
30252 + mydo,
30253 + _unused=DeprecationWarning,
30254 + settings=None,
30255 + debug=0,
30256 + listonly=0,
30257 + fetchonly=0,
30258 + cleanup=0,
30259 + use_cache=1,
30260 + fetchall=0,
30261 + tree=None,
30262 + mydbapi=None,
30263 + vartree=None,
30264 + prev_mtimes=None,
30265 + fd_pipes=None,
30266 + returnpid=False,
30267 + ):
30268 + """
30269 + Wrapper function that invokes specific ebuild phases through the spawning
30270 + of ebuild.sh
30271 +
30272 + @param myebuild: name of the ebuild to invoke the phase on (CPV)
30273 + @type myebuild: String
30274 + @param mydo: Phase to run
30275 + @type mydo: String
30276 + @param _unused: Deprecated (use settings["EROOT"] instead)
30277 + @type _unused: String
30278 + @param settings: Portage Configuration
30279 + @type settings: instance of portage.config
30280 + @param debug: Turns on various debug information (eg, debug for spawn)
30281 + @type debug: Boolean
30282 + @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
30283 + @type listonly: Boolean
30284 + @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
30285 + @type fetchonly: Boolean
30286 + @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
30287 + @type cleanup: Boolean
30288 + @param use_cache: Enables the cache
30289 + @type use_cache: Boolean
30290 + @param fetchall: Used to wrap fetch(), fetches all URIs (even ones invalid due to USE conditionals)
30291 + @type fetchall: Boolean
30292 + @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
30293 + @type tree: String
30294 + @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
30295 + @type mydbapi: portdbapi instance
30296 + @param vartree: An instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
30297 + @type vartree: vartree instance
30298 + @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
30299 + @type prev_mtimes: dictionary
30300 + @param fd_pipes: A dict of mappings for pipes, { '0': stdin, '1': stdout }
30301 + for example.
30302 + @type fd_pipes: Dictionary
30303 + @param returnpid: Return a list of process IDs for a successful spawn, or
30304 + an integer value if spawn is unsuccessful. NOTE: This requires the
30305 + caller to clean up all returned PIDs.
30306 + @type returnpid: Boolean
30307 + @rtype: Boolean
30308 + @return:
30309 + 1. 0 for success
30310 + 2. 1 for error
30311 +
30312 + Most errors have an accompanying error message.
30313 +
30314 + listonly and fetchonly are only really necessary for operations involving 'fetch';
30315 + prev_mtimes are only necessary for merge operations.
30316 + Other variables may not be strictly required; many have defaults that are set inside of doebuild.
30317 +
30318 + """
30319 +
30320 + if settings is None:
30321 + raise TypeError("settings parameter is required")
30322 + mysettings = settings
30323 + myroot = settings["EROOT"]
30324 +
30325 + if _unused is not DeprecationWarning:
30326 + warnings.warn(
30327 + "The third parameter of the "
30328 + "portage.doebuild() is deprecated. Instead "
30329 + "settings['EROOT'] is used.",
30330 + DeprecationWarning,
30331 + stacklevel=2,
30332 + )
30333 +
30334 + if not tree:
30335 + writemsg("Warning: tree not specified to doebuild\n")
30336 + tree = "porttree"
30337 +
30338 + # chunked out deps for each phase, so that the ebuild binary can use it
30339 + # to collapse targets down.
30340 + actionmap_deps = {
30341 + "pretend": [],
30342 + "setup": ["pretend"],
30343 + "unpack": ["setup"],
30344 + "prepare": ["unpack"],
30345 + "configure": ["prepare"],
30346 + "compile": ["configure"],
30347 + "test": ["compile"],
30348 + "install": ["test"],
30349 + "instprep": ["install"],
30350 + "rpm": ["install"],
30351 + "package": ["install"],
30352 + "merge": ["install"],
30353 + }
30354 +
30355 + if mydbapi is None:
30356 + mydbapi = portage.db[myroot][tree].dbapi
30357 +
30358 + if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
30359 + vartree = portage.db[myroot]["vartree"]
30360 +
30361 + features = mysettings.features
30362 +
30363 + clean_phases = ("clean", "cleanrm")
30364 + validcommands = [
30365 + "help",
30366 + "clean",
30367 + "prerm",
30368 + "postrm",
30369 + "cleanrm",
30370 + "preinst",
30371 + "postinst",
30372 + "config",
30373 + "info",
30374 + "setup",
30375 + "depend",
30376 + "pretend",
30377 + "fetch",
30378 + "fetchall",
30379 + "digest",
30380 + "unpack",
30381 + "prepare",
30382 + "configure",
30383 + "compile",
30384 + "test",
30385 + "install",
30386 + "instprep",
30387 + "rpm",
30388 + "qmerge",
30389 + "merge",
30390 + "package",
30391 + "unmerge",
30392 + "manifest",
30393 + "nofetch",
30394 + ]
30395 +
30396 + if mydo not in validcommands:
30397 + validcommands.sort()
30398 + writemsg(
30399 + "!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
30400 + noiselevel=-1,
30401 + )
30402 + for vcount in range(len(validcommands)):
30403 + if vcount % 6 == 0:
30404 + writemsg("\n!!! ", noiselevel=-1)
30405 + writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
30406 + writemsg("\n", noiselevel=-1)
30407 + return 1
30408 +
30409 + if returnpid and mydo != "depend":
30410 + # This case is not supported, since it bypasses the EbuildPhase class
30411 + # which implements important functionality (including post phase hooks
30412 + # and IPC for things like best/has_version and die).
30413 + warnings.warn(
30414 + "portage.doebuild() called "
30415 + "with returnpid parameter enabled. This usage will "
30416 + "not be supported in the future.",
30417 + DeprecationWarning,
30418 + stacklevel=2,
30419 + )
30420 +
30421 + if mydo == "fetchall":
30422 + fetchall = 1
30423 + mydo = "fetch"
30424 +
30425 + if mydo not in clean_phases and not os.path.exists(myebuild):
30426 + writemsg(
30427 + "!!! doebuild: %s not found for %s\n" % (myebuild, mydo), noiselevel=-1
30428 + )
30429 + return 1
30430 +
30431 + global _doebuild_manifest_cache
30432 + pkgdir = os.path.dirname(myebuild)
30433 + manifest_path = os.path.join(pkgdir, "Manifest")
30434 + if tree == "porttree":
30435 + repo_config = mysettings.repositories.get_repo_for_location(
30436 + os.path.dirname(os.path.dirname(pkgdir))
30437 + )
30438 + else:
30439 + repo_config = None
30440 +
30441 + mf = None
30442 + if (
30443 + "strict" in features
30444 + and "digest" not in features
30445 + and tree == "porttree"
30446 + and not repo_config.thin_manifest
30447 + and mydo not in ("digest", "manifest", "help")
30448 + and not portage._doebuild_manifest_exempt_depend
30449 + and not (
30450 + repo_config.allow_missing_manifest and not os.path.exists(manifest_path)
30451 + )
30452 + ):
30453 + # Always verify an ebuild's checksums before executing it.
30454 + global _doebuild_broken_ebuilds
30455 +
30456 + if myebuild in _doebuild_broken_ebuilds:
30457 + return 1
30458 +
30459 + # Avoid checking the same Manifest several times in a row during a
30460 + # regen with an empty cache.
30461 + if (
30462 + _doebuild_manifest_cache is None
30463 + or _doebuild_manifest_cache.getFullname() != manifest_path
30464 + ):
30465 + _doebuild_manifest_cache = None
30466 + if not os.path.exists(manifest_path):
30467 + out = portage.output.EOutput()
30468 + out.eerror(_("Manifest not found for '%s'") % (myebuild,))
30469 + _doebuild_broken_ebuilds.add(myebuild)
30470 + return 1
30471 + mf = repo_config.load_manifest(pkgdir, mysettings["DISTDIR"])
30472 +
30473 + else:
30474 + mf = _doebuild_manifest_cache
30475 +
30476 + try:
30477 + mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
30478 + except KeyError:
30479 + if not (
30480 + mf.allow_missing
30481 + and os.path.basename(myebuild) not in mf.fhashdict["EBUILD"]
30482 + ):
30483 + out = portage.output.EOutput()
30484 + out.eerror(_("Missing digest for '%s'") % (myebuild,))
30485 + _doebuild_broken_ebuilds.add(myebuild)
30486 + return 1
30487 + except FileNotFound:
30488 + out = portage.output.EOutput()
30489 + out.eerror(
30490 + _("A file listed in the Manifest " "could not be found: '%s'")
30491 + % (myebuild,)
30492 + )
30493 + _doebuild_broken_ebuilds.add(myebuild)
30494 + return 1
30495 + except DigestException as e:
30496 + out = portage.output.EOutput()
30497 + out.eerror(_("Digest verification failed:"))
30498 + out.eerror("%s" % e.value[0])
30499 + out.eerror(_("Reason: %s") % e.value[1])
30500 + out.eerror(_("Got: %s") % e.value[2])
30501 + out.eerror(_("Expected: %s") % e.value[3])
30502 + _doebuild_broken_ebuilds.add(myebuild)
30503 + return 1
30504 +
30505 + if mf.getFullname() in _doebuild_broken_manifests:
30506 + return 1
30507 +
30508 + if mf is not _doebuild_manifest_cache and not mf.allow_missing:
30509 +
30510 + # Make sure that all of the ebuilds are
30511 + # actually listed in the Manifest.
30512 + for f in os.listdir(pkgdir):
30513 + pf = None
30514 + if f[-7:] == ".ebuild":
30515 + pf = f[:-7]
30516 + if pf is not None and not mf.hasFile("EBUILD", f):
30517 + f = os.path.join(pkgdir, f)
30518 + if f not in _doebuild_broken_ebuilds:
30519 + out = portage.output.EOutput()
30520 + out.eerror(
30521 + _("A file is not listed in the " "Manifest: '%s'") % (f,)
30522 + )
30523 + _doebuild_broken_manifests.add(manifest_path)
30524 + return 1
30525 +
30526 + # We cache it only after all above checks succeed.
30527 + _doebuild_manifest_cache = mf
30528 +
30529 + logfile = None
30530 + builddir_lock = None
30531 + tmpdir = None
30532 + tmpdir_orig = None
30533 +
30534 + try:
30535 + if mydo in ("digest", "manifest", "help"):
30536 + # Temporarily exempt the depend phase from manifest checks, in case
30537 + # aux_get calls trigger cache generation.
30538 + portage._doebuild_manifest_exempt_depend += 1
30539 +
30540 + # If we don't need much space and we don't need a constant location,
30541 + # we can temporarily override PORTAGE_TMPDIR with a random temp dir
30542 + # so that there's no need for locking and it can be used even if the
30543 + # user isn't in the portage group.
30544 + if not returnpid and mydo in ("info",):
30545 + tmpdir = tempfile.mkdtemp()
30546 + tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
30547 + mysettings["PORTAGE_TMPDIR"] = tmpdir
30548 +
30549 + doebuild_environment(
30550 + myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi
30551 + )
30552 +
30553 + if mydo in clean_phases:
30554 + builddir_lock = None
30555 + if not returnpid and "PORTAGE_BUILDDIR_LOCKED" not in mysettings:
30556 + builddir_lock = EbuildBuildDir(
30557 + scheduler=asyncio._safe_loop(), settings=mysettings
30558 + )
30559 + builddir_lock.scheduler.run_until_complete(builddir_lock.async_lock())
30560 + try:
30561 + return _spawn_phase(
30562 + mydo, mysettings, fd_pipes=fd_pipes, returnpid=returnpid
30563 + )
30564 + finally:
30565 + if builddir_lock is not None:
30566 + builddir_lock.scheduler.run_until_complete(
30567 + builddir_lock.async_unlock()
30568 + )
30569 +
30570 + # get possible slot information from the deps file
30571 + if mydo == "depend":
30572 + if not returnpid:
30573 + raise TypeError("returnpid must be True for depend phase")
30574 + return _spawn_phase(
30575 + mydo, mysettings, fd_pipes=fd_pipes, returnpid=returnpid
30576 + )
30577 +
30578 + if mydo == "nofetch":
30579 +
30580 + if returnpid:
30581 + writemsg(
30582 + "!!! doebuild: %s\n"
30583 + % _("returnpid is not supported for phase '%s'\n" % mydo),
30584 + noiselevel=-1,
30585 + )
30586 +
30587 + return spawn_nofetch(
30588 + mydbapi, myebuild, settings=mysettings, fd_pipes=fd_pipes
30589 + )
30590 +
30591 + if tree == "porttree":
30592 +
30593 + if not returnpid:
30594 + # Validate dependency metadata here to ensure that ebuilds with
30595 + # invalid data are never installed via the ebuild command. Skip
30596 + # this when returnpid is True (assume the caller handled it).
30597 + rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
30598 + if rval != os.EX_OK:
30599 + return rval
30600 +
30601 + else:
30602 + # FEATURES=noauto only makes sense for porttree, and we don't want
30603 + # it to trigger redundant sourcing of the ebuild for API consumers
30604 + # that are using binary packages
30605 + if "noauto" in mysettings.features:
30606 + mysettings.features.discard("noauto")
30607 +
30608 + # If we are not using a private temp dir, then check access
30609 + # to the global temp dir.
30610 + if tmpdir is None and mydo not in _doebuild_commands_without_builddir:
30611 + rval = _check_temp_dir(mysettings)
30612 + if rval != os.EX_OK:
30613 + return rval
30614 +
30615 + if mydo == "unmerge":
30616 + if returnpid:
30617 + writemsg(
30618 + "!!! doebuild: %s\n"
30619 + % _("returnpid is not supported for phase '%s'\n" % mydo),
30620 + noiselevel=-1,
30621 + )
30622 + return unmerge(
30623 + mysettings["CATEGORY"],
30624 + mysettings["PF"],
30625 + myroot,
30626 + mysettings,
30627 + vartree=vartree,
30628 + )
30629 +
30630 + phases_to_run = set()
30631 + if returnpid or "noauto" in mysettings.features or mydo not in actionmap_deps:
30632 + phases_to_run.add(mydo)
30633 + else:
30634 + phase_stack = [mydo]
30635 + while phase_stack:
30636 + x = phase_stack.pop()
30637 + if x in phases_to_run:
30638 + continue
30639 + phases_to_run.add(x)
30640 + phase_stack.extend(actionmap_deps.get(x, []))
30641 + del phase_stack
30642 +
30643 + alist = set(mysettings.configdict["pkg"].get("A", "").split())
30644 +
30645 + unpacked = False
30646 + if tree != "porttree" or mydo in _doebuild_commands_without_builddir:
30647 + pass
30648 + elif "unpack" not in phases_to_run:
30649 + unpacked = os.path.exists(
30650 + os.path.join(mysettings["PORTAGE_BUILDDIR"], ".unpacked")
30651 + )
30652 + else:
30653 + try:
30654 + workdir_st = os.stat(mysettings["WORKDIR"])
30655 + except OSError:
30656 + pass
30657 + else:
30658 + newstuff = False
30659 + if not os.path.exists(
30660 + os.path.join(mysettings["PORTAGE_BUILDDIR"], ".unpacked")
30661 + ):
30662 + writemsg_stdout(
30663 + _(">>> Not marked as unpacked; recreating WORKDIR...\n")
30664 + )
30665 + newstuff = True
30666 + else:
30667 + for x in alist:
30668 + writemsg_stdout(">>> Checking %s's mtime...\n" % x)
30669 + try:
30670 + x_st = os.stat(os.path.join(mysettings["DISTDIR"], x))
30671 + except OSError:
30672 + # file deleted
30673 + x_st = None
30674 +
30675 + if x_st is not None and x_st.st_mtime > workdir_st.st_mtime:
30676 + writemsg_stdout(
30677 + _(
30678 + ">>> Timestamp of "
30679 + "%s has changed; recreating WORKDIR...\n"
30680 + )
30681 + % x
30682 + )
30683 + newstuff = True
30684 + break
30685 +
30686 + if newstuff:
30687 + if (
30688 + builddir_lock is None
30689 + and "PORTAGE_BUILDDIR_LOCKED" not in mysettings
30690 + ):
30691 + builddir_lock = EbuildBuildDir(
30692 + scheduler=asyncio._safe_loop(), settings=mysettings
30693 + )
30694 + builddir_lock.scheduler.run_until_complete(
30695 + builddir_lock.async_lock()
30696 + )
30697 + try:
30698 + _spawn_phase("clean", mysettings)
30699 + finally:
30700 + if builddir_lock is not None:
30701 + builddir_lock.scheduler.run_until_complete(
30702 + builddir_lock.async_unlock()
30703 + )
30704 + builddir_lock = None
30705 + else:
30706 + writemsg_stdout(_(">>> WORKDIR is up-to-date, keeping...\n"))
30707 + unpacked = True
30708 +
30709 + # Build directory creation isn't required for any of these.
30710 + # In the fetch phase, the directory is needed only for RESTRICT=fetch
30711 + # in order to satisfy the sane $PWD requirement (from bug #239560)
30712 + # when pkg_nofetch is spawned.
30713 + have_build_dirs = False
30714 + if mydo not in ("digest", "fetch", "help", "manifest"):
30715 + if not returnpid and "PORTAGE_BUILDDIR_LOCKED" not in mysettings:
30716 + builddir_lock = EbuildBuildDir(
30717 + scheduler=asyncio._safe_loop(), settings=mysettings
30718 + )
30719 + builddir_lock.scheduler.run_until_complete(builddir_lock.async_lock())
30720 + mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
30721 + if mystatus:
30722 + return mystatus
30723 + have_build_dirs = True
30724 +
30725 + # emerge handles logging externally
30726 + if not returnpid:
30727 + # PORTAGE_LOG_FILE is set by the
30728 + # above prepare_build_dirs() call.
30729 + logfile = mysettings.get("PORTAGE_LOG_FILE")
30730 +
30731 + if have_build_dirs:
30732 + rval = _prepare_env_file(mysettings)
30733 + if rval != os.EX_OK:
30734 + return rval
30735 +
30736 + if (
30737 + eapi_exports_merge_type(mysettings["EAPI"])
30738 + and "MERGE_TYPE" not in mysettings.configdict["pkg"]
30739 + ):
30740 + if tree == "porttree":
30741 + mysettings.configdict["pkg"]["MERGE_TYPE"] = "source"
30742 + elif tree == "bintree":
30743 + mysettings.configdict["pkg"]["MERGE_TYPE"] = "binary"
30744 +
30745 + if tree == "porttree":
30746 + mysettings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
30747 + elif tree == "bintree":
30748 + mysettings.configdict["pkg"]["EMERGE_FROM"] = "binary"
30749 +
30750 + # NOTE: It's not possible to set REPLACED_BY_VERSION for prerm
30751 + # and postrm here, since we don't necessarily know what
30752 + # versions are being installed. This could be a problem
30753 + # for API consumers if they don't use dblink.treewalk()
30754 + # to execute prerm and postrm.
30755 + if eapi_exports_replace_vars(mysettings["EAPI"]) and (
30756 + mydo in ("postinst", "preinst", "pretend", "setup")
30757 + or (
30758 + "noauto" not in features
30759 + and not returnpid
30760 + and (mydo in actionmap_deps or mydo in ("merge", "package", "qmerge"))
30761 + )
30762 + ):
30763 + if not vartree:
30764 + writemsg(
30765 + "Warning: vartree not given to doebuild. "
30766 + + "Cannot set REPLACING_VERSIONS in pkg_{pretend,setup}\n"
30767 + )
30768 + else:
30769 + vardb = vartree.dbapi
30770 + cpv = mysettings.mycpv
30771 + cpv_slot = "%s%s%s" % (cpv.cp, portage.dep._slot_separator, cpv.slot)
30772 + mysettings["REPLACING_VERSIONS"] = " ".join(
30773 + set(
30774 + portage.versions.cpv_getversion(match)
30775 + for match in vardb.match(cpv_slot) + vardb.match("=" + cpv)
30776 + )
30777 + )
30778 +
30779 + # if any of these are being called, handle them -- running them out of
30780 + # the sandbox -- and stop now.
30781 + if mydo in (
30782 + "config",
30783 + "help",
30784 + "info",
30785 + "postinst",
30786 + "preinst",
30787 + "pretend",
30788 + "postrm",
30789 + "prerm",
30790 + ):
30791 + if mydo in ("preinst", "postinst"):
30792 + env_file = os.path.join(
30793 + os.path.dirname(mysettings["EBUILD"]), "environment.bz2"
30794 + )
30795 + if os.path.isfile(env_file):
30796 + mysettings["PORTAGE_UPDATE_ENV"] = env_file
30797 + try:
30798 + return _spawn_phase(
30799 + mydo,
30800 + mysettings,
30801 + fd_pipes=fd_pipes,
30802 + logfile=logfile,
30803 + returnpid=returnpid,
30804 + )
30805 + finally:
30806 + mysettings.pop("PORTAGE_UPDATE_ENV", None)
30807 +
30808 + mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
30809 +
30810 + # Only try to fetch the files if we are going to need them ...
30811 + # otherwise, if user has FEATURES=noauto and they run `ebuild clean
30812 + # unpack compile install`, we will try to fetch 4 times :/
30813 + need_distfiles = (
30814 + tree == "porttree"
30815 + and not unpacked
30816 + and (
30817 + mydo in ("fetch", "unpack")
30818 + or mydo not in ("digest", "manifest")
30819 + and "noauto" not in features
30820 + )
30821 + )
30822 + if need_distfiles:
30823 +
30824 + src_uri = mysettings.configdict["pkg"].get("SRC_URI")
30825 + if src_uri is None:
30826 + (src_uri,) = mydbapi.aux_get(
30827 + mysettings.mycpv,
30828 + ["SRC_URI"],
30829 + mytree=os.path.dirname(os.path.dirname(os.path.dirname(myebuild))),
30830 + )
30831 + metadata = {
30832 + "EAPI": mysettings["EAPI"],
30833 + "SRC_URI": src_uri,
30834 + }
30835 + use = frozenset(mysettings["PORTAGE_USE"].split())
30836 + try:
30837 + alist = _parse_uri_map(mysettings.mycpv, metadata, use=use)
30838 + aalist = _parse_uri_map(mysettings.mycpv, metadata)
30839 + except InvalidDependString as e:
30840 + writemsg("!!! %s\n" % str(e), noiselevel=-1)
30841 + writemsg(_("!!! Invalid SRC_URI for '%s'.\n") % mycpv, noiselevel=-1)
30842 + del e
30843 + return 1
30844 +
30845 + if "mirror" in features or fetchall:
30846 + fetchme = aalist
30847 + else:
30848 + fetchme = alist
30849 +
30850 + dist_digests = None
30851 + if mf is not None:
30852 + dist_digests = mf.getTypeDigests("DIST")
30853 +
30854 + def _fetch_subprocess(fetchme, mysettings, listonly, dist_digests):
30855 + # For userfetch, drop privileges for the entire fetch call, in
30856 + # order to handle DISTDIR on NFS with root_squash for bug 601252.
30857 + if _want_userfetch(mysettings):
30858 + _drop_privs_userfetch(mysettings)
30859 +
30860 + return fetch(
30861 + fetchme,
30862 + mysettings,
30863 + listonly=listonly,
30864 + fetchonly=fetchonly,
30865 + allow_missing_digests=False,
30866 + digests=dist_digests,
30867 + )
30868 +
30869 + loop = asyncio._safe_loop()
30870 + if loop.is_running():
30871 + # Called by EbuildFetchonly for emerge --pretend --fetchonly.
30872 + success = fetch(
30873 + fetchme,
30874 + mysettings,
30875 + listonly=listonly,
30876 + fetchonly=fetchonly,
30877 + allow_missing_digests=False,
30878 + digests=dist_digests,
30879 + )
30880 + else:
30881 + success = loop.run_until_complete(
30882 + loop.run_in_executor(
30883 + ForkExecutor(loop=loop),
30884 + _fetch_subprocess,
30885 + fetchme,
30886 + mysettings,
30887 + listonly,
30888 + dist_digests,
30889 + )
30890 + )
30891 + if not success:
30892 + # Since listonly mode is called by emerge --pretend in an
30893 + # asynchronous context, spawn_nofetch would trigger event loop
30894 + # recursion here, therefore delegate execution of pkg_nofetch
30895 + # to the caller (bug 657360).
30896 + if not listonly:
30897 + spawn_nofetch(
30898 + mydbapi, myebuild, settings=mysettings, fd_pipes=fd_pipes
30899 + )
30900 + return 1
30901 +
30902 + if need_distfiles:
30903 + # Files are already checked inside fetch(),
30904 + # so do not check them again.
30905 + checkme = []
30906 + elif unpacked:
30907 + # The unpack phase is marked as complete, so it
30908 + # would be wasteful to check distfiles again.
30909 + checkme = []
30910 + else:
30911 + checkme = alist
30912 +
30913 + if mydo == "fetch" and listonly:
30914 + return 0
30915 +
30916 + try:
30917 + if mydo == "manifest":
30918 + mf = None
30919 + _doebuild_manifest_cache = None
30920 + return not digestgen(mysettings=mysettings, myportdb=mydbapi)
30921 + if mydo == "digest":
30922 + mf = None
30923 + _doebuild_manifest_cache = None
30924 + return not digestgen(mysettings=mysettings, myportdb=mydbapi)
30925 + if "digest" in mysettings.features:
30926 + mf = None
30927 + _doebuild_manifest_cache = None
30928 + digestgen(mysettings=mysettings, myportdb=mydbapi)
30929 + except PermissionDenied as e:
30930 + writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
30931 + if mydo in ("digest", "manifest"):
30932 + return 1
30933 +
30934 + if mydo == "fetch":
30935 + # Return after digestgen for FEATURES=digest support.
30936 + # Return before digestcheck, since fetch() already
30937 + # checked any relevant digests.
30938 + return 0
30939 +
30940 + # See above comment about fetching only when needed
30941 + if tree == "porttree" and not digestcheck(
30942 + checkme, mysettings, "strict" in features, mf=mf
30943 + ):
30944 + return 1
30945 +
30946 + # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
30947 + if tree == "porttree" and (
30948 + (mydo != "setup" and "noauto" not in features)
30949 + or mydo in ("install", "unpack")
30950 + ):
30951 + _prepare_fake_distdir(mysettings, alist)
30952 +
30953 + # initial dep checks complete; time to process main commands
30954 + actionmap = _spawn_actionmap(mysettings)
30955 +
30956 + # merge the deps in so we again have a 'full' actionmap;
30957 + # be glad when this can die.
30958 + for x in actionmap:
30959 + if len(actionmap_deps.get(x, [])):
30960 + actionmap[x]["dep"] = " ".join(actionmap_deps[x])
30961 +
30962 + regular_actionmap_phase = mydo in actionmap
30963 +
30964 + if regular_actionmap_phase:
30965 + bintree = None
30966 + if mydo == "package":
30967 + # Make sure the package directory exists before executing
30968 + # this phase. This can raise PermissionDenied if
30969 + # the current user doesn't have write access to $PKGDIR.
30970 + if hasattr(portage, "db"):
30971 + bintree = portage.db[mysettings["EROOT"]]["bintree"]
30972 + binpkg_tmpfile_dir = os.path.join(
30973 + bintree.pkgdir, mysettings["CATEGORY"]
30974 + )
30975 + bintree._ensure_dir(binpkg_tmpfile_dir)
30976 + with tempfile.NamedTemporaryFile(
30977 + prefix=mysettings["PF"],
30978 + suffix=".tbz2." + str(portage.getpid()),
30979 + dir=binpkg_tmpfile_dir,
30980 + delete=False,
30981 + ) as binpkg_tmpfile:
30982 + mysettings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile.name
30983 + else:
30984 + parent_dir = os.path.join(
30985 + mysettings["PKGDIR"], mysettings["CATEGORY"]
30986 + )
30987 + portage.util.ensure_dirs(parent_dir)
30988 + if not os.access(parent_dir, os.W_OK):
30989 + raise PermissionDenied("access('%s', os.W_OK)" % parent_dir)
30990 + retval = spawnebuild(
30991 + mydo,
30992 + actionmap,
30993 + mysettings,
30994 + debug,
30995 + logfile=logfile,
30996 + fd_pipes=fd_pipes,
30997 + returnpid=returnpid,
30998 + )
30999 +
31000 + if returnpid and isinstance(retval, list):
31001 + return retval
31002 +
31003 + if retval == os.EX_OK:
31004 + if mydo == "package" and bintree is not None:
31005 + pkg = bintree.inject(
31006 + mysettings.mycpv, filename=mysettings["PORTAGE_BINPKG_TMPFILE"]
31007 + )
31008 + if pkg is not None:
31009 + infoloc = os.path.join(
31010 + mysettings["PORTAGE_BUILDDIR"], "build-info"
31011 + )
31012 + build_info = {
31013 + "BINPKGMD5": "%s\n" % pkg._metadata["MD5"],
31014 + }
31015 + if pkg.build_id is not None:
31016 + build_info["BUILD_ID"] = "%s\n" % pkg.build_id
31017 + for k, v in build_info.items():
31018 + with io.open(
31019 + _unicode_encode(
31020 + os.path.join(infoloc, k),
31021 + encoding=_encodings["fs"],
31022 + errors="strict",
31023 + ),
31024 + mode="w",
31025 + encoding=_encodings["repo.content"],
31026 + errors="strict",
31027 + ) as f:
31028 + f.write(v)
31029 + else:
31030 + if "PORTAGE_BINPKG_TMPFILE" in mysettings:
31031 + try:
31032 + os.unlink(mysettings["PORTAGE_BINPKG_TMPFILE"])
31033 + except OSError:
31034 + pass
31035 +
31036 + elif returnpid:
31037 + writemsg(
31038 + "!!! doebuild: %s\n"
31039 + % _("returnpid is not supported for phase '%s'\n" % mydo),
31040 + noiselevel=-1,
31041 + )
31042 +
31043 + if regular_actionmap_phase:
31044 + # handled above
31045 + pass
31046 + elif mydo == "qmerge":
31047 + # Check to ensure install was run; this *only* pops up when users
31048 + # forget it and are using the ebuild command.
31049 + if not os.path.exists(
31050 + os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")
31051 + ):
31052 + writemsg(
31053 + _("!!! mydo=qmerge, but the install phase has not been run\n"),
31054 + noiselevel=-1,
31055 + )
31056 + return 1
31057 + # qmerge is a special phase that implies noclean.
31058 + if "noclean" not in mysettings.features:
31059 + mysettings.features.add("noclean")
31060 + _handle_self_update(mysettings, vartree.dbapi)
31061 + # qmerge is specifically not supposed to do a runtime dep check
31062 + retval = merge(
31063 + mysettings["CATEGORY"],
31064 + mysettings["PF"],
31065 + mysettings["D"],
31066 + os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
31067 + myroot,
31068 + mysettings,
31069 + myebuild=mysettings["EBUILD"],
31070 + mytree=tree,
31071 + mydbapi=mydbapi,
31072 + vartree=vartree,
31073 + prev_mtimes=prev_mtimes,
31074 + fd_pipes=fd_pipes,
31075 + )
31076 + elif mydo == "merge":
31077 + retval = spawnebuild(
31078 + "install",
31079 + actionmap,
31080 + mysettings,
31081 + debug,
31082 + alwaysdep=1,
31083 + logfile=logfile,
31084 + fd_pipes=fd_pipes,
31085 + returnpid=returnpid,
31086 + )
31087 + if retval != os.EX_OK:
31088 + # The merge phase handles this already. Callers don't know how
31089 + # far this function got, so we have to call elog_process() here
31090 + # so that it's only called once.
31091 + elog_process(mysettings.mycpv, mysettings)
31092 + if retval == os.EX_OK:
31093 + _handle_self_update(mysettings, vartree.dbapi)
31094 + retval = merge(
31095 + mysettings["CATEGORY"],
31096 + mysettings["PF"],
31097 + mysettings["D"],
31098 + os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
31099 + myroot,
31100 + mysettings,
31101 + myebuild=mysettings["EBUILD"],
31102 + mytree=tree,
31103 + mydbapi=mydbapi,
31104 + vartree=vartree,
31105 + prev_mtimes=prev_mtimes,
31106 + fd_pipes=fd_pipes,
31107 + )
31108 +
31109 + else:
31110 + writemsg_stdout(_("!!! Unknown mydo: %s\n") % mydo, noiselevel=-1)
31111 + return 1
31112 +
31113 + return retval
31114 +
31115 + finally:
31116 +
31117 + if builddir_lock is not None:
31118 + builddir_lock.scheduler.run_until_complete(builddir_lock.async_unlock())
31119 + if tmpdir:
31120 + mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
31121 + shutil.rmtree(tmpdir)
31122 +
31123 + mysettings.pop("REPLACING_VERSIONS", None)
31124 +
31125 + if logfile and not returnpid:
31126 + try:
31127 + if os.stat(logfile).st_size == 0:
31128 + os.unlink(logfile)
31129 + except OSError:
31130 + pass
31131 +
31132 + if mydo in ("digest", "manifest", "help"):
31133 + # If necessary, depend phase has been triggered by aux_get calls
31134 + # and the exemption is no longer needed.
31135 + portage._doebuild_manifest_exempt_depend -= 1
31136 +
31137
31138 def _check_temp_dir(settings):
31139 - if "PORTAGE_TMPDIR" not in settings or \
31140 - not os.path.isdir(settings["PORTAGE_TMPDIR"]):
31141 - writemsg(_("The directory specified in your "
31142 - "PORTAGE_TMPDIR variable, '%s',\n"
31143 - "does not exist. Please create this directory or "
31144 - "correct your PORTAGE_TMPDIR setting.\n") % \
31145 - settings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
31146 - return 1
31147 -
31148 - # as some people use a separate PORTAGE_TMPDIR mount
31149 - # we prefer that as the checks below would otherwise be pointless
31150 - # for those people.
31151 - checkdir = first_existing(os.path.join(settings["PORTAGE_TMPDIR"], "portage"))
31152 -
31153 - if not os.access(checkdir, os.W_OK):
31154 - writemsg(_("%s is not writable.\n"
31155 - "Likely cause is that you've mounted it as readonly.\n") % checkdir,
31156 - noiselevel=-1)
31157 - return 1
31158 -
31159 - with tempfile.NamedTemporaryFile(prefix="exectest-", dir=checkdir) as fd:
31160 - os.chmod(fd.name, 0o755)
31161 - if not os.access(fd.name, os.X_OK):
31162 - writemsg(_("Can not execute files in %s\n"
31163 - "Likely cause is that you've mounted it with one of the\n"
31164 - "following mount options: 'noexec', 'user', 'users'\n\n"
31165 - "Please make sure that portage can execute files in this directory.\n") % checkdir,
31166 - noiselevel=-1)
31167 - return 1
31168 -
31169 - return os.EX_OK
31170 + if "PORTAGE_TMPDIR" not in settings or not os.path.isdir(
31171 + settings["PORTAGE_TMPDIR"]
31172 + ):
31173 + writemsg(
31174 + _(
31175 + "The directory specified in your "
31176 + "PORTAGE_TMPDIR variable, '%s',\n"
31177 + "does not exist. Please create this directory or "
31178 + "correct your PORTAGE_TMPDIR setting.\n"
31179 + )
31180 + % settings.get("PORTAGE_TMPDIR", ""),
31181 + noiselevel=-1,
31182 + )
31183 + return 1
31184 +
31185 + # Some people use a separate PORTAGE_TMPDIR mount; we prefer that
31186 + # location, since the checks below would otherwise be pointless
31187 + # for those people.
31188 + checkdir = first_existing(os.path.join(settings["PORTAGE_TMPDIR"], "portage"))
31189 +
31190 + if not os.access(checkdir, os.W_OK):
31191 + writemsg(
31192 + _(
31193 + "%s is not writable.\n"
31194 + "Likely cause is that you've mounted it as readonly.\n"
31195 + )
31196 + % checkdir,
31197 + noiselevel=-1,
31198 + )
31199 + return 1
31200 +
31201 + with tempfile.NamedTemporaryFile(prefix="exectest-", dir=checkdir) as fd:
31202 + os.chmod(fd.name, 0o755)
31203 + if not os.access(fd.name, os.X_OK):
31204 + writemsg(
31205 + _(
31206 + "Can not execute files in %s\n"
31207 + "Likely cause is that you've mounted it with one of the\n"
31208 + "following mount options: 'noexec', 'user', 'users'\n\n"
31209 + "Please make sure that portage can execute files in this directory.\n"
31210 + )
31211 + % checkdir,
31212 + noiselevel=-1,
31213 + )
31214 + return 1
31215 +
31216 + return os.EX_OK
31217 +
31218
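The executability probe above generalizes beyond portage; a minimal standalone sketch of the same noexec detection, assuming an existing writable /var/tmp:

    import os
    import tempfile

    with tempfile.NamedTemporaryFile(prefix="exectest-", dir="/var/tmp") as f:
        os.chmod(f.name, 0o755)
        # False when the filesystem is mounted noexec (or with user/users)
        print(os.access(f.name, os.X_OK))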
31219 def _prepare_env_file(settings):
31220 - """
31221 - Extract environment.bz2 if it exists, but only if the destination
31222 - environment file doesn't already exist. There are lots of possible
31223 - states when doebuild() calls this function, and we want to avoid
31224 - clobbering an existing environment file.
31225 - """
31226 -
31227 - env_extractor = BinpkgEnvExtractor(background=False,
31228 - scheduler=asyncio._safe_loop(),
31229 - settings=settings)
31230 -
31231 - if env_extractor.dest_env_exists():
31232 - # There are lots of possible states when doebuild()
31233 - # calls this function, and we want to avoid
31234 - # clobbering an existing environment file.
31235 - return os.EX_OK
31236 -
31237 - if not env_extractor.saved_env_exists():
31238 - # If the environment.bz2 doesn't exist, then ebuild.sh will
31239 - # source the ebuild as a fallback.
31240 - return os.EX_OK
31241 -
31242 - env_extractor.start()
31243 - env_extractor.wait()
31244 - return env_extractor.returncode
31245 + """
31246 + Extract environment.bz2 if it exists, but only if the destination
31247 + environment file doesn't already exist. There are lots of possible
31248 + states when doebuild() calls this function, and we want to avoid
31249 + clobbering an existing environment file.
31250 + """
31251 +
31252 + env_extractor = BinpkgEnvExtractor(
31253 + background=False, scheduler=asyncio._safe_loop(), settings=settings
31254 + )
31255 +
31256 + if env_extractor.dest_env_exists():
31257 + # There are lots of possible states when doebuild()
31258 + # calls this function, and we want to avoid
31259 + # clobbering an existing environment file.
31260 + return os.EX_OK
31261 +
31262 + if not env_extractor.saved_env_exists():
31263 + # If the environment.bz2 doesn't exist, then ebuild.sh will
31264 + # source the ebuild as a fallback.
31265 + return os.EX_OK
31266 +
31267 + env_extractor.start()
31268 + env_extractor.wait()
31269 + return env_extractor.returncode
31270 +
31271
31272 def _spawn_actionmap(settings):
31273 - features = settings.features
31274 - restrict = settings["PORTAGE_RESTRICT"].split()
31275 - nosandbox = (("userpriv" in features) and \
31276 - ("usersandbox" not in features) and \
31277 - "userpriv" not in restrict and \
31278 - "nouserpriv" not in restrict)
31279 -
31280 - if not (portage.process.sandbox_capable or \
31281 - portage.process.macossandbox_capable):
31282 - nosandbox = True
31283 -
31284 - sesandbox = settings.selinux_enabled() and \
31285 - "sesandbox" in features
31286 -
31287 - droppriv = "userpriv" in features and \
31288 - "userpriv" not in restrict and \
31289 - secpass >= 2
31290 -
31291 - fakeroot = "fakeroot" in features
31292 -
31293 - portage_bin_path = settings["PORTAGE_BIN_PATH"]
31294 - ebuild_sh_binary = os.path.join(portage_bin_path,
31295 - os.path.basename(EBUILD_SH_BINARY))
31296 - misc_sh_binary = os.path.join(portage_bin_path,
31297 - os.path.basename(MISC_SH_BINARY))
31298 - ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
31299 - misc_sh = _shell_quote(misc_sh_binary) + " __dyn_%s"
31300 -
31301 - # args are for the spawn function
31302 - actionmap = {
31303 - "pretend": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
31304 - "setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
31305 - "unpack": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
31306 - "prepare": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
31307 - "configure":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
31308 - "compile": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
31309 - "test": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
31310 - "install": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox, "fakeroot":fakeroot}},
31311 - "instprep": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox, "fakeroot":fakeroot}},
31312 - "rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
31313 - "package": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
31314 - }
31315 -
31316 - return actionmap
31317 + features = settings.features
31318 + restrict = settings["PORTAGE_RESTRICT"].split()
31319 + nosandbox = (
31320 + ("userpriv" in features)
31321 + and ("usersandbox" not in features)
31322 + and "userpriv" not in restrict
31323 + and "nouserpriv" not in restrict
31324 + )
31325 +
31326 - if not portage.process.sandbox_capable:
31327 ++ if not (portage.process.sandbox_capable
31328 ++ or portage.process.macossandbox_capable):
31329 + nosandbox = True
31330 +
31331 + sesandbox = settings.selinux_enabled() and "sesandbox" in features
31332 +
31333 + droppriv = "userpriv" in features and "userpriv" not in restrict and secpass >= 2
31334 +
31335 + fakeroot = "fakeroot" in features
31336 +
31337 + portage_bin_path = settings["PORTAGE_BIN_PATH"]
31338 + ebuild_sh_binary = os.path.join(
31339 + portage_bin_path, os.path.basename(EBUILD_SH_BINARY)
31340 + )
31341 + misc_sh_binary = os.path.join(portage_bin_path, os.path.basename(MISC_SH_BINARY))
31342 + ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
31343 + misc_sh = _shell_quote(misc_sh_binary) + " __dyn_%s"
31344 +
31345 + # args are for the spawn function
31346 + actionmap = {
31347 + "pretend": {
31348 + "cmd": ebuild_sh,
31349 + "args": {"droppriv": 0, "free": 1, "sesandbox": 0, "fakeroot": 0},
31350 + },
31351 + "setup": {
31352 + "cmd": ebuild_sh,
31353 + "args": {"droppriv": 0, "free": 1, "sesandbox": 0, "fakeroot": 0},
31354 + },
31355 + "unpack": {
31356 + "cmd": ebuild_sh,
31357 + "args": {
31358 + "droppriv": droppriv,
31359 + "free": 0,
31360 + "sesandbox": sesandbox,
31361 + "fakeroot": 0,
31362 + },
31363 + },
31364 + "prepare": {
31365 + "cmd": ebuild_sh,
31366 + "args": {
31367 + "droppriv": droppriv,
31368 + "free": 0,
31369 + "sesandbox": sesandbox,
31370 + "fakeroot": 0,
31371 + },
31372 + },
31373 + "configure": {
31374 + "cmd": ebuild_sh,
31375 + "args": {
31376 + "droppriv": droppriv,
31377 + "free": nosandbox,
31378 + "sesandbox": sesandbox,
31379 + "fakeroot": 0,
31380 + },
31381 + },
31382 + "compile": {
31383 + "cmd": ebuild_sh,
31384 + "args": {
31385 + "droppriv": droppriv,
31386 + "free": nosandbox,
31387 + "sesandbox": sesandbox,
31388 + "fakeroot": 0,
31389 + },
31390 + },
31391 + "test": {
31392 + "cmd": ebuild_sh,
31393 + "args": {
31394 + "droppriv": droppriv,
31395 + "free": nosandbox,
31396 + "sesandbox": sesandbox,
31397 + "fakeroot": 0,
31398 + },
31399 + },
31400 + "install": {
31401 + "cmd": ebuild_sh,
31402 + "args": {
31403 + "droppriv": 0,
31404 + "free": 0,
31405 + "sesandbox": sesandbox,
31406 + "fakeroot": fakeroot,
31407 + },
31408 + },
31409 + "instprep": {
31410 + "cmd": misc_sh,
31411 + "args": {
31412 + "droppriv": 0,
31413 + "free": 0,
31414 + "sesandbox": sesandbox,
31415 + "fakeroot": fakeroot,
31416 + },
31417 + },
31418 + "rpm": {
31419 + "cmd": misc_sh,
31420 + "args": {"droppriv": 0, "free": 0, "sesandbox": 0, "fakeroot": fakeroot},
31421 + },
31422 + "package": {
31423 + "cmd": misc_sh,
31424 + "args": {"droppriv": 0, "free": 0, "sesandbox": 0, "fakeroot": fakeroot},
31425 + },
31426 + }
31427 +
31428 + return actionmap
31429 +
31430
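For context on how these entries are consumed: each "cmd" value is a format string completed with the phase name before spawning. A sketch, assuming a valid config instance named settings:

    actionmap = _spawn_actionmap(settings)  # settings: a portage.config
    cmd = actionmap["compile"]["cmd"] % "compile"
    # e.g. "<PORTAGE_BIN_PATH>/ebuild.sh compile", later spawned with the
    # droppriv/free/sesandbox/fakeroot flags from actionmap["compile"]["args"]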
31431 def _validate_deps(mysettings, myroot, mydo, mydbapi):
31432
31433 @@@ -1482,382 -1881,363 +1893,451 @@@
31434
31435 # XXX This would be to replace getstatusoutput completely.
31436 # XXX Issue: cannot block execution. Deadlock condition.
31437 - def spawn(mystring, mysettings, debug=False, free=False, droppriv=False,
31438 - sesandbox=False, fakeroot=False, networked=True, ipc=True,
31439 - mountns=False, pidns=False, **keywords):
31440 - """
31441 - Spawn a subprocess with extra portage-specific options.
31442 - Options include:
31443 -
31444 - Sandbox: Sandbox means the spawned process will be limited in its ability to
31445 - read and write files (normally this means it is restricted to ${D}/).
31446 - SELinux Sandbox: Enables sandboxing via SELinux.
31447 - Reduced Privileges: Drops privileges such that the process runs as portage:portage
31448 - instead of as root.
31449 -
31450 - Notes: os.system cannot be used because it messes with signal handling. Instead we
31451 - use the portage.process spawn* family of functions.
31452 -
31453 - This function waits for the process to terminate.
31454 -
31455 - @param mystring: Command to run
31456 - @type mystring: String
31457 - @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
31458 - @type mysettings: Dictionary or config instance
31459 - @param debug: Ignored
31460 - @type debug: Boolean
31461 - @param free: Run this process free of (i.e., without) the sandbox
31462 - @type free: Boolean
31463 - @param droppriv: Drop to portage:portage when running this command
31464 - @type droppriv: Boolean
31465 - @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
31466 - @type sesandbox: Boolean
31467 - @param fakeroot: Run this command with faked root privileges
31468 - @type fakeroot: Boolean
31469 - @param networked: Run this command with networking access enabled
31470 - @type networked: Boolean
31471 - @param ipc: Run this command with host IPC access enabled
31472 - @type ipc: Boolean
31473 - @param mountns: Run this command inside mount namespace
31474 - @type mountns: Boolean
31475 - @param pidns: Run this command in isolated PID namespace
31476 - @type pidns: Boolean
31477 - @param keywords: Extra options encoded as a dict, to be passed to spawn
31478 - @type keywords: Dictionary
31479 - @rtype: Integer
31480 - @return:
31481 - 1. The return code of the spawned process.
31482 - """
31483 -
31484 - check_config_instance(mysettings)
31485 -
31486 - fd_pipes = keywords.get("fd_pipes")
31487 - if fd_pipes is None:
31488 - fd_pipes = {
31489 - 0:portage._get_stdin().fileno(),
31490 - 1:sys.__stdout__.fileno(),
31491 - 2:sys.__stderr__.fileno(),
31492 - }
31493 - # In some cases the above print statements don't flush stdout, so
31494 - # it needs to be flushed before allowing a child process to use it
31495 - # so that output always shows in the correct order.
31496 - stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
31497 - for fd in fd_pipes.values():
31498 - if fd in stdout_filenos:
31499 - sys.__stdout__.flush()
31500 - sys.__stderr__.flush()
31501 - break
31502 -
31503 - features = mysettings.features
31504 -
31505 - # Use Linux namespaces if available
31506 - if uid == 0 and platform.system() == 'Linux':
31507 - keywords['unshare_net'] = not networked
31508 - keywords['unshare_ipc'] = not ipc
31509 - keywords['unshare_mount'] = mountns
31510 - keywords['unshare_pid'] = pidns
31511 -
31512 - if not networked and mysettings.get("EBUILD_PHASE") != "nofetch" and \
31513 - ("network-sandbox-proxy" in features or "distcc" in features):
31514 - # Provide a SOCKS5-over-UNIX-socket proxy to escape sandbox
31515 - # Don't do this for pkg_nofetch, since the spawn_nofetch
31516 - # function creates a private PORTAGE_TMPDIR.
31517 - try:
31518 - proxy = get_socks5_proxy(mysettings)
31519 - except NotImplementedError:
31520 - pass
31521 - else:
31522 - mysettings['PORTAGE_SOCKS5_PROXY'] = proxy
31523 - mysettings['DISTCC_SOCKS_PROXY'] = proxy
31524 -
31525 - # TODO: Enable fakeroot to be used together with droppriv. The
31526 - # fake ownership/permissions will have to be converted to real
31527 - # permissions in the merge phase.
31528 - fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
31529 - portage_build_uid = os.getuid()
31530 - portage_build_gid = os.getgid()
31531 - logname = None
31532 - if uid == 0 and portage_uid and portage_gid and hasattr(os, "setgroups"):
31533 - if droppriv:
31534 - logname = portage.data._portage_username
31535 - keywords.update({
31536 - "uid": portage_uid,
31537 - "gid": portage_gid,
31538 - "groups": userpriv_groups,
31539 - "umask": 0o22
31540 - })
31541 -
31542 - # Adjust pty ownership so that subprocesses
31543 - # can directly access /dev/fd/{1,2}.
31544 - stdout_fd = fd_pipes.get(1)
31545 - if stdout_fd is not None:
31546 - try:
31547 - subprocess_tty = _os.ttyname(stdout_fd)
31548 - except OSError:
31549 - pass
31550 - else:
31551 - try:
31552 - parent_tty = _os.ttyname(sys.__stdout__.fileno())
31553 - except OSError:
31554 - parent_tty = None
31555 -
31556 - if subprocess_tty != parent_tty:
31557 - _os.chown(subprocess_tty,
31558 - int(portage_uid), int(portage_gid))
31559 -
31560 - if "userpriv" in features and "userpriv" not in mysettings["PORTAGE_RESTRICT"].split() and secpass >= 2:
31561 - # Since Python 3.4, getpwuid and getgrgid
31562 - # require int type (no proxies).
31563 - portage_build_uid = int(portage_uid)
31564 - portage_build_gid = int(portage_gid)
31565 -
31566 - if "PORTAGE_BUILD_USER" not in mysettings:
31567 - user = None
31568 - try:
31569 - user = pwd.getpwuid(portage_build_uid).pw_name
31570 - except KeyError:
31571 - if portage_build_uid == 0:
31572 - user = "root"
31573 - elif portage_build_uid == portage_uid:
31574 - user = portage.data._portage_username
31575 - # PREFIX LOCAL: accept numeric uid
31576 - else:
31577 - user = portage_uid
31578 - # END PREFIX LOCAL
31579 - if user is not None:
31580 - mysettings["PORTAGE_BUILD_USER"] = user
31581 -
31582 - if "PORTAGE_BUILD_GROUP" not in mysettings:
31583 - group = None
31584 - try:
31585 - group = grp.getgrgid(portage_build_gid).gr_name
31586 - except KeyError:
31587 - if portage_build_gid == 0:
31588 - group = "root"
31589 - elif portage_build_gid == portage_gid:
31590 - group = portage.data._portage_grpname
31591 - # PREFIX LOCAL: accept numeric gid
31592 - else:
31593 - group = portage_gid
31594 - # END PREFIX LOCAL
31595 - if group is not None:
31596 - mysettings["PORTAGE_BUILD_GROUP"] = group
31597 -
31598 - if not free:
31599 - free=((droppriv and "usersandbox" not in features) or \
31600 - (not droppriv and "sandbox" not in features and \
31601 - "usersandbox" not in features and not fakeroot))
31602 -
31603 - if not free and not (fakeroot or portage.process.sandbox_capable or \
31604 - portage.process.macossandbox_capable):
31605 - free = True
31606 -
31607 - if mysettings.mycpv is not None:
31608 - keywords["opt_name"] = "[%s]" % mysettings.mycpv
31609 - else:
31610 - keywords["opt_name"] = "[%s/%s]" % \
31611 - (mysettings.get("CATEGORY",""), mysettings.get("PF",""))
31612 -
31613 - if free or "SANDBOX_ACTIVE" in os.environ:
31614 - keywords["opt_name"] += " bash"
31615 - spawn_func = portage.process.spawn_bash
31616 - elif fakeroot:
31617 - keywords["opt_name"] += " fakeroot"
31618 - keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
31619 - spawn_func = portage.process.spawn_fakeroot
31620 - elif "sandbox" in features and platform.system() == 'Darwin':
31621 - keywords["opt_name"] += " macossandbox"
31622 - sbprofile = MACOSSANDBOX_PROFILE
31623 -
31624 - # determine variable names from profile: split
31625 - # "text@@VARNAME@@moretext@@OTHERVAR@@restoftext" into
31626 - # ("text", "VARNAME", "moretext", "OTHERVAR", "restoftext")
31627 - # and extract variable names by reading every second item.
31628 - variables = []
31629 - for line in sbprofile.split("\n"):
31630 - variables.extend(line.split("@@")[1:-1:2])
31631 -
31632 - for var in variables:
31633 - paths = ""
31634 - if var in mysettings:
31635 - paths = mysettings[var]
31636 - else:
31637 - writemsg("Warning: sandbox profile references variable %s "
31638 - "which is not set.\nThe rule using it will have no "
31639 - "effect, which is most likely not the intended "
31640 - "result.\nPlease check make.conf/make.globals.\n" %
31641 - var)
31642 -
31643 - # not set or empty value
31644 - if not paths:
31645 - sbprofile = sbprofile.replace("@@%s@@" % var, "")
31646 - continue
31647 -
31648 - rules_literal = ""
31649 - rules_regex = ""
31650 -
31651 - # FIXME: Allow for quoting inside the variable
31652 - # to allow paths with spaces in them?
31653 - for path in paths.split(" "):
31654 - # do a second round of token
31655 - # replacements to be able to reference
31656 - # settings like EPREFIX or
31657 - # PORTAGE_BUILDDIR.
31658 - for token in path.split("@@")[1:-1:2]:
31659 - if token not in mysettings:
31660 - continue
31661 -
31662 - path = path.replace("@@%s@@" % token, mysettings[token])
31663 -
31664 - if "@@" in path:
31665 - # unreplaced tokens left -
31666 - # silently ignore path - needed
31667 - # for PORTAGE_ACTUAL_DISTDIR
31668 - # which isn't always set
31669 - pass
31670 - elif path[-1] == os.sep:
31671 - # path ends in slash - make it a
31672 - # regex and allow access
31673 - # recursively.
31674 - path = path.replace(r'+', r'\+')
31675 - path = path.replace(r'*', r'\*')
31676 - path = path.replace(r'[', r'\[')
31677 - path = path.replace(r']', r'\]')
31678 - rules_regex += " #\"^%s\"\n" % path
31679 - else:
31680 - rules_literal += " #\"%s\"\n" % path
31681 -
31682 - rules = ""
31683 - if rules_literal:
31684 - rules += " (literal\n" + rules_literal + " )\n"
31685 - if rules_regex:
31686 - rules += " (regex\n" + rules_regex + " )\n"
31687 - sbprofile = sbprofile.replace("@@%s@@" % var, rules)
31688 -
31689 - keywords["profile"] = sbprofile
31690 - spawn_func = portage.process.spawn_macossandbox
31691 - else:
31692 - keywords["opt_name"] += " sandbox"
31693 - spawn_func = portage.process.spawn_sandbox
31694 -
31695 - if sesandbox:
31696 - spawn_func = selinux.spawn_wrapper(spawn_func,
31697 - mysettings["PORTAGE_SANDBOX_T"])
31698 -
31699 - logname_backup = None
31700 - if logname is not None:
31701 - logname_backup = mysettings.configdict["env"].get("LOGNAME")
31702 - mysettings.configdict["env"]["LOGNAME"] = logname
31703 -
31704 - try:
31705 - if keywords.get("returnpid"):
31706 - return spawn_func(mystring, env=mysettings.environ(),
31707 - **keywords)
31708 -
31709 - proc = EbuildSpawnProcess(
31710 - background=False, args=mystring,
31711 - scheduler=SchedulerInterface(asyncio._safe_loop()),
31712 - spawn_func=spawn_func,
31713 - settings=mysettings, **keywords)
31714 -
31715 - proc.start()
31716 - proc.wait()
31717 -
31718 - return proc.returncode
31719 -
31720 - finally:
31721 - if logname is None:
31722 - pass
31723 - elif logname_backup is None:
31724 - mysettings.configdict["env"].pop("LOGNAME", None)
31725 - else:
31726 - mysettings.configdict["env"]["LOGNAME"] = logname_backup
31727 +
31728 +
31729 + def spawn(
31730 + mystring,
31731 + mysettings,
31732 + debug=False,
31733 + free=False,
31734 + droppriv=False,
31735 + sesandbox=False,
31736 + fakeroot=False,
31737 + networked=True,
31738 + ipc=True,
31739 + mountns=False,
31740 + pidns=False,
31741 + **keywords
31742 + ):
31743 + """
31744 + Spawn a subprocess with extra portage-specific options.
31745 + Options include:
31746 +
31747 + Sandbox: Sandbox means the spawned process will be limited in its ability to
31748 + read and write files (normally this means it is restricted to ${D}/).
31749 + SELinux Sandbox: Enables sandboxing via SELinux.
31750 + Reduced Privileges: Drops privileges such that the process runs as portage:portage
31751 + instead of as root.
31752 +
31753 + Notes: os.system cannot be used because it messes with signal handling. Instead we
31754 + use the portage.process spawn* family of functions.
31755 +
31756 + This function waits for the process to terminate.
31757 +
31758 + @param mystring: Command to run
31759 + @type mystring: String
31760 + @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
31761 + @type mysettings: Dictionary or config instance
31762 + @param debug: Ignored
31763 + @type debug: Boolean
31764 + @param free: Run this process free of (i.e., without) the sandbox
31765 + @type free: Boolean
31766 + @param droppriv: Drop to portage:portage when running this command
31767 + @type droppriv: Boolean
31768 + @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
31769 + @type sesandbox: Boolean
31770 + @param fakeroot: Run this command with faked root privileges
31771 + @type fakeroot: Boolean
31772 + @param networked: Run this command with networking access enabled
31773 + @type networked: Boolean
31774 + @param ipc: Run this command with host IPC access enabled
31775 + @type ipc: Boolean
31776 + @param mountns: Run this command inside mount namespace
31777 + @type mountns: Boolean
31778 + @param pidns: Run this command in isolated PID namespace
31779 + @type pidns: Boolean
31780 + @param keywords: Extra options encoded as a dict, to be passed to spawn
31781 + @type keywords: Dictionary
31782 + @rtype: Integer
31783 + @return:
31784 + 1. The return code of the spawned process.
31785 + """
31786 +
31787 + check_config_instance(mysettings)
31788 +
31789 + fd_pipes = keywords.get("fd_pipes")
31790 + if fd_pipes is None:
31791 + fd_pipes = {
31792 + 0: portage._get_stdin().fileno(),
31793 + 1: sys.__stdout__.fileno(),
31794 + 2: sys.__stderr__.fileno(),
31795 + }
31796 + # In some cases the above print statements don't flush stdout, so
31797 + # it needs to be flushed before allowing a child process to use it
31798 + # so that output always shows in the correct order.
31799 + stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
31800 + for fd in fd_pipes.values():
31801 + if fd in stdout_filenos:
31802 + sys.__stdout__.flush()
31803 + sys.__stderr__.flush()
31804 + break
31805 +
31806 + features = mysettings.features
31807 +
31808 + # Use Linux namespaces if available
31809 + if uid == 0 and platform.system() == "Linux":
31810 + keywords["unshare_net"] = not networked
31811 + keywords["unshare_ipc"] = not ipc
31812 + keywords["unshare_mount"] = mountns
31813 + keywords["unshare_pid"] = pidns
31814 +
31815 + if (
31816 + not networked
31817 + and mysettings.get("EBUILD_PHASE") != "nofetch"
31818 + and ("network-sandbox-proxy" in features or "distcc" in features)
31819 + ):
31820 + # Provide a SOCKS5-over-UNIX-socket proxy to escape sandbox
31821 + # Don't do this for pkg_nofetch, since the spawn_nofetch
31822 + # function creates a private PORTAGE_TMPDIR.
31823 + try:
31824 + proxy = get_socks5_proxy(mysettings)
31825 + except NotImplementedError:
31826 + pass
31827 + else:
31828 + mysettings["PORTAGE_SOCKS5_PROXY"] = proxy
31829 + mysettings["DISTCC_SOCKS_PROXY"] = proxy
31830 +
31831 + # TODO: Enable fakeroot to be used together with droppriv. The
31832 + # fake ownership/permissions will have to be converted to real
31833 + # permissions in the merge phase.
31834 + fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
31835 + portage_build_uid = os.getuid()
31836 + portage_build_gid = os.getgid()
31837 + logname = None
31838 + if uid == 0 and portage_uid and portage_gid and hasattr(os, "setgroups"):
31839 + if droppriv:
31840 + logname = portage.data._portage_username
31841 + keywords.update(
31842 + {
31843 + "uid": portage_uid,
31844 + "gid": portage_gid,
31845 + "groups": userpriv_groups,
31846 + "umask": 0o22,
31847 + }
31848 + )
31849 +
31850 + # Adjust pty ownership so that subprocesses
31851 + # can directly access /dev/fd/{1,2}.
31852 + stdout_fd = fd_pipes.get(1)
31853 + if stdout_fd is not None:
31854 + try:
31855 + subprocess_tty = _os.ttyname(stdout_fd)
31856 + except OSError:
31857 + pass
31858 + else:
31859 + try:
31860 + parent_tty = _os.ttyname(sys.__stdout__.fileno())
31861 + except OSError:
31862 + parent_tty = None
31863 +
31864 + if subprocess_tty != parent_tty:
31865 + _os.chown(subprocess_tty, int(portage_uid), int(portage_gid))
31866 +
31867 + if (
31868 + "userpriv" in features
31869 + and "userpriv" not in mysettings["PORTAGE_RESTRICT"].split()
31870 + and secpass >= 2
31871 + ):
31872 + # Since Python 3.4, getpwuid and getgrgid
31873 + # require int type (no proxies).
31874 + portage_build_uid = int(portage_uid)
31875 + portage_build_gid = int(portage_gid)
31876 +
31877 + if "PORTAGE_BUILD_USER" not in mysettings:
31878 + user = None
31879 + try:
31880 + user = pwd.getpwuid(portage_build_uid).pw_name
31881 + except KeyError:
31882 + if portage_build_uid == 0:
31883 + user = "root"
31884 + elif portage_build_uid == portage_uid:
31885 + user = portage.data._portage_username
31886 ++ # BEGIN PREFIX LOCAL: accept numeric uid
31887 ++ else:
31888 ++ user = portage_uid
31889 ++ # END PREFIX LOCAL
31890 + if user is not None:
31891 + mysettings["PORTAGE_BUILD_USER"] = user
31892 +
31893 + if "PORTAGE_BUILD_GROUP" not in mysettings:
31894 + group = None
31895 + try:
31896 + group = grp.getgrgid(portage_build_gid).gr_name
31897 + except KeyError:
31898 + if portage_build_gid == 0:
31899 + group = "root"
31900 + elif portage_build_gid == portage_gid:
31901 + group = portage.data._portage_grpname
31902 ++ # BEGIN PREFIX LOCAL: accept numeric gid
31903 ++ else:
31904 ++ group = portage_gid
31905 ++ # END PREFIX LOCAL
31906 + if group is not None:
31907 + mysettings["PORTAGE_BUILD_GROUP"] = group
31908 +
31909 + if not free:
31910 + free = (droppriv and "usersandbox" not in features) or (
31911 + not droppriv
31912 + and "sandbox" not in features
31913 + and "usersandbox" not in features
31914 + and not fakeroot
31915 + )
31916 +
31917 - if not free and not (fakeroot or portage.process.sandbox_capable):
31918 ++ if not free and not (fakeroot or portage.process.sandbox_capable
31919 ++ or portage.process.macossandbox_capable): # PREFIX LOCAL
31920 + free = True
31921 +
31922 + if mysettings.mycpv is not None:
31923 + keywords["opt_name"] = "[%s]" % mysettings.mycpv
31924 + else:
31925 + keywords["opt_name"] = "[%s/%s]" % (
31926 + mysettings.get("CATEGORY", ""),
31927 + mysettings.get("PF", ""),
31928 + )
31929 +
31930 + if free or "SANDBOX_ACTIVE" in os.environ:
31931 + keywords["opt_name"] += " bash"
31932 + spawn_func = portage.process.spawn_bash
31933 + elif fakeroot:
31934 + keywords["opt_name"] += " fakeroot"
31935 + keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
31936 + spawn_func = portage.process.spawn_fakeroot
31937 ++ # BEGIN PREFIX LOCAL
31938 ++ elif "sandbox" in features and platform.system() == 'Darwin':
31939 ++ keywords["opt_name"] += " macossandbox"
31940 ++ sbprofile = MACOSSANDBOX_PROFILE
31941 ++
31942 ++ # determine variable names from profile: split
31943 ++ # "text@@VARNAME@@moretext@@OTHERVAR@@restoftext" into
31944 ++ # ("text", "VARNAME", "moretext", "OTHERVAR", "restoftext")
31945 ++ # and extract variable names by reading every second item.
31946 ++ variables = []
31947 ++ for line in sbprofile.split("\n"):
31948 ++ variables.extend(line.split("@@")[1:-1:2])
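++ # e.g. "a@@FOO@@b@@BAR@@c".split("@@") gives
++ # ["a", "FOO", "b", "BAR", "c"]; the [1:-1:2] slice keeps "FOO", "BAR"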
31949 ++
31950 ++ for var in variables:
31951 ++ paths = ""
31952 ++ if var in mysettings:
31953 ++ paths = mysettings[var]
31954 ++ else:
31955 ++ writemsg("Warning: sandbox profile references variable %s "
31956 ++ "which is not set.\nThe rule using it will have no "
31957 ++ "effect, which is most likely not the intended "
31958 ++ "result.\nPlease check make.conf/make.globals.\n" %
31959 ++ var)
31960 ++
31961 ++ # not set or empty value
31962 ++ if not paths:
31963 ++ sbprofile = sbprofile.replace("@@%s@@" % var, "")
31964 ++ continue
31965 ++
31966 ++ rules_literal = ""
31967 ++ rules_regex = ""
31968 ++
31969 ++ # FIXME: Allow for quoting inside the variable
31970 ++ # to allow paths with spaces in them?
31971 ++ for path in paths.split(" "):
31972 ++ # do a second round of token
31973 ++ # replacements to be able to reference
31974 ++ # settings like EPREFIX or
31975 ++ # PORTAGE_BUILDDIR.
31976 ++ for token in path.split("@@")[1:-1:2]:
31977 ++ if token not in mysettings:
31978 ++ continue
31979 ++
31980 ++ path = path.replace("@@%s@@" % token, mysettings[token])
31981 ++
31982 ++ if "@@" in path:
31983 ++ # unreplaced tokens left -
31984 ++ # silently ignore path - needed
31985 ++ # for PORTAGE_ACTUAL_DISTDIR
31986 ++ # which isn't always set
31987 ++ pass
31988 ++ elif path[-1] == os.sep:
31989 ++ # path ends in slash - make it a
31990 ++ # regex and allow access
31991 ++ # recursively.
31992 ++ path = path.replace(r'+', r'\+')
31993 ++ path = path.replace(r'*', r'\*')
31994 ++ path = path.replace(r'[', r'\[')
31995 ++ path = path.replace(r']', r'\]')
31996 ++ rules_regex += " #\"^%s\"\n" % path
31997 ++ else:
31998 ++ rules_literal += " #\"%s\"\n" % path
31999 ++
32000 ++ rules = ""
32001 ++ if rules_literal:
32002 ++ rules += " (literal\n" + rules_literal + " )\n"
32003 ++ if rules_regex:
32004 ++ rules += " (regex\n" + rules_regex + " )\n"
32005 ++ sbprofile = sbprofile.replace("@@%s@@" % var, rules)
32006 ++
32007 ++ keywords["profile"] = sbprofile
32008 ++ spawn_func = portage.process.spawn_macossandbox
32009 ++ # END PREFIX LOCAL
32010 + else:
32011 + keywords["opt_name"] += " sandbox"
32012 + spawn_func = portage.process.spawn_sandbox
32013 +
32014 + if sesandbox:
32015 + spawn_func = selinux.spawn_wrapper(spawn_func, mysettings["PORTAGE_SANDBOX_T"])
32016 +
32017 + logname_backup = None
32018 + if logname is not None:
32019 + logname_backup = mysettings.configdict["env"].get("LOGNAME")
32020 + mysettings.configdict["env"]["LOGNAME"] = logname
32021 +
32022 + try:
32023 + if keywords.get("returnpid"):
32024 + return spawn_func(mystring, env=mysettings.environ(), **keywords)
32025 +
32026 + proc = EbuildSpawnProcess(
32027 + background=False,
32028 + args=mystring,
32029 + scheduler=SchedulerInterface(asyncio._safe_loop()),
32030 + spawn_func=spawn_func,
32031 + settings=mysettings,
32032 + **keywords
32033 + )
32034 +
32035 + proc.start()
32036 + proc.wait()
32037 +
32038 + return proc.returncode
32039 +
32040 + finally:
32041 + if logname is None:
32042 + pass
32043 + elif logname_backup is None:
32044 + mysettings.configdict["env"].pop("LOGNAME", None)
32045 + else:
32046 + mysettings.configdict["env"]["LOGNAME"] = logname_backup
32047 +
32048
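
Aside: the split("@@")[1:-1:2] idiom in the sandbox-profile block above
works because str.split() alternates literal text with placeholder
names, so the odd-indexed items are exactly the variable names. A
minimal standalone sketch (illustrative only, not part of this commit):

    # "a@@X@@b@@Y@@c".split("@@") yields ["a", "X", "b", "Y", "c"];
    # the [1:-1:2] slice keeps the odd-indexed items, i.e. the names.
    line = "(allow file-write* @@PORTAGE_BUILDDIR@@ @@DISTDIR@@)"
    names = line.split("@@")[1:-1:2]
    assert names == ["PORTAGE_BUILDDIR", "DISTDIR"]

The same idiom is applied twice above: once to collect variable names
from the profile, and again per path to expand nested @@TOKEN@@
references such as EPREFIX.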
32049 # parse actionmap to spawn ebuild with the appropriate args
32050 - def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
32051 - logfile=None, fd_pipes=None, returnpid=False):
32052 -
32053 - if returnpid:
32054 - warnings.warn("portage.spawnebuild() called "
32055 - "with returnpid parameter enabled. This usage will "
32056 - "not be supported in the future.",
32057 - DeprecationWarning, stacklevel=2)
32058 -
32059 - if not returnpid and \
32060 - (alwaysdep or "noauto" not in mysettings.features):
32061 - # process dependency first
32062 - if "dep" in actionmap[mydo]:
32063 - retval = spawnebuild(actionmap[mydo]["dep"], actionmap,
32064 - mysettings, debug, alwaysdep=alwaysdep, logfile=logfile,
32065 - fd_pipes=fd_pipes, returnpid=returnpid)
32066 - if retval:
32067 - return retval
32068 -
32069 - eapi = mysettings["EAPI"]
32070 -
32071 - if mydo in ("configure", "prepare") and not eapi_has_src_prepare_and_src_configure(eapi):
32072 - return os.EX_OK
32073 -
32074 - if mydo == "pretend" and not eapi_has_pkg_pretend(eapi):
32075 - return os.EX_OK
32076 -
32077 - if not (mydo == "install" and "noauto" in mysettings.features):
32078 - check_file = os.path.join(
32079 - mysettings["PORTAGE_BUILDDIR"], ".%sed" % mydo.rstrip('e'))
32080 - if os.path.exists(check_file):
32081 - writemsg_stdout(_(">>> It appears that "
32082 - "'%(action)s' has already executed for '%(pkg)s'; skipping.\n") %
32083 - {"action":mydo, "pkg":mysettings["PF"]})
32084 - writemsg_stdout(_(">>> Remove '%(file)s' to force %(action)s.\n") %
32085 - {"file":check_file, "action":mydo})
32086 - return os.EX_OK
32087 -
32088 - return _spawn_phase(mydo, mysettings,
32089 - actionmap=actionmap, logfile=logfile,
32090 - fd_pipes=fd_pipes, returnpid=returnpid)
32091
32092 - _post_phase_cmds = {
32093
32094 - "install" : [
32095 - "install_qa_check",
32096 - "install_symlink_html_docs",
32097 - "install_hooks"],
32098 -
32099 - "preinst" : (
32100 - (
32101 - # Since SELinux does not allow LD_PRELOAD across domain transitions,
32102 - # disable the LD_PRELOAD sandbox for preinst_selinux_labels.
32103 - {
32104 - "ld_preload_sandbox": False,
32105 - "selinux_only": True,
32106 - },
32107 - [
32108 - "preinst_selinux_labels",
32109 - ],
32110 - ),
32111 - (
32112 - {},
32113 - [
32114 - "preinst_aix",
32115 - "preinst_sfperms",
32116 - "preinst_suid_scan",
32117 - "preinst_qa_check",
32118 - ],
32119 - ),
32120 - ),
32121 - "postinst" : [
32122 - "postinst_aix",
32123 - "postinst_qa_check"],
32124 + def spawnebuild(
32125 + mydo,
32126 + actionmap,
32127 + mysettings,
32128 + debug,
32129 + alwaysdep=0,
32130 + logfile=None,
32131 + fd_pipes=None,
32132 + returnpid=False,
32133 + ):
32134 +
32135 + if returnpid:
32136 + warnings.warn(
32137 + "portage.spawnebuild() called "
32138 + "with returnpid parameter enabled. This usage will "
32139 + "not be supported in the future.",
32140 + DeprecationWarning,
32141 + stacklevel=2,
32142 + )
32143 +
32144 + if not returnpid and (alwaysdep or "noauto" not in mysettings.features):
32145 + # process dependency first
32146 + if "dep" in actionmap[mydo]:
32147 + retval = spawnebuild(
32148 + actionmap[mydo]["dep"],
32149 + actionmap,
32150 + mysettings,
32151 + debug,
32152 + alwaysdep=alwaysdep,
32153 + logfile=logfile,
32154 + fd_pipes=fd_pipes,
32155 + returnpid=returnpid,
32156 + )
32157 + if retval:
32158 + return retval
32159 +
32160 + eapi = mysettings["EAPI"]
32161 +
32162 + if mydo in ("configure", "prepare") and not eapi_has_src_prepare_and_src_configure(
32163 + eapi
32164 + ):
32165 + return os.EX_OK
32166 +
32167 + if mydo == "pretend" and not eapi_has_pkg_pretend(eapi):
32168 + return os.EX_OK
32169 +
32170 + if not (mydo == "install" and "noauto" in mysettings.features):
32171 + check_file = os.path.join(
32172 + mysettings["PORTAGE_BUILDDIR"], ".%sed" % mydo.rstrip("e")
32173 + )
32174 + if os.path.exists(check_file):
32175 + writemsg_stdout(
32176 + _(
32177 + ">>> It appears that "
32178 + "'%(action)s' has already executed for '%(pkg)s'; skipping.\n"
32179 + )
32180 + % {"action": mydo, "pkg": mysettings["PF"]}
32181 + )
32182 + writemsg_stdout(
32183 + _(">>> Remove '%(file)s' to force %(action)s.\n")
32184 + % {"file": check_file, "action": mydo}
32185 + )
32186 + return os.EX_OK
32187 +
32188 + return _spawn_phase(
32189 + mydo,
32190 + mysettings,
32191 + actionmap=actionmap,
32192 + logfile=logfile,
32193 + fd_pipes=fd_pipes,
32194 + returnpid=returnpid,
32195 + )
32196 +
32197 +
32198 + _post_phase_cmds = {
32199 + "install": ["install_qa_check", "install_symlink_html_docs", "install_hooks"],
32200 + "preinst": (
32201 + (
32202 + # Since SELinux does not allow LD_PRELOAD across domain transitions,
32203 + # disable the LD_PRELOAD sandbox for preinst_selinux_labels.
32204 + {
32205 + "ld_preload_sandbox": False,
32206 + "selinux_only": True,
32207 + },
32208 + [
32209 + "preinst_selinux_labels",
32210 + ],
32211 + ),
32212 + (
32213 + {},
32214 + [
32215 ++ # PREFIX LOCAL
32216 ++ "preinst_aix",
32217 + "preinst_sfperms",
32218 + "preinst_suid_scan",
32219 + "preinst_qa_check",
32220 + ],
32221 + ),
32222 + ),
32223 - "postinst": ["postinst_qa_check"],
32224 ++ "postinst": [
32225 ++ # PREFIX LOCAL
32226 ++ "postinst_aix",
32227 ++ "postinst_qa_check",
32228 ++ ],
32229 }
32230
32231 +
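
The mixed value types in _post_phase_cmds are deliberate: a plain list
means every command runs with the default spawn settings, while the
tuple-of-tuples form used for "preinst" lets each command group carry
its own settings (here, disabling the LD_PRELOAD sandbox for the
SELinux labeling step). A sketch of how a consumer might flatten the
structure; the real consumer lives elsewhere in portage, and the names
below are illustrative:

    def iter_post_phase_cmds(entry):
        # Flat list: every command uses the default spawn settings.
        if isinstance(entry, list):
            for cmd in entry:
                yield {}, cmd
        # Grouped form: (spawn_kwargs, commands) pairs, each group
        # carrying its own settings, e.g. ld_preload_sandbox=False.
        else:
            for kwargs, cmds in entry:
                for cmd in cmds:
                    yield dict(kwargs), cmd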
32232 def _post_phase_userpriv_perms(mysettings):
32233 - if "userpriv" in mysettings.features and secpass >= 2:
32234 - """ Privileged phases may have left files that need to be made
32235 - writable to a less privileged user."""
32236 - for path in (mysettings["HOME"], mysettings["T"]):
32237 - apply_recursive_permissions(path,
32238 - uid=portage_uid, gid=portage_gid, dirmode=0o700, dirmask=0,
32239 - filemode=0o600, filemask=0)
32240 + if "userpriv" in mysettings.features and secpass >= 2:
32241 + """Privileged phases may have left files that need to be made
32242 + writable to a less privileged user."""
32243 + for path in (mysettings["HOME"], mysettings["T"]):
32244 + apply_recursive_permissions(
32245 + path,
32246 + uid=portage_uid,
32247 + gid=portage_gid,
32248 + dirmode=0o700,
32249 + dirmask=0,
32250 + filemode=0o600,
32251 + filemask=0,
32252 + )
32253
32254
32255 def _post_phase_emptydir_cleanup(mysettings):
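
For context, _post_phase_userpriv_perms() above hands the actual work
to apply_recursive_permissions(). A reduced sketch of that kind of
recursive chown/chmod walk, with the mask semantics and error callback
of the real helper omitted (illustrative only, not portage's
implementation):

    import os

    def chown_chmod_tree(top, uid, gid, dirmode=0o700, filemode=0o600):
        os.chown(top, uid, gid)
        os.chmod(top, dirmode)
        for root, dirs, files in os.walk(top):
            for name in dirs:
                path = os.path.join(root, name)
                os.chown(path, uid, gid)
                os.chmod(path, dirmode)
            for name in files:
                path = os.path.join(root, name)
                os.chown(path, uid, gid)
                os.chmod(path, filemode)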
32256 diff --cc lib/portage/package/ebuild/fetch.py
32257 index 7c95245a7,2d3625800..2fcd33bd9
32258 --- a/lib/portage/package/ebuild/fetch.py
32259 +++ b/lib/portage/package/ebuild/fetch.py
32260 @@@ -22,29 -22,44 +22,46 @@@ from urllib.parse import urlpars
32261 from urllib.parse import quote as urlquote
32262
32263 import portage
32264 - portage.proxy.lazyimport.lazyimport(globals(),
32265 - 'portage.package.ebuild.config:check_config_instance,config',
32266 - 'portage.package.ebuild.doebuild:doebuild_environment,' + \
32267 - '_doebuild_spawn',
32268 - 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
32269 - 'portage.util:atomic_ofstream',
32270 - 'portage.util.configparser:SafeConfigParser,read_configs,' +
32271 - 'ConfigParserError',
32272 - 'portage.util.install_mask:_raise_exc',
32273 - 'portage.util._urlopen:urlopen',
32274 +
32275 + portage.proxy.lazyimport.lazyimport(
32276 + globals(),
32277 + "portage.package.ebuild.config:check_config_instance,config",
32278 + "portage.package.ebuild.doebuild:doebuild_environment," + "_doebuild_spawn",
32279 + "portage.package.ebuild.prepare_build_dirs:prepare_build_dirs",
32280 + "portage.util:atomic_ofstream",
32281 + "portage.util.configparser:SafeConfigParser,read_configs," + "ConfigParserError",
32282 + "portage.util.install_mask:_raise_exc",
32283 + "portage.util._urlopen:urlopen",
32284 )
32285
32286 - from portage import os, selinux, shutil, _encodings, \
32287 - _movefile, _shell_quote, _unicode_encode
32288 - from portage.checksum import (get_valid_checksum_keys, perform_md5, verify_all,
32289 - _filter_unaccelarated_hashes, _hash_filter, _apply_hash_filter,
32290 - checksum_str)
32291 - from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \
32292 - GLOBAL_CONFIG_PATH
32293 + from portage import (
32294 + os,
32295 + selinux,
32296 + shutil,
32297 + _encodings,
32298 + _movefile,
32299 + _shell_quote,
32300 + _unicode_encode,
32301 + )
32302 + from portage.checksum import (
32303 + get_valid_checksum_keys,
32304 + perform_md5,
32305 + verify_all,
32306 + _filter_unaccelarated_hashes,
32307 + _hash_filter,
32308 + _apply_hash_filter,
32309 + checksum_str,
32310 + )
32311 + from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, GLOBAL_CONFIG_PATH
32312 ++# PREFIX LOCAL
32313 +from portage.const import rootgid
32314 from portage.data import portage_gid, portage_uid, userpriv_groups
32315 - from portage.exception import FileNotFound, OperationNotPermitted, \
32316 - PortageException, TryAgain
32317 + from portage.exception import (
32318 + FileNotFound,
32319 + OperationNotPermitted,
32320 + PortageException,
32321 + TryAgain,
32322 + )
32323 from portage.localization import _
32324 from portage.locks import lockfile, unlockfile
32325 from portage.output import colorize, EOutput
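
The lazyimport() call above registers proxies in the module globals so
that, for example, portage.util._urlopen is only imported the first
time urlopen is actually used. The real proxy machinery lives in
portage.proxy.objectproxy and is considerably more general; a minimal
sketch of the idea for callables only (names are illustrative):

    import importlib

    class LazyAttr:
        """Resolve module.attr on first call, then cache it."""

        def __init__(self, module, attr):
            self._module = module
            self._attr = attr
            self._obj = None

        def __call__(self, *args, **kwargs):
            if self._obj is None:
                self._obj = getattr(
                    importlib.import_module(self._module), self._attr)
            return self._obj(*args, **kwargs)

    urlopen = LazyAttr("portage.util._urlopen", "urlopen")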
32326 @@@ -181,55 -217,63 +219,64 @@@ def _userpriv_test_write_file(settings
32327
32328
32329 def _ensure_distdir(settings, distdir):
32330 - """
32331 - Ensure that DISTDIR exists with appropriate permissions.
32332 -
32333 - @param settings: portage config
32334 - @type settings: portage.package.ebuild.config.config
32335 - @param distdir: DISTDIR path
32336 - @type distdir: str
32337 - @raise PortageException: portage.exception wrapper exception
32338 - """
32339 - global _userpriv_test_write_file_cache
32340 - dirmode = 0o070
32341 - filemode = 0o60
32342 - modemask = 0o2
32343 - dir_gid = portage_gid
32344 - if "FAKED_MODE" in settings:
32345 - # When inside fakeroot, directories with portage's gid appear
32346 - # to have root's gid. Therefore, use root's gid instead of
32347 - # portage's gid to avoid spurious permissions adjustments
32348 - # when inside fakeroot.
32349 - dir_gid = rootgid
32350 -
32351 - userfetch = portage.data.secpass >= 2 and "userfetch" in settings.features
32352 - userpriv = portage.data.secpass >= 2 and "userpriv" in settings.features
32353 - write_test_file = os.path.join(distdir, ".__portage_test_write__")
32354 -
32355 - try:
32356 - st = os.stat(distdir)
32357 - except OSError:
32358 - st = None
32359 -
32360 - if st is not None and stat.S_ISDIR(st.st_mode):
32361 - if not (userfetch or userpriv):
32362 - return
32363 - if _userpriv_test_write_file(settings, write_test_file):
32364 - return
32365 -
32366 - _userpriv_test_write_file_cache.pop(write_test_file, None)
32367 - if ensure_dirs(distdir, gid=dir_gid, mode=dirmode, mask=modemask):
32368 - if st is None:
32369 - # The directory has just been created
32370 - # and therefore it must be empty.
32371 - return
32372 - writemsg(_("Adjusting permissions recursively: '%s'\n") % distdir,
32373 - noiselevel=-1)
32374 - if not apply_recursive_permissions(distdir,
32375 - gid=dir_gid, dirmode=dirmode, dirmask=modemask,
32376 - filemode=filemode, filemask=modemask, onerror=_raise_exc):
32377 - raise OperationNotPermitted(
32378 - _("Failed to apply recursive permissions for the portage group."))
32379 + """
32380 + Ensure that DISTDIR exists with appropriate permissions.
32381 +
32382 + @param settings: portage config
32383 + @type settings: portage.package.ebuild.config.config
32384 + @param distdir: DISTDIR path
32385 + @type distdir: str
32386 + @raise PortageException: portage.exception wrapper exception
32387 + """
32388 + global _userpriv_test_write_file_cache
32389 + dirmode = 0o070
32390 + filemode = 0o60
32391 + modemask = 0o2
32392 + dir_gid = portage_gid
32393 + if "FAKED_MODE" in settings:
32394 + # When inside fakeroot, directories with portage's gid appear
32395 + # to have root's gid. Therefore, use root's gid instead of
32396 + # portage's gid to avoid spurious permissions adjustments
32397 + # when inside fakeroot.
32398 - dir_gid = 0
32399 ++ # PREFIX LOCAL: do not assume root to be 0
32400 ++ dir_gid = rootgid
32401 +
32402 + userfetch = portage.data.secpass >= 2 and "userfetch" in settings.features
32403 + userpriv = portage.data.secpass >= 2 and "userpriv" in settings.features
32404 + write_test_file = os.path.join(distdir, ".__portage_test_write__")
32405 +
32406 + try:
32407 + st = os.stat(distdir)
32408 + except OSError:
32409 + st = None
32410 +
32411 + if st is not None and stat.S_ISDIR(st.st_mode):
32412 + if not (userfetch or userpriv):
32413 + return
32414 + if _userpriv_test_write_file(settings, write_test_file):
32415 + return
32416 +
32417 + _userpriv_test_write_file_cache.pop(write_test_file, None)
32418 + if ensure_dirs(distdir, gid=dir_gid, mode=dirmode, mask=modemask):
32419 + if st is None:
32420 + # The directory has just been created
32421 + # and therefore it must be empty.
32422 + return
32423 + writemsg(
32424 + _("Adjusting permissions recursively: '%s'\n") % distdir, noiselevel=-1
32425 + )
32426 + if not apply_recursive_permissions(
32427 + distdir,
32428 + gid=dir_gid,
32429 + dirmode=dirmode,
32430 + dirmask=modemask,
32431 + filemode=filemode,
32432 + filemask=modemask,
32433 + onerror=_raise_exc,
32434 + ):
32435 + raise OperationNotPermitted(
32436 + _("Failed to apply recursive permissions for the portage group.")
32437 + )
32438
32439
32440 def _checksum_failure_temp_file(settings, distdir, basename):
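
The write-test file referenced in _ensure_distdir() reflects a simple
probe strategy: rather than reasoning about modes, owners and ACLs,
portage tries to create a throwaway file in DISTDIR and caches the
outcome (the real _userpriv_test_write_file() additionally arranges for
the probe to run without root privileges). A bare-bones sketch of the
probe itself, with illustrative names:

    import os

    def can_write(dirpath, probe_name=".__portage_test_write__"):
        probe = os.path.join(dirpath, probe_name)
        try:
            with open(probe, "wb"):
                pass
            os.unlink(probe)
            return True
        except OSError:
            return False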
32441 diff --cc lib/portage/process.py
32442 index d608e6237,84e09f8ec..5879694f9
32443 --- a/lib/portage/process.py
32444 +++ b/lib/portage/process.py
32445 @@@ -19,11 -19,13 +19,13 @@@ from portage import o
32446 from portage import _encodings
32447 from portage import _unicode_encode
32448 import portage
32449 - portage.proxy.lazyimport.lazyimport(globals(),
32450 - 'portage.util:dump_traceback,writemsg',
32451 +
32452 + portage.proxy.lazyimport.lazyimport(
32453 + globals(),
32454 + "portage.util:dump_traceback,writemsg",
32455 )
32456
32457 -from portage.const import BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY
32458 +from portage.const import BASH_BINARY, SANDBOX_BINARY, MACOSSANDBOX_BINARY, FAKEROOT_BINARY
32459 from portage.exception import CommandNotFound
32460 from portage.util._ctypes import find_library, LoadLibrary, ctypes
32461
32462 @@@ -55,165 -58,166 +58,181 @@@ except AttributeError
32463 # Prefer /proc/self/fd if available (/dev/fd
32464 # doesn't work on solaris, see bug #474536).
32465 for _fd_dir in ("/proc/self/fd", "/dev/fd"):
32466 - if os.path.isdir(_fd_dir):
32467 - break
32468 - else:
32469 - _fd_dir = None
32470 + if os.path.isdir(_fd_dir):
32471 + break
32472 + else:
32473 + _fd_dir = None
32474
32475 # /dev/fd does not work on FreeBSD, see bug #478446
32476 - if platform.system() in ('FreeBSD',) and _fd_dir == '/dev/fd':
32477 - _fd_dir = None
32478 + if platform.system() in ("FreeBSD",) and _fd_dir == "/dev/fd":
32479 + _fd_dir = None
32480
32481 if _fd_dir is not None:
32482 - def get_open_fds():
32483 - return (int(fd) for fd in os.listdir(_fd_dir) if fd.isdigit())
32484 -
32485 - if platform.python_implementation() == 'PyPy':
32486 - # EAGAIN observed with PyPy 1.8.
32487 - _get_open_fds = get_open_fds
32488 - def get_open_fds():
32489 - try:
32490 - return _get_open_fds()
32491 - except OSError as e:
32492 - if e.errno != errno.EAGAIN:
32493 - raise
32494 - return range(max_fd_limit)
32495 +
32496 + def get_open_fds():
32497 + return (int(fd) for fd in os.listdir(_fd_dir) if fd.isdigit())
32498 +
32499 + if platform.python_implementation() == "PyPy":
32500 + # EAGAIN observed with PyPy 1.8.
32501 + _get_open_fds = get_open_fds
32502 +
32503 + def get_open_fds():
32504 + try:
32505 + return _get_open_fds()
32506 + except OSError as e:
32507 + if e.errno != errno.EAGAIN:
32508 + raise
32509 + return range(max_fd_limit)
32510
32511 elif os.path.isdir("/proc/%s/fd" % portage.getpid()):
32512 - # In order for this function to work in forked subprocesses,
32513 - # os.getpid() must be called from inside the function.
32514 - def get_open_fds():
32515 - return (int(fd) for fd in os.listdir("/proc/%s/fd" % portage.getpid())
32516 - if fd.isdigit())
32517 + # In order for this function to work in forked subprocesses,
32518 + # os.getpid() must be called from inside the function.
32519 + def get_open_fds():
32520 + return (
32521 + int(fd)
32522 + for fd in os.listdir("/proc/%s/fd" % portage.getpid())
32523 + if fd.isdigit()
32524 + )
32525
32526 else:
32527 - def get_open_fds():
32528 - return range(max_fd_limit)
32529
32530 - sandbox_capable = (os.path.isfile(SANDBOX_BINARY) and
32531 - os.access(SANDBOX_BINARY, os.X_OK))
32532 + def get_open_fds():
32533 + return range(max_fd_limit)
32534
32535 - fakeroot_capable = (os.path.isfile(FAKEROOT_BINARY) and
32536 - os.access(FAKEROOT_BINARY, os.X_OK))
32537 +
32538 + sandbox_capable = os.path.isfile(SANDBOX_BINARY) and os.access(SANDBOX_BINARY, os.X_OK)
32539 +
32540 + fakeroot_capable = os.path.isfile(FAKEROOT_BINARY) and os.access(
32541 + FAKEROOT_BINARY, os.X_OK
32542 + )
32543
32544 +macossandbox_capable = (os.path.isfile(MACOSSANDBOX_BINARY) and
32545 + os.access(MACOSSANDBOX_BINARY, os.X_OK))
32546
32547 def sanitize_fds():
32548 - """
32549 - Set the inheritable flag to False for all open file descriptors,
32550 - except for those corresponding to stdin, stdout, and stderr. This
32551 - ensures that any unintentionally inherited file descriptors will
32552 - not be inherited by child processes.
32553 - """
32554 - if _set_inheritable is not None:
32555 -
32556 - whitelist = frozenset([
32557 - portage._get_stdin().fileno(),
32558 - sys.__stdout__.fileno(),
32559 - sys.__stderr__.fileno(),
32560 - ])
32561 -
32562 - for fd in get_open_fds():
32563 - if fd not in whitelist:
32564 - try:
32565 - _set_inheritable(fd, False)
32566 - except OSError:
32567 - pass
32568 + """
32569 + Set the inheritable flag to False for all open file descriptors,
32570 + except for those corresponding to stdin, stdout, and stderr. This
32571 + ensures that any unintentionally inherited file descriptors will
32572 + not be inherited by child processes.
32573 + """
32574 + if _set_inheritable is not None:
32575 +
32576 + whitelist = frozenset(
32577 + [
32578 + portage._get_stdin().fileno(),
32579 + sys.__stdout__.fileno(),
32580 + sys.__stderr__.fileno(),
32581 + ]
32582 + )
32583 +
32584 + for fd in get_open_fds():
32585 + if fd not in whitelist:
32586 + try:
32587 + _set_inheritable(fd, False)
32588 + except OSError:
32589 + pass
32590
32591
32592 def spawn_bash(mycommand, debug=False, opt_name=None, **keywords):
32593 - """
32594 - Spawns a bash shell running a specific command
32595 -
32596 - @param mycommand: The command for bash to run
32597 - @type mycommand: String
32598 - @param debug: Turn bash debugging on (set -x)
32599 - @type debug: Boolean
32600 - @param opt_name: Name of the spawned process (defaults to binary name)
32601 - @type opt_name: String
32602 - @param keywords: Extra Dictionary arguments to pass to spawn
32603 - @type keywords: Dictionary
32604 - """
32605 -
32606 - args = [BASH_BINARY]
32607 - if not opt_name:
32608 - opt_name = os.path.basename(mycommand.split()[0])
32609 - if debug:
32610 - # Print commands and their arguments as they are executed.
32611 - args.append("-x")
32612 - args.append("-c")
32613 - args.append(mycommand)
32614 - return spawn(args, opt_name=opt_name, **keywords)
32615 + """
32616 + Spawns a bash shell running a specific command
32617 +
32618 + @param mycommand: The command for bash to run
32619 + @type mycommand: String
32620 + @param debug: Turn bash debugging on (set -x)
32621 + @type debug: Boolean
32622 + @param opt_name: Name of the spawned process (defaults to binary name)
32623 + @type opt_name: String
32624 + @param keywords: Extra Dictionary arguments to pass to spawn
32625 + @type keywords: Dictionary
32626 + """
32627 +
32628 + args = [BASH_BINARY]
32629 + if not opt_name:
32630 + opt_name = os.path.basename(mycommand.split()[0])
32631 + if debug:
32632 + # Print commands and their arguments as they are executed.
32633 + args.append("-x")
32634 + args.append("-c")
32635 + args.append(mycommand)
32636 + return spawn(args, opt_name=opt_name, **keywords)
32637 +
32638
32639 def spawn_sandbox(mycommand, opt_name=None, **keywords):
32640 - if not sandbox_capable:
32641 - return spawn_bash(mycommand, opt_name=opt_name, **keywords)
32642 - args = [SANDBOX_BINARY]
32643 - if not opt_name:
32644 - opt_name = os.path.basename(mycommand.split()[0])
32645 - args.append(mycommand)
32646 - return spawn(args, opt_name=opt_name, **keywords)
32647 + if not sandbox_capable:
32648 + return spawn_bash(mycommand, opt_name=opt_name, **keywords)
32649 + args = [SANDBOX_BINARY]
32650 + if not opt_name:
32651 + opt_name = os.path.basename(mycommand.split()[0])
32652 + args.append(mycommand)
32653 + return spawn(args, opt_name=opt_name, **keywords)
32654 +
32655
32656 def spawn_fakeroot(mycommand, fakeroot_state=None, opt_name=None, **keywords):
32657 - args = [FAKEROOT_BINARY]
32658 - if not opt_name:
32659 - opt_name = os.path.basename(mycommand.split()[0])
32660 - if fakeroot_state:
32661 - open(fakeroot_state, "a").close()
32662 - args.append("-s")
32663 - args.append(fakeroot_state)
32664 - args.append("-i")
32665 - args.append(fakeroot_state)
32666 - args.append("--")
32667 - args.append(BASH_BINARY)
32668 - args.append("-c")
32669 - args.append(mycommand)
32670 - return spawn(args, opt_name=opt_name, **keywords)
32671 + args = [FAKEROOT_BINARY]
32672 + if not opt_name:
32673 + opt_name = os.path.basename(mycommand.split()[0])
32674 + if fakeroot_state:
32675 + open(fakeroot_state, "a").close()
32676 + args.append("-s")
32677 + args.append(fakeroot_state)
32678 + args.append("-i")
32679 + args.append(fakeroot_state)
32680 + args.append("--")
32681 + args.append(BASH_BINARY)
32682 + args.append("-c")
32683 + args.append(mycommand)
32684 + return spawn(args, opt_name=opt_name, **keywords)
32685 +
32686
32687 +def spawn_macossandbox(mycommand, profile=None, opt_name=None, **keywords):
32688 + if not macossandbox_capable:
32689 + return spawn_bash(mycommand, opt_name=opt_name, **keywords)
32690 + args = [MACOSSANDBOX_BINARY]
32691 + if not opt_name:
32692 + opt_name = os.path.basename(mycommand.split()[0])
32693 + args.append("-p")
32694 + args.append(profile)
32695 + args.append(BASH_BINARY)
32696 + args.append("-c")
32697 + args.append(mycommand)
32698 + return spawn(args, opt_name=opt_name, **keywords)
32699 +
32700 _exithandlers = []
32701 +
32702 +
32703 def atexit_register(func, *args, **kargs):
32704 - """Wrapper around atexit.register that is needed in order to track
32705 - what is registered. For example, when portage restarts itself via
32706 - os.execv, the atexit module does not work so we have to do it
32707 - manually by calling the run_exitfuncs() function in this module."""
32708 - _exithandlers.append((func, args, kargs))
32709 + """Wrapper around atexit.register that is needed in order to track
32710 + what is registered. For example, when portage restarts itself via
32711 + os.execv, the atexit module does not work so we have to do it
32712 + manually by calling the run_exitfuncs() function in this module."""
32713 + _exithandlers.append((func, args, kargs))
32714 +
32715
32716 def run_exitfuncs():
32717 - """This should behave identically to the routine performed by
32718 - the atexit module at exit time. It's only necessary to call this
32719 - function when atexit will not work (because of os.execv, for
32720 - example)."""
32721 -
32722 - # This function is a copy of the private atexit._run_exitfuncs()
32723 - # from the python 2.4.2 sources. The only difference from the
32724 - # original function is in the output to stderr.
32725 - exc_info = None
32726 - while _exithandlers:
32727 - func, targs, kargs = _exithandlers.pop()
32728 - try:
32729 - func(*targs, **kargs)
32730 - except SystemExit:
32731 - exc_info = sys.exc_info()
32732 - except: # No idea what they called, so we need this broad except here.
32733 - dump_traceback("Error in portage.process.run_exitfuncs", noiselevel=0)
32734 - exc_info = sys.exc_info()
32735 -
32736 - if exc_info is not None:
32737 - raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
32738 + """This should behave identically to the routine performed by
32739 + the atexit module at exit time. It's only necessary to call this
32740 + function when atexit will not work (because of os.execv, for
32741 + example)."""
32742 +
32743 + # This function is a copy of the private atexit._run_exitfuncs()
32744 + # from the python 2.4.2 sources. The only difference from the
32745 + # original function is in the output to stderr.
32746 + exc_info = None
32747 + while _exithandlers:
32748 + func, targs, kargs = _exithandlers.pop()
32749 + try:
32750 + func(*targs, **kargs)
32751 + except SystemExit:
32752 + exc_info = sys.exc_info()
32753 + except: # No idea what they called, so we need this broad except here.
32754 + dump_traceback("Error in portage.process.run_exitfuncs", noiselevel=0)
32755 + exc_info = sys.exc_info()
32756 +
32757 + if exc_info is not None:
32758 + raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
32759 +
32760
32761 atexit.register(run_exitfuncs)
32762
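
The PREFIX LOCAL pieces of this file add a third spawn wrapper next to
spawn_sandbox() and spawn_fakeroot(): macossandbox_capable gates on the
MACOSSANDBOX_BINARY constant (presumably Apple's sandbox-exec), and
spawn_macossandbox() wraps the bash invocation in it together with the
profile generated in doebuild.py. Reduced to its argv construction, and
with the binary path below being an assumption for illustration:

    import os

    SANDBOX_EXEC = "/usr/bin/sandbox-exec"  # assumed location
    BASH = "/bin/bash"

    def macos_sandbox_argv(command, profile):
        if not (os.path.isfile(SANDBOX_EXEC)
                and os.access(SANDBOX_EXEC, os.X_OK)):
            # Mirror the fallback above: run plain bash when the
            # sandbox binary is unavailable.
            return [BASH, "-c", command]
        return [SANDBOX_EXEC, "-p", profile, BASH, "-c", command]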
32763 diff --cc lib/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
32764 index 2bc54698a,cf239240c..1115b18d7
32765 --- a/lib/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
32766 +++ b/lib/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
32767 @@@ -11,19 -11,26 +11,28 @@@ from portage.util._eventloop.global_eve
32768 from _emerge.PipeReader import PipeReader
32769 from _emerge.SpawnProcess import SpawnProcess
32770
32771 +
32772 class LazyImportPortageBaselineTestCase(TestCase):
32773
32774 - _module_re = re.compile(r'^(portage|repoman|_emerge)\.')
32775 + _module_re = re.compile(r"^(portage|repoman|_emerge)\.")
32776
32777 - _baseline_imports = frozenset([
32778 - 'portage.const', 'portage.localization',
32779 - 'portage.proxy', 'portage.proxy.lazyimport',
32780 - 'portage.proxy.objectproxy',
32781 - 'portage._selinux',
32782 - 'portage.const_autotool',
32783 - ])
32784 + _baseline_imports = frozenset(
32785 + [
32786 + "portage.const",
32787 + "portage.localization",
32788 + "portage.proxy",
32789 + "portage.proxy.lazyimport",
32790 + "portage.proxy.objectproxy",
32791 + "portage._selinux",
32792 ++ # PREFIX LOCAL
32793 ++ "portage.const_autotool",
32794 + ]
32795 + )
32796
32797 - _baseline_import_cmd = [portage._python_interpreter, '-c', '''
32798 + _baseline_import_cmd = [
32799 + portage._python_interpreter,
32800 + "-c",
32801 + """
32802 import os
32803 import sys
32804 sys.path.insert(0, os.environ["PORTAGE_PYM_PATH"])
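
The test pattern here is worth spelling out: a pristine interpreter is
spawned, portage is imported, and the resulting sys.modules is compared
against the _baseline_imports whitelist, so any newly introduced eager
import shows up as a test failure. A condensed sketch of the same check
(assumes PORTAGE_PYM_PATH is set, as in the test; names illustrative):

    import re
    import subprocess
    import sys

    payload = (
        "import os, sys\n"
        "sys.path.insert(0, os.environ['PORTAGE_PYM_PATH'])\n"
        "import portage\n"
        "print('\\n'.join(sorted(sys.modules)))\n"
    )
    out = subprocess.check_output([sys.executable, "-c", payload],
                                  text=True)
    loaded = set(re.findall(r"^(?:portage|repoman|_emerge)\.\S*", out,
                            re.M))
    # The test then asserts that loaded is a subset of the whitelist.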
32805 diff --cc lib/portage/tests/resolver/ResolverPlayground.py
32806 index 67267a5cd,fdd0714e6..969d8f2fb
32807 --- a/lib/portage/tests/resolver/ResolverPlayground.py
32808 +++ b/lib/portage/tests/resolver/ResolverPlayground.py
32809 @@@ -72,616 -88,669 +88,671 @@@ class ResolverPlayground
32810 </pkgmetadata>
32811 """
32812
32813 - portage_bin = (
32814 - 'ebuild',
32815 - 'egencache',
32816 - 'emerge',
32817 - 'emerge-webrsync',
32818 - 'emirrordist',
32819 - 'glsa-check',
32820 - 'portageq',
32821 - 'quickpkg',
32822 - )
32823 -
32824 - portage_sbin = (
32825 - 'archive-conf',
32826 - 'dispatch-conf',
32827 - 'emaint',
32828 - 'env-update',
32829 - 'etc-update',
32830 - 'fixpackages',
32831 - 'regenworld',
32832 - )
32833 -
32834 - def __init__(self, ebuilds={}, binpkgs={}, installed={}, profile={}, repo_configs={}, \
32835 - user_config={}, sets={}, world=[], world_sets=[], distfiles={}, eclasses={},
32836 - eprefix=None, targetroot=False, debug=False):
32837 - """
32838 - ebuilds: cpv -> metadata mapping simulating available ebuilds.
32839 - installed: cpv -> metadata mapping simulating installed packages.
32840 - If a metadata key is missing, it gets a default value.
32841 - profile: settings defined by the profile.
32842 - """
32843 -
32844 - self.debug = debug
32845 - if eprefix is None:
32846 - self.eprefix = normalize_path(tempfile.mkdtemp())
32847 -
32848 - # EPREFIX/bin is used by fake true_binaries. Real binaries go into EPREFIX/usr/bin
32849 - eubin = os.path.join(self.eprefix, "usr", "bin")
32850 - ensure_dirs(eubin)
32851 - for x in self.portage_bin:
32852 - os.symlink(os.path.join(PORTAGE_BIN_PATH, x), os.path.join(eubin, x))
32853 -
32854 - eusbin = os.path.join(self.eprefix, "usr", "sbin")
32855 - ensure_dirs(eusbin)
32856 - for x in self.portage_sbin:
32857 - os.symlink(os.path.join(PORTAGE_BIN_PATH, x), os.path.join(eusbin, x))
32858 -
32859 - essential_binaries = (
32860 - "awk",
32861 - "basename",
32862 - "bzip2",
32863 - "cat",
32864 - "chgrp",
32865 - "chmod",
32866 - "chown",
32867 - "comm",
32868 - "cp",
32869 - "egrep",
32870 - "env",
32871 - "find",
32872 - "grep",
32873 - "head",
32874 - "install",
32875 - "ln",
32876 - "mkdir",
32877 - "mkfifo",
32878 - "mktemp",
32879 - "mv",
32880 - "readlink",
32881 - "rm",
32882 - "sed",
32883 - "sort",
32884 - "tar",
32885 - "tr",
32886 - "uname",
32887 - "uniq",
32888 - "xargs",
32889 - "zstd",
32890 - )
32891 - # Exclude internal wrappers from PATH lookup.
32892 - orig_path = os.environ['PATH']
32893 - included_paths = []
32894 - for path in orig_path.split(':'):
32895 - if path and not fnmatch.fnmatch(path, '*/portage/*/ebuild-helpers*'):
32896 - included_paths.append(path)
32897 - try:
32898 - os.environ['PATH'] = ':'.join(included_paths)
32899 - for x in essential_binaries:
32900 - path = find_binary(x)
32901 - if path is None:
32902 - raise portage.exception.CommandNotFound(x)
32903 - os.symlink(path, os.path.join(eubin, x))
32904 - finally:
32905 - os.environ['PATH'] = orig_path
32906 - else:
32907 - self.eprefix = normalize_path(eprefix)
32908 -
32909 - # Tests may override portage.const.EPREFIX in order to
32910 - # simulate a prefix installation. It's reasonable to do
32911 - # this because tests should be self-contained such that
32912 - # the "real" value of portage.const.EPREFIX is entirely
32913 - # irrelevant (see bug #492932).
32914 - self._orig_eprefix = portage.const.EPREFIX
32915 - portage.const.EPREFIX = self.eprefix.rstrip(os.sep)
32916 -
32917 - self.eroot = self.eprefix + os.sep
32918 - if targetroot:
32919 - self.target_root = os.path.join(self.eroot, 'target_root')
32920 - else:
32921 - self.target_root = os.sep
32922 - self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
32923 - self.pkgdir = os.path.join(self.eprefix, "pkgdir")
32924 - self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
32925 - os.makedirs(self.vdbdir)
32926 -
32927 - if not debug:
32928 - portage.util.noiselimit = -2
32929 -
32930 - self._repositories = {}
32931 - #Make sure the main repo is always created
32932 - self._get_repo_dir("test_repo")
32933 -
32934 - self._create_distfiles(distfiles)
32935 - self._create_ebuilds(ebuilds)
32936 - self._create_binpkgs(binpkgs)
32937 - self._create_installed(installed)
32938 - self._create_profile(ebuilds, eclasses, installed, profile, repo_configs, user_config, sets)
32939 - self._create_world(world, world_sets)
32940 -
32941 - self.settings, self.trees = self._load_config()
32942 -
32943 - self._create_ebuild_manifests(ebuilds)
32944 -
32945 - portage.util.noiselimit = 0
32946 -
32947 - def reload_config(self):
32948 - """
32949 - Reload configuration from disk, which is useful if it has
32950 - been modified after the constructor has been called.
32951 - """
32952 - for eroot in self.trees:
32953 - portdb = self.trees[eroot]["porttree"].dbapi
32954 - portdb.close_caches()
32955 - self.settings, self.trees = self._load_config()
32956 -
32957 - def _get_repo_dir(self, repo):
32958 - """
32959 - Create the repo directory if needed.
32960 - """
32961 - if repo not in self._repositories:
32962 - if repo == "test_repo":
32963 - self._repositories["DEFAULT"] = {"main-repo": repo}
32964 -
32965 - repo_path = os.path.join(self.eroot, "var", "repositories", repo)
32966 - self._repositories[repo] = {"location": repo_path}
32967 - profile_path = os.path.join(repo_path, "profiles")
32968 -
32969 - try:
32970 - os.makedirs(profile_path)
32971 - except os.error:
32972 - pass
32973 -
32974 - repo_name_file = os.path.join(profile_path, "repo_name")
32975 - with open(repo_name_file, "w") as f:
32976 - f.write("%s\n" % repo)
32977 -
32978 - return self._repositories[repo]["location"]
32979 -
32980 - def _create_distfiles(self, distfiles):
32981 - os.makedirs(self.distdir)
32982 - for k, v in distfiles.items():
32983 - with open(os.path.join(self.distdir, k), 'wb') as f:
32984 - f.write(v)
32985 -
32986 - def _create_ebuilds(self, ebuilds):
32987 - for cpv in ebuilds:
32988 - a = Atom("=" + cpv, allow_repo=True)
32989 - repo = a.repo
32990 - if repo is None:
32991 - repo = "test_repo"
32992 -
32993 - metadata = ebuilds[cpv].copy()
32994 - copyright_header = metadata.pop("COPYRIGHT_HEADER", None)
32995 - eapi = metadata.pop("EAPI", "0")
32996 - misc_content = metadata.pop("MISC_CONTENT", None)
32997 - metadata.setdefault("DEPEND", "")
32998 - metadata.setdefault("SLOT", "0")
32999 - metadata.setdefault("KEYWORDS", "x86")
33000 - metadata.setdefault("IUSE", "")
33001 -
33002 - unknown_keys = set(metadata).difference(
33003 - portage.dbapi.dbapi._known_keys)
33004 - if unknown_keys:
33005 - raise ValueError("metadata of ebuild '%s' contains unknown keys: %s" %
33006 - (cpv, sorted(unknown_keys)))
33007 -
33008 - repo_dir = self._get_repo_dir(repo)
33009 - ebuild_dir = os.path.join(repo_dir, a.cp)
33010 - ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
33011 - try:
33012 - os.makedirs(ebuild_dir)
33013 - except os.error:
33014 - pass
33015 -
33016 - with open(ebuild_path, "w") as f:
33017 - if copyright_header is not None:
33018 - f.write(copyright_header)
33019 - f.write('EAPI="%s"\n' % eapi)
33020 - for k, v in metadata.items():
33021 - f.write('%s="%s"\n' % (k, v))
33022 - if misc_content is not None:
33023 - f.write(misc_content)
33024 -
33025 - def _create_ebuild_manifests(self, ebuilds):
33026 - tmpsettings = config(clone=self.settings)
33027 - tmpsettings['PORTAGE_QUIET'] = '1'
33028 - for cpv in ebuilds:
33029 - a = Atom("=" + cpv, allow_repo=True)
33030 - repo = a.repo
33031 - if repo is None:
33032 - repo = "test_repo"
33033 -
33034 - repo_dir = self._get_repo_dir(repo)
33035 - ebuild_dir = os.path.join(repo_dir, a.cp)
33036 - ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
33037 -
33038 - portdb = self.trees[self.eroot]["porttree"].dbapi
33039 - tmpsettings['O'] = ebuild_dir
33040 - if not digestgen(mysettings=tmpsettings, myportdb=portdb):
33041 - raise AssertionError('digest creation failed for %s' % ebuild_path)
33042 -
33043 - def _create_binpkgs(self, binpkgs):
33044 - # When using BUILD_ID, there can be mutiple instances for the
33045 - # same cpv. Therefore, binpkgs may be an iterable instead of
33046 - # a dict.
33047 - items = getattr(binpkgs, 'items', None)
33048 - items = items() if items is not None else binpkgs
33049 - for cpv, metadata in items:
33050 - a = Atom("=" + cpv, allow_repo=True)
33051 - repo = a.repo
33052 - if repo is None:
33053 - repo = "test_repo"
33054 -
33055 - pn = catsplit(a.cp)[1]
33056 - cat, pf = catsplit(a.cpv)
33057 - metadata = metadata.copy()
33058 - metadata.setdefault("SLOT", "0")
33059 - metadata.setdefault("KEYWORDS", "x86")
33060 - metadata.setdefault("BUILD_TIME", "0")
33061 - metadata["repository"] = repo
33062 - metadata["CATEGORY"] = cat
33063 - metadata["PF"] = pf
33064 - metadata["EPREFIX"] = self.eprefix
33065 -
33066 - repo_dir = self.pkgdir
33067 - category_dir = os.path.join(repo_dir, cat)
33068 - if "BUILD_ID" in metadata:
33069 - binpkg_path = os.path.join(category_dir, pn,
33070 - "%s-%s.xpak"% (pf, metadata["BUILD_ID"]))
33071 - else:
33072 - binpkg_path = os.path.join(category_dir, pf + ".tbz2")
33073 -
33074 - ensure_dirs(os.path.dirname(binpkg_path))
33075 - t = portage.xpak.tbz2(binpkg_path)
33076 - t.recompose_mem(portage.xpak.xpak_mem(metadata))
33077 -
33078 - def _create_installed(self, installed):
33079 - for cpv in installed:
33080 - a = Atom("=" + cpv, allow_repo=True)
33081 - repo = a.repo
33082 - if repo is None:
33083 - repo = "test_repo"
33084 -
33085 - vdb_pkg_dir = os.path.join(self.vdbdir, a.cpv)
33086 - try:
33087 - os.makedirs(vdb_pkg_dir)
33088 - except os.error:
33089 - pass
33090 -
33091 - metadata = installed[cpv].copy()
33092 - metadata.setdefault("SLOT", "0")
33093 - metadata.setdefault("BUILD_TIME", "0")
33094 - metadata.setdefault("COUNTER", "0")
33095 - metadata.setdefault("KEYWORDS", "~x86")
33096 -
33097 - unknown_keys = set(metadata).difference(
33098 - portage.dbapi.dbapi._known_keys)
33099 - unknown_keys.discard("BUILD_TIME")
33100 - unknown_keys.discard("BUILD_ID")
33101 - unknown_keys.discard("COUNTER")
33102 - unknown_keys.discard("repository")
33103 - unknown_keys.discard("USE")
33104 - unknown_keys.discard("PROVIDES")
33105 - unknown_keys.discard("REQUIRES")
33106 - if unknown_keys:
33107 - raise ValueError("metadata of installed '%s' contains unknown keys: %s" %
33108 - (cpv, sorted(unknown_keys)))
33109 -
33110 - metadata["repository"] = repo
33111 - for k, v in metadata.items():
33112 - with open(os.path.join(vdb_pkg_dir, k), "w") as f:
33113 - f.write("%s\n" % v)
33114 -
33115 - ebuild_path = os.path.join(vdb_pkg_dir, a.cpv.split("/")[1] + ".ebuild")
33116 - with open(ebuild_path, "w") as f:
33117 - f.write('EAPI="%s"\n' % metadata.pop('EAPI', '0'))
33118 - for k, v in metadata.items():
33119 - f.write('%s="%s"\n' % (k, v))
33120 -
33121 - env_path = os.path.join(vdb_pkg_dir, 'environment.bz2')
33122 - with bz2.BZ2File(env_path, mode='w') as f:
33123 - with open(ebuild_path, 'rb') as inputfile:
33124 - f.write(inputfile.read())
33125 -
33126 - def _create_profile(self, ebuilds, eclasses, installed, profile, repo_configs, user_config, sets):
33127 -
33128 - user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)
33129 -
33130 - try:
33131 - os.makedirs(user_config_dir)
33132 - except os.error:
33133 - pass
33134 -
33135 - for repo in self._repositories:
33136 - if repo == "DEFAULT":
33137 - continue
33138 -
33139 - repo_dir = self._get_repo_dir(repo)
33140 - profile_dir = os.path.join(repo_dir, "profiles")
33141 - metadata_dir = os.path.join(repo_dir, "metadata")
33142 - os.makedirs(metadata_dir)
33143 -
33144 - #Create $REPO/profiles/categories
33145 - categories = set()
33146 - for cpv in ebuilds:
33147 - ebuilds_repo = Atom("="+cpv, allow_repo=True).repo
33148 - if ebuilds_repo is None:
33149 - ebuilds_repo = "test_repo"
33150 - if ebuilds_repo == repo:
33151 - categories.add(catsplit(cpv)[0])
33152 -
33153 - categories_file = os.path.join(profile_dir, "categories")
33154 - with open(categories_file, "w") as f:
33155 - for cat in categories:
33156 - f.write(cat + "\n")
33157 -
33158 - #Create $REPO/profiles/license_groups
33159 - license_file = os.path.join(profile_dir, "license_groups")
33160 - with open(license_file, "w") as f:
33161 - f.write("EULA TEST\n")
33162 -
33163 - repo_config = repo_configs.get(repo)
33164 - if repo_config:
33165 - for config_file, lines in repo_config.items():
33166 - if config_file not in self.config_files and not any(fnmatch.fnmatch(config_file, os.path.join(x, "*")) for x in self.config_files):
33167 - raise ValueError("Unknown config file: '%s'" % config_file)
33168 -
33169 - if config_file in ("layout.conf",):
33170 - file_name = os.path.join(repo_dir, "metadata", config_file)
33171 - else:
33172 - file_name = os.path.join(profile_dir, config_file)
33173 - if "/" in config_file and not os.path.isdir(os.path.dirname(file_name)):
33174 - os.makedirs(os.path.dirname(file_name))
33175 - with open(file_name, "w") as f:
33176 - for line in lines:
33177 - f.write("%s\n" % line)
33178 - # Temporarily write empty value of masters until it becomes default.
33179 - # TODO: Delete all references to "# use implicit masters" when empty value becomes default.
33180 - if config_file == "layout.conf" and not any(line.startswith(("masters =", "# use implicit masters")) for line in lines):
33181 - f.write("masters =\n")
33182 -
33183 - #Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
33184 - eclass_dir = os.path.join(repo_dir, "eclass")
33185 - os.makedirs(eclass_dir)
33186 -
33187 - for eclass_name, eclass_content in eclasses.items():
33188 - with open(os.path.join(eclass_dir, "{}.eclass".format(eclass_name)), 'wt') as f:
33189 - if isinstance(eclass_content, str):
33190 - eclass_content = [eclass_content]
33191 - for line in eclass_content:
33192 - f.write("{}\n".format(line))
33193 -
33194 - # Temporarily write empty value of masters until it becomes default.
33195 - if not repo_config or "layout.conf" not in repo_config:
33196 - layout_conf_path = os.path.join(repo_dir, "metadata", "layout.conf")
33197 - with open(layout_conf_path, "w") as f:
33198 - f.write("masters =\n")
33199 -
33200 - if repo == "test_repo":
33201 - #Create a minimal profile in /var/db/repos/gentoo
33202 - sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
33203 - os.makedirs(sub_profile_dir)
33204 -
33205 - if not (profile and "eapi" in profile):
33206 - eapi_file = os.path.join(sub_profile_dir, "eapi")
33207 - with open(eapi_file, "w") as f:
33208 - f.write("0\n")
33209 -
33210 - make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
33211 - with open(make_defaults_file, "w") as f:
33212 - f.write("ARCH=\"x86\"\n")
33213 - f.write("ACCEPT_KEYWORDS=\"x86\"\n")
33214 -
33215 - use_force_file = os.path.join(sub_profile_dir, "use.force")
33216 - with open(use_force_file, "w") as f:
33217 - f.write("x86\n")
33218 -
33219 - parent_file = os.path.join(sub_profile_dir, "parent")
33220 - with open(parent_file, "w") as f:
33221 - f.write("..\n")
33222 -
33223 - if profile:
33224 - for config_file, lines in profile.items():
33225 - if config_file not in self.config_files:
33226 - raise ValueError("Unknown config file: '%s'" % config_file)
33227 -
33228 - file_name = os.path.join(sub_profile_dir, config_file)
33229 - with open(file_name, "w") as f:
33230 - for line in lines:
33231 - f.write("%s\n" % line)
33232 -
33233 - #Create profile symlink
33234 - os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile"))
33235 -
33236 - make_conf = {
33237 - "ACCEPT_KEYWORDS": "x86",
33238 - "CLEAN_DELAY": "0",
33239 - "DISTDIR" : self.distdir,
33240 - "EMERGE_WARNING_DELAY": "0",
33241 - "PKGDIR": self.pkgdir,
33242 - "PORTAGE_INST_GID": str(portage.data.portage_gid),
33243 - "PORTAGE_INST_UID": str(portage.data.portage_uid),
33244 - "PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'),
33245 - }
33246 -
33247 - if os.environ.get("NOCOLOR"):
33248 - make_conf["NOCOLOR"] = os.environ["NOCOLOR"]
33249 -
33250 - # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
33251 - # need to be inherited by ebuild subprocesses.
33252 - if 'PORTAGE_USERNAME' in os.environ:
33253 - make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
33254 - if 'PORTAGE_GRPNAME' in os.environ:
33255 - make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
33256 -
33257 - make_conf_lines = []
33258 - for k_v in make_conf.items():
33259 - make_conf_lines.append('%s="%s"' % k_v)
33260 -
33261 - if "make.conf" in user_config:
33262 - make_conf_lines.extend(user_config["make.conf"])
33263 -
33264 - if not portage.process.sandbox_capable or \
33265 - os.environ.get("SANDBOX_ON") == "1":
33266 - # avoid problems from nested sandbox instances
33267 - make_conf_lines.append('FEATURES="${FEATURES} -sandbox -usersandbox"')
33268 -
33269 - configs = user_config.copy()
33270 - configs["make.conf"] = make_conf_lines
33271 -
33272 - for config_file, lines in configs.items():
33273 - if config_file not in self.config_files:
33274 - raise ValueError("Unknown config file: '%s'" % config_file)
33275 -
33276 - file_name = os.path.join(user_config_dir, config_file)
33277 - with open(file_name, "w") as f:
33278 - for line in lines:
33279 - f.write("%s\n" % line)
33280 -
33281 - #Create /usr/share/portage/config/make.globals
33282 - make_globals_path = os.path.join(self.eroot,
33283 - GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals")
33284 - ensure_dirs(os.path.dirname(make_globals_path))
33285 - os.symlink(os.path.join(cnf_path, "make.globals"),
33286 - make_globals_path)
33287 -
33288 - #Create /usr/share/portage/config/sets/portage.conf
33289 - default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")
33290 -
33291 - try:
33292 - os.makedirs(default_sets_conf_dir)
33293 - except os.error:
33294 - pass
33295 -
33296 - provided_sets_portage_conf = (
33297 - os.path.join(cnf_path, "sets", "portage.conf"))
33298 - os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"))
33299 -
33300 - set_config_dir = os.path.join(user_config_dir, "sets")
33301 -
33302 - try:
33303 - os.makedirs(set_config_dir)
33304 - except os.error:
33305 - pass
33306 -
33307 - for sets_file, lines in sets.items():
33308 - file_name = os.path.join(set_config_dir, sets_file)
33309 - with open(file_name, "w") as f:
33310 - for line in lines:
33311 - f.write("%s\n" % line)
33312 -
33313 - if cnf_path_repoman is not None:
33314 - #Create /usr/share/repoman
33315 - repoman_share_dir = os.path.join(self.eroot, 'usr', 'share', 'repoman')
33316 - os.symlink(cnf_path_repoman, repoman_share_dir)
33317 -
33318 - def _create_world(self, world, world_sets):
33319 - #Create /var/lib/portage/world
33320 - var_lib_portage = os.path.join(self.eroot, "var", "lib", "portage")
33321 - os.makedirs(var_lib_portage)
33322 -
33323 - world_file = os.path.join(var_lib_portage, "world")
33324 - world_set_file = os.path.join(var_lib_portage, "world_sets")
33325 -
33326 - with open(world_file, "w") as f:
33327 - for atom in world:
33328 - f.write("%s\n" % atom)
33329 -
33330 - with open(world_set_file, "w") as f:
33331 - for atom in world_sets:
33332 - f.write("%s\n" % atom)
33333 -
33334 - def _load_config(self):
33335 -
33336 - create_trees_kwargs = {}
33337 - if self.target_root != os.sep:
33338 - create_trees_kwargs["target_root"] = self.target_root
33339 -
33340 - env = {
33341 - "PORTAGE_REPOSITORIES": "\n".join("[%s]\n%s" % (repo_name, "\n".join("%s = %s" % (k, v) for k, v in repo_config.items())) for repo_name, repo_config in self._repositories.items())
33342 - }
33343 -
33344 - if self.debug:
33345 - env["PORTAGE_DEBUG"] = "1"
33346 -
33347 - trees = portage.create_trees(env=env, eprefix=self.eprefix,
33348 - **create_trees_kwargs)
33349 -
33350 - for root, root_trees in trees.items():
33351 - settings = root_trees["vartree"].settings
33352 - settings._init_dirs()
33353 - setconfig = load_default_config(settings, root_trees)
33354 - root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
33355 -
33356 - return trees[trees._target_eroot]["vartree"].settings, trees
33357 -
33358 - def run(self, atoms, options={}, action=None):
33359 - options = options.copy()
33360 - options["--pretend"] = True
33361 - if self.debug:
33362 - options["--debug"] = True
33363 -
33364 - if action is None:
33365 - if options.get("--depclean"):
33366 - action = "depclean"
33367 - elif options.get("--prune"):
33368 - action = "prune"
33369 -
33370 - if "--usepkgonly" in options:
33371 - options["--usepkg"] = True
33372 -
33373 - global_noiselimit = portage.util.noiselimit
33374 - global_emergelog_disable = _emerge.emergelog._disable
33375 - try:
33376 -
33377 - if not self.debug:
33378 - portage.util.noiselimit = -2
33379 - _emerge.emergelog._disable = True
33380 -
33381 - if action in ("depclean", "prune"):
33382 - depclean_result = _calc_depclean(self.settings, self.trees, None,
33383 - options, action, InternalPackageSet(initial_atoms=atoms, allow_wildcard=True), None)
33384 - result = ResolverPlaygroundDepcleanResult(
33385 - atoms,
33386 - depclean_result.returncode,
33387 - depclean_result.cleanlist,
33388 - depclean_result.ordered,
33389 - depclean_result.req_pkg_count,
33390 - depclean_result.depgraph,
33391 - )
33392 - else:
33393 - params = create_depgraph_params(options, action)
33394 - success, depgraph, favorites = backtrack_depgraph(
33395 - self.settings, self.trees, options, params, action, atoms, None)
33396 - depgraph._show_merge_list()
33397 - depgraph.display_problems()
33398 - result = ResolverPlaygroundResult(atoms, success, depgraph, favorites)
33399 - finally:
33400 - portage.util.noiselimit = global_noiselimit
33401 - _emerge.emergelog._disable = global_emergelog_disable
33402 -
33403 - return result
33404 -
33405 - def run_TestCase(self, test_case):
33406 - if not isinstance(test_case, ResolverPlaygroundTestCase):
33407 - raise TypeError("ResolverPlayground needs a ResolverPlaygroundTestCase")
33408 - for atoms in test_case.requests:
33409 - result = self.run(atoms, test_case.options, test_case.action)
33410 - if not test_case.compare_with_result(result):
33411 - return
33412 -
33413 - def cleanup(self):
33414 - for eroot in self.trees:
33415 - portdb = self.trees[eroot]["porttree"].dbapi
33416 - portdb.close_caches()
33417 - if self.debug:
33418 - print("\nEROOT=%s" % self.eroot)
33419 - else:
33420 - shutil.rmtree(self.eroot)
33421 - if hasattr(self, '_orig_eprefix'):
33422 - portage.const.EPREFIX = self._orig_eprefix
33423 + portage_bin = (
33424 + "ebuild",
33425 + "egencache",
33426 + "emerge",
33427 + "emerge-webrsync",
33428 + "emirrordist",
33429 + "glsa-check",
33430 + "portageq",
33431 + "quickpkg",
33432 + )
33433 +
33434 + portage_sbin = (
33435 + "archive-conf",
33436 + "dispatch-conf",
33437 + "emaint",
33438 + "env-update",
33439 + "etc-update",
33440 + "fixpackages",
33441 + "regenworld",
33442 + )
33443 +
33444 + def __init__(
33445 + self,
33446 + ebuilds={},
33447 + binpkgs={},
33448 + installed={},
33449 + profile={},
33450 + repo_configs={},
33451 + user_config={},
33452 + sets={},
33453 + world=[],
33454 + world_sets=[],
33455 + distfiles={},
33456 + eclasses={},
33457 + eprefix=None,
33458 + targetroot=False,
33459 + debug=False,
33460 + ):
33461 + """
33462 + ebuilds: cpv -> metadata mapping simulating available ebuilds.
33463 + installed: cpv -> metadata mapping simulating installed packages.
33464 + If a metadata key is missing, it gets a default value.
33465 + profile: settings defined by the profile.
33466 + """
33467 +
33468 + self.debug = debug
33469 + if eprefix is None:
33470 + self.eprefix = normalize_path(tempfile.mkdtemp())
33471 +
33472 + # EPREFIX/bin is used by fake true_binaries. Real binaries go into EPREFIX/usr/bin
33473 + eubin = os.path.join(self.eprefix, "usr", "bin")
33474 + ensure_dirs(eubin)
33475 + for x in self.portage_bin:
33476 + os.symlink(os.path.join(PORTAGE_BIN_PATH, x), os.path.join(eubin, x))
33477 +
33478 + eusbin = os.path.join(self.eprefix, "usr", "sbin")
33479 + ensure_dirs(eusbin)
33480 + for x in self.portage_sbin:
33481 + os.symlink(os.path.join(PORTAGE_BIN_PATH, x), os.path.join(eusbin, x))
33482 +
33483 + essential_binaries = (
33484 + "awk",
33485 + "basename",
33486 + "bzip2",
33487 + "cat",
33488 + "chgrp",
33489 + "chmod",
33490 + "chown",
33491 + "comm",
33492 + "cp",
33493 + "egrep",
33494 + "env",
33495 + "find",
33496 + "grep",
33497 + "head",
33498 + "install",
33499 + "ln",
33500 + "mkdir",
33501 + "mkfifo",
33502 + "mktemp",
33503 + "mv",
33504 + "readlink",
33505 + "rm",
33506 + "sed",
33507 + "sort",
33508 + "tar",
33509 + "tr",
33510 + "uname",
33511 + "uniq",
33512 + "xargs",
33513 + "zstd",
33514 + )
33515 + # Exclude internal wrappers from PATH lookup.
33516 + orig_path = os.environ["PATH"]
33517 + included_paths = []
33518 + for path in orig_path.split(":"):
33519 + if path and not fnmatch.fnmatch(path, "*/portage/*/ebuild-helpers*"):
33520 + included_paths.append(path)
33521 + try:
33522 + os.environ["PATH"] = ":".join(included_paths)
33523 + for x in essential_binaries:
33524 + path = find_binary(x)
33525 + if path is None:
33526 + raise portage.exception.CommandNotFound(x)
33527 + os.symlink(path, os.path.join(eubin, x))
33528 + finally:
33529 + os.environ["PATH"] = orig_path
33530 + else:
33531 + self.eprefix = normalize_path(eprefix)
33532 +
33533 + # Tests may override portage.const.EPREFIX in order to
33534 + # simulate a prefix installation. It's reasonable to do
33535 + # this because tests should be self-contained such that
33536 + # the "real" value of portage.const.EPREFIX is entirely
33537 + # irrelevant (see bug #492932).
33538 + self._orig_eprefix = portage.const.EPREFIX
33539 + portage.const.EPREFIX = self.eprefix.rstrip(os.sep)
33540 +
33541 + self.eroot = self.eprefix + os.sep
33542 + if targetroot:
33543 + self.target_root = os.path.join(self.eroot, "target_root")
33544 + else:
33545 + self.target_root = os.sep
33546 + self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
33547 + self.pkgdir = os.path.join(self.eprefix, "pkgdir")
33548 + self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
33549 + os.makedirs(self.vdbdir)
33550 +
33551 + if not debug:
33552 + portage.util.noiselimit = -2
33553 +
33554 + self._repositories = {}
33555 + # Make sure the main repo is always created
33556 + self._get_repo_dir("test_repo")
33557 +
33558 + self._create_distfiles(distfiles)
33559 + self._create_ebuilds(ebuilds)
33560 + self._create_binpkgs(binpkgs)
33561 + self._create_installed(installed)
33562 + self._create_profile(
33563 + ebuilds, eclasses, installed, profile, repo_configs, user_config, sets
33564 + )
33565 + self._create_world(world, world_sets)
33566 +
33567 + self.settings, self.trees = self._load_config()
33568 +
33569 + self._create_ebuild_manifests(ebuilds)
33570 +
33571 + portage.util.noiselimit = 0
33572 +
33573 + def reload_config(self):
33574 + """
33575 + Reload configuration from disk, which is useful if it has
33576 + been modified after the constructor has been called.
33577 + """
33578 + for eroot in self.trees:
33579 + portdb = self.trees[eroot]["porttree"].dbapi
33580 + portdb.close_caches()
33581 + self.settings, self.trees = self._load_config()
33582 +
33583 + def _get_repo_dir(self, repo):
33584 + """
33585 + Create the repo directory if needed.
33586 + """
33587 + if repo not in self._repositories:
33588 + if repo == "test_repo":
33589 + self._repositories["DEFAULT"] = {"main-repo": repo}
33590 +
33591 + repo_path = os.path.join(self.eroot, "var", "repositories", repo)
33592 + self._repositories[repo] = {"location": repo_path}
33593 + profile_path = os.path.join(repo_path, "profiles")
33594 +
33595 + try:
33596 + os.makedirs(profile_path)
33597 + except os.error:
33598 + pass
33599 +
33600 + repo_name_file = os.path.join(profile_path, "repo_name")
33601 + with open(repo_name_file, "w") as f:
33602 + f.write("%s\n" % repo)
33603 +
33604 + return self._repositories[repo]["location"]
33605 +
33606 + def _create_distfiles(self, distfiles):
33607 + os.makedirs(self.distdir)
33608 + for k, v in distfiles.items():
33609 + with open(os.path.join(self.distdir, k), "wb") as f:
33610 + f.write(v)
33611 +
33612 + def _create_ebuilds(self, ebuilds):
33613 + for cpv in ebuilds:
33614 + a = Atom("=" + cpv, allow_repo=True)
33615 + repo = a.repo
33616 + if repo is None:
33617 + repo = "test_repo"
33618 +
33619 + metadata = ebuilds[cpv].copy()
33620 + copyright_header = metadata.pop("COPYRIGHT_HEADER", None)
33621 + eapi = metadata.pop("EAPI", "0")
33622 + misc_content = metadata.pop("MISC_CONTENT", None)
33623 + metadata.setdefault("DEPEND", "")
33624 + metadata.setdefault("SLOT", "0")
33625 + metadata.setdefault("KEYWORDS", "x86")
33626 + metadata.setdefault("IUSE", "")
33627 +
33628 + unknown_keys = set(metadata).difference(portage.dbapi.dbapi._known_keys)
33629 + if unknown_keys:
33630 + raise ValueError(
33631 + "metadata of ebuild '%s' contains unknown keys: %s"
33632 + % (cpv, sorted(unknown_keys))
33633 + )
33634 +
33635 + repo_dir = self._get_repo_dir(repo)
33636 + ebuild_dir = os.path.join(repo_dir, a.cp)
33637 + ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
33638 + try:
33639 + os.makedirs(ebuild_dir)
33640 + except os.error:
33641 + pass
33642 +
33643 + with open(ebuild_path, "w") as f:
33644 + if copyright_header is not None:
33645 + f.write(copyright_header)
33646 + f.write('EAPI="%s"\n' % eapi)
33647 + for k, v in metadata.items():
33648 + f.write('%s="%s"\n' % (k, v))
33649 + if misc_content is not None:
33650 + f.write(misc_content)
33651 +
33652 + def _create_ebuild_manifests(self, ebuilds):
33653 + tmpsettings = config(clone=self.settings)
33654 + tmpsettings["PORTAGE_QUIET"] = "1"
33655 + for cpv in ebuilds:
33656 + a = Atom("=" + cpv, allow_repo=True)
33657 + repo = a.repo
33658 + if repo is None:
33659 + repo = "test_repo"
33660 +
33661 + repo_dir = self._get_repo_dir(repo)
33662 + ebuild_dir = os.path.join(repo_dir, a.cp)
33663 + ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
33664 +
33665 + portdb = self.trees[self.eroot]["porttree"].dbapi
33666 + tmpsettings["O"] = ebuild_dir
33667 + if not digestgen(mysettings=tmpsettings, myportdb=portdb):
33668 + raise AssertionError("digest creation failed for %s" % ebuild_path)
33669 +
33670 + def _create_binpkgs(self, binpkgs):
33671 +        # When using BUILD_ID, there can be multiple instances for the
33672 + # same cpv. Therefore, binpkgs may be an iterable instead of
33673 + # a dict.
33674 + items = getattr(binpkgs, "items", None)
33675 + items = items() if items is not None else binpkgs
33676 + for cpv, metadata in items:
33677 + a = Atom("=" + cpv, allow_repo=True)
33678 + repo = a.repo
33679 + if repo is None:
33680 + repo = "test_repo"
33681 +
33682 + pn = catsplit(a.cp)[1]
33683 + cat, pf = catsplit(a.cpv)
33684 + metadata = metadata.copy()
33685 + metadata.setdefault("SLOT", "0")
33686 + metadata.setdefault("KEYWORDS", "x86")
33687 + metadata.setdefault("BUILD_TIME", "0")
33688 + metadata["repository"] = repo
33689 + metadata["CATEGORY"] = cat
33690 + metadata["PF"] = pf
33691 ++ # PREFIX LOCAL
33692 ++ metadata["EPREFIX"] = self.eprefix
33693 +
33694 + repo_dir = self.pkgdir
33695 + category_dir = os.path.join(repo_dir, cat)
33696 + if "BUILD_ID" in metadata:
33697 + binpkg_path = os.path.join(
33698 + category_dir, pn, "%s-%s.xpak" % (pf, metadata["BUILD_ID"])
33699 + )
33700 + else:
33701 + binpkg_path = os.path.join(category_dir, pf + ".tbz2")
33702 +
33703 + ensure_dirs(os.path.dirname(binpkg_path))
33704 + t = portage.xpak.tbz2(binpkg_path)
33705 + t.recompose_mem(portage.xpak.xpak_mem(metadata))
33706 +
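Since the dict/iterable duality above is easy to miss, here is a sketch of both accepted shapes for the binpkgs argument (hypothetical cpvs):

    # As a dict: at most one binary package per cpv.
    binpkgs = {"dev-libs/A-1": {"EAPI": "7"}}

    # As an iterable of (cpv, metadata) pairs: several builds of the same
    # cpv may coexist, distinguished by BUILD_ID in the .xpak file name.
    binpkgs = [
        ("dev-libs/A-1", {"EAPI": "7", "BUILD_ID": "1"}),
        ("dev-libs/A-1", {"EAPI": "7", "BUILD_ID": "2"}),
    ]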
33707 + def _create_installed(self, installed):
33708 + for cpv in installed:
33709 + a = Atom("=" + cpv, allow_repo=True)
33710 + repo = a.repo
33711 + if repo is None:
33712 + repo = "test_repo"
33713 +
33714 + vdb_pkg_dir = os.path.join(self.vdbdir, a.cpv)
33715 + try:
33716 + os.makedirs(vdb_pkg_dir)
33717 + except os.error:
33718 + pass
33719 +
33720 + metadata = installed[cpv].copy()
33721 + metadata.setdefault("SLOT", "0")
33722 + metadata.setdefault("BUILD_TIME", "0")
33723 + metadata.setdefault("COUNTER", "0")
33724 + metadata.setdefault("KEYWORDS", "~x86")
33725 +
33726 + unknown_keys = set(metadata).difference(portage.dbapi.dbapi._known_keys)
33727 + unknown_keys.discard("BUILD_TIME")
33728 + unknown_keys.discard("BUILD_ID")
33729 + unknown_keys.discard("COUNTER")
33730 + unknown_keys.discard("repository")
33731 + unknown_keys.discard("USE")
33732 + unknown_keys.discard("PROVIDES")
33733 + unknown_keys.discard("REQUIRES")
33734 + if unknown_keys:
33735 + raise ValueError(
33736 + "metadata of installed '%s' contains unknown keys: %s"
33737 + % (cpv, sorted(unknown_keys))
33738 + )
33739 +
33740 + metadata["repository"] = repo
33741 + for k, v in metadata.items():
33742 + with open(os.path.join(vdb_pkg_dir, k), "w") as f:
33743 + f.write("%s\n" % v)
33744 +
33745 + ebuild_path = os.path.join(vdb_pkg_dir, a.cpv.split("/")[1] + ".ebuild")
33746 + with open(ebuild_path, "w") as f:
33747 + f.write('EAPI="%s"\n' % metadata.pop("EAPI", "0"))
33748 + for k, v in metadata.items():
33749 + f.write('%s="%s"\n' % (k, v))
33750 +
33751 + env_path = os.path.join(vdb_pkg_dir, "environment.bz2")
33752 + with bz2.BZ2File(env_path, mode="w") as f:
33753 + with open(ebuild_path, "rb") as inputfile:
33754 + f.write(inputfile.read())
33755 +
33756 + def _create_profile(
33757 + self, ebuilds, eclasses, installed, profile, repo_configs, user_config, sets
33758 + ):
33759 +
33760 + user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)
33761 +
33762 + try:
33763 + os.makedirs(user_config_dir)
33764 + except os.error:
33765 + pass
33766 +
33767 + for repo in self._repositories:
33768 + if repo == "DEFAULT":
33769 + continue
33770 +
33771 + repo_dir = self._get_repo_dir(repo)
33772 + profile_dir = os.path.join(repo_dir, "profiles")
33773 + metadata_dir = os.path.join(repo_dir, "metadata")
33774 + os.makedirs(metadata_dir)
33775 +
33776 + # Create $REPO/profiles/categories
33777 + categories = set()
33778 + for cpv in ebuilds:
33779 + ebuilds_repo = Atom("=" + cpv, allow_repo=True).repo
33780 + if ebuilds_repo is None:
33781 + ebuilds_repo = "test_repo"
33782 + if ebuilds_repo == repo:
33783 + categories.add(catsplit(cpv)[0])
33784 +
33785 + categories_file = os.path.join(profile_dir, "categories")
33786 + with open(categories_file, "w") as f:
33787 + for cat in categories:
33788 + f.write(cat + "\n")
33789 +
33790 + # Create $REPO/profiles/license_groups
33791 + license_file = os.path.join(profile_dir, "license_groups")
33792 + with open(license_file, "w") as f:
33793 + f.write("EULA TEST\n")
33794 +
33795 + repo_config = repo_configs.get(repo)
33796 + if repo_config:
33797 + for config_file, lines in repo_config.items():
33798 + if config_file not in self.config_files and not any(
33799 + fnmatch.fnmatch(config_file, os.path.join(x, "*"))
33800 + for x in self.config_files
33801 + ):
33802 + raise ValueError("Unknown config file: '%s'" % config_file)
33803 +
33804 + if config_file in ("layout.conf",):
33805 + file_name = os.path.join(repo_dir, "metadata", config_file)
33806 + else:
33807 + file_name = os.path.join(profile_dir, config_file)
33808 + if "/" in config_file and not os.path.isdir(
33809 + os.path.dirname(file_name)
33810 + ):
33811 + os.makedirs(os.path.dirname(file_name))
33812 + with open(file_name, "w") as f:
33813 + for line in lines:
33814 + f.write("%s\n" % line)
33815 +                        # Temporarily write an empty masters value until that becomes the default.
33816 +                        # TODO: Delete all references to "# use implicit masters" when the empty value becomes the default.
33817 + if config_file == "layout.conf" and not any(
33818 + line.startswith(("masters =", "# use implicit masters"))
33819 + for line in lines
33820 + ):
33821 + f.write("masters =\n")
33822 +
33823 +            # Create $repo_dir/eclass (we fail to digest the ebuilds if it's not there)
33824 + eclass_dir = os.path.join(repo_dir, "eclass")
33825 + os.makedirs(eclass_dir)
33826 +
33827 + for eclass_name, eclass_content in eclasses.items():
33828 + with open(
33829 + os.path.join(eclass_dir, "{}.eclass".format(eclass_name)), "wt"
33830 + ) as f:
33831 + if isinstance(eclass_content, str):
33832 + eclass_content = [eclass_content]
33833 + for line in eclass_content:
33834 + f.write("{}\n".format(line))
33835 +
33836 +            # Temporarily write an empty masters value until that becomes the default.
33837 + if not repo_config or "layout.conf" not in repo_config:
33838 + layout_conf_path = os.path.join(repo_dir, "metadata", "layout.conf")
33839 + with open(layout_conf_path, "w") as f:
33840 + f.write("masters =\n")
33841 +
33842 + if repo == "test_repo":
33843 + # Create a minimal profile in /var/db/repos/gentoo
33844 + sub_profile_dir = os.path.join(
33845 + profile_dir, "default", "linux", "x86", "test_profile"
33846 + )
33847 + os.makedirs(sub_profile_dir)
33848 +
33849 + if not (profile and "eapi" in profile):
33850 + eapi_file = os.path.join(sub_profile_dir, "eapi")
33851 + with open(eapi_file, "w") as f:
33852 + f.write("0\n")
33853 +
33854 + make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
33855 + with open(make_defaults_file, "w") as f:
33856 + f.write('ARCH="x86"\n')
33857 + f.write('ACCEPT_KEYWORDS="x86"\n')
33858 +
33859 + use_force_file = os.path.join(sub_profile_dir, "use.force")
33860 + with open(use_force_file, "w") as f:
33861 + f.write("x86\n")
33862 +
33863 + parent_file = os.path.join(sub_profile_dir, "parent")
33864 + with open(parent_file, "w") as f:
33865 + f.write("..\n")
33866 +
33867 + if profile:
33868 + for config_file, lines in profile.items():
33869 + if config_file not in self.config_files:
33870 + raise ValueError("Unknown config file: '%s'" % config_file)
33871 +
33872 + file_name = os.path.join(sub_profile_dir, config_file)
33873 + with open(file_name, "w") as f:
33874 + for line in lines:
33875 + f.write("%s\n" % line)
33876 +
33877 + # Create profile symlink
33878 + os.symlink(
33879 + sub_profile_dir, os.path.join(user_config_dir, "make.profile")
33880 + )
33881 +
33882 + make_conf = {
33883 + "ACCEPT_KEYWORDS": "x86",
33884 + "CLEAN_DELAY": "0",
33885 + "DISTDIR": self.distdir,
33886 + "EMERGE_WARNING_DELAY": "0",
33887 + "PKGDIR": self.pkgdir,
33888 + "PORTAGE_INST_GID": str(portage.data.portage_gid),
33889 + "PORTAGE_INST_UID": str(portage.data.portage_uid),
33890 + "PORTAGE_TMPDIR": os.path.join(self.eroot, "var/tmp"),
33891 + }
33892 +
33893 + if os.environ.get("NOCOLOR"):
33894 + make_conf["NOCOLOR"] = os.environ["NOCOLOR"]
33895 +
33896 + # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
33897 + # need to be inherited by ebuild subprocesses.
33898 + if "PORTAGE_USERNAME" in os.environ:
33899 + make_conf["PORTAGE_USERNAME"] = os.environ["PORTAGE_USERNAME"]
33900 + if "PORTAGE_GRPNAME" in os.environ:
33901 + make_conf["PORTAGE_GRPNAME"] = os.environ["PORTAGE_GRPNAME"]
33902 +
33903 + make_conf_lines = []
33904 + for k_v in make_conf.items():
33905 + make_conf_lines.append('%s="%s"' % k_v)
33906 +
33907 + if "make.conf" in user_config:
33908 + make_conf_lines.extend(user_config["make.conf"])
33909 +
33910 + if not portage.process.sandbox_capable or os.environ.get("SANDBOX_ON") == "1":
33911 + # avoid problems from nested sandbox instances
33912 + make_conf_lines.append('FEATURES="${FEATURES} -sandbox -usersandbox"')
33913 +
33914 + configs = user_config.copy()
33915 + configs["make.conf"] = make_conf_lines
33916 +
33917 + for config_file, lines in configs.items():
33918 + if config_file not in self.config_files:
33919 + raise ValueError("Unknown config file: '%s'" % config_file)
33920 +
33921 + file_name = os.path.join(user_config_dir, config_file)
33922 + with open(file_name, "w") as f:
33923 + for line in lines:
33924 + f.write("%s\n" % line)
33925 +
33926 + # Create /usr/share/portage/config/make.globals
33927 + make_globals_path = os.path.join(
33928 + self.eroot, GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals"
33929 + )
33930 + ensure_dirs(os.path.dirname(make_globals_path))
33931 + os.symlink(os.path.join(cnf_path, "make.globals"), make_globals_path)
33932 +
33933 + # Create /usr/share/portage/config/sets/portage.conf
33934 + default_sets_conf_dir = os.path.join(
33935 + self.eroot, "usr/share/portage/config/sets"
33936 + )
33937 +
33938 + try:
33939 + os.makedirs(default_sets_conf_dir)
33940 + except os.error:
33941 + pass
33942 +
33943 + provided_sets_portage_conf = os.path.join(cnf_path, "sets", "portage.conf")
33944 + os.symlink(
33945 + provided_sets_portage_conf,
33946 + os.path.join(default_sets_conf_dir, "portage.conf"),
33947 + )
33948 +
33949 + set_config_dir = os.path.join(user_config_dir, "sets")
33950 +
33951 + try:
33952 + os.makedirs(set_config_dir)
33953 + except os.error:
33954 + pass
33955 +
33956 + for sets_file, lines in sets.items():
33957 + file_name = os.path.join(set_config_dir, sets_file)
33958 + with open(file_name, "w") as f:
33959 + for line in lines:
33960 + f.write("%s\n" % line)
33961 +
33962 + if cnf_path_repoman is not None:
33963 + # Create /usr/share/repoman
33964 + repoman_share_dir = os.path.join(self.eroot, "usr", "share", "repoman")
33965 + os.symlink(cnf_path_repoman, repoman_share_dir)
33966 +
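For reference, when a repository supplies no layout.conf of its own, the fallback above leaves a metadata file that is nothing more than:

    # $repo/metadata/layout.conf as written by _create_profile
    masters =

The empty masters value keeps each test repository self-contained instead of implicitly inheriting from the main repo.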
33967 + def _create_world(self, world, world_sets):
33968 + # Create /var/lib/portage/world
33969 + var_lib_portage = os.path.join(self.eroot, "var", "lib", "portage")
33970 + os.makedirs(var_lib_portage)
33971 +
33972 + world_file = os.path.join(var_lib_portage, "world")
33973 + world_set_file = os.path.join(var_lib_portage, "world_sets")
33974 +
33975 + with open(world_file, "w") as f:
33976 + for atom in world:
33977 + f.write("%s\n" % atom)
33978 +
33979 + with open(world_set_file, "w") as f:
33980 + for atom in world_sets:
33981 + f.write("%s\n" % atom)
33982 +
33983 + def _load_config(self):
33984 +
33985 + create_trees_kwargs = {}
33986 + if self.target_root != os.sep:
33987 + create_trees_kwargs["target_root"] = self.target_root
33988 +
33989 + env = {
33990 + "PORTAGE_REPOSITORIES": "\n".join(
33991 + "[%s]\n%s"
33992 + % (
33993 + repo_name,
33994 + "\n".join("%s = %s" % (k, v) for k, v in repo_config.items()),
33995 + )
33996 + for repo_name, repo_config in self._repositories.items()
33997 + )
33998 + }
33999 +
34000 + if self.debug:
34001 + env["PORTAGE_DEBUG"] = "1"
34002 +
34003 + trees = portage.create_trees(
34004 + env=env, eprefix=self.eprefix, **create_trees_kwargs
34005 + )
34006 +
34007 + for root, root_trees in trees.items():
34008 + settings = root_trees["vartree"].settings
34009 + settings._init_dirs()
34010 + setconfig = load_default_config(settings, root_trees)
34011 + root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
34012 +
34013 + return trees[trees._target_eroot]["vartree"].settings, trees
34014 +
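The PORTAGE_REPOSITORIES value assembled above is ordinary repos.conf-style text; sections are joined by single newlines, so with only the default test_repo the generated string looks roughly like this (<eroot> is a placeholder for the playground's root):

    [DEFAULT]
    main-repo = test_repo
    [test_repo]
    location = <eroot>/var/repositories/test_repo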
34015 + def run(self, atoms, options={}, action=None):
34016 + options = options.copy()
34017 + options["--pretend"] = True
34018 + if self.debug:
34019 + options["--debug"] = True
34020 +
34021 + if action is None:
34022 + if options.get("--depclean"):
34023 + action = "depclean"
34024 + elif options.get("--prune"):
34025 + action = "prune"
34026 +
34027 + if "--usepkgonly" in options:
34028 + options["--usepkg"] = True
34029 +
34030 + global_noiselimit = portage.util.noiselimit
34031 + global_emergelog_disable = _emerge.emergelog._disable
34032 + try:
34033 +
34034 + if not self.debug:
34035 + portage.util.noiselimit = -2
34036 + _emerge.emergelog._disable = True
34037 +
34038 + if action in ("depclean", "prune"):
34039 + depclean_result = _calc_depclean(
34040 + self.settings,
34041 + self.trees,
34042 + None,
34043 + options,
34044 + action,
34045 + InternalPackageSet(initial_atoms=atoms, allow_wildcard=True),
34046 + None,
34047 + )
34048 + result = ResolverPlaygroundDepcleanResult(
34049 + atoms,
34050 + depclean_result.returncode,
34051 + depclean_result.cleanlist,
34052 + depclean_result.ordered,
34053 + depclean_result.req_pkg_count,
34054 + depclean_result.depgraph,
34055 + )
34056 + else:
34057 + params = create_depgraph_params(options, action)
34058 + success, depgraph, favorites = backtrack_depgraph(
34059 + self.settings, self.trees, options, params, action, atoms, None
34060 + )
34061 + depgraph._show_merge_list()
34062 + depgraph.display_problems()
34063 + result = ResolverPlaygroundResult(atoms, success, depgraph, favorites)
34064 + finally:
34065 + portage.util.noiselimit = global_noiselimit
34066 + _emerge.emergelog._disable = global_emergelog_disable
34067 +
34068 + return result
34069 +
34070 + def run_TestCase(self, test_case):
34071 + if not isinstance(test_case, ResolverPlaygroundTestCase):
34072 + raise TypeError("ResolverPlayground needs a ResolverPlaygroundTestCase")
34073 + for atoms in test_case.requests:
34074 + result = self.run(atoms, test_case.options, test_case.action)
34075 + if not test_case.compare_with_result(result):
34076 + return
34077 +
34078 + def cleanup(self):
34079 + for eroot in self.trees:
34080 + portdb = self.trees[eroot]["porttree"].dbapi
34081 + portdb.close_caches()
34082 + if self.debug:
34083 + print("\nEROOT=%s" % self.eroot)
34084 + else:
34085 + shutil.rmtree(self.eroot)
34086 + if hasattr(self, "_orig_eprefix"):
34087 + portage.const.EPREFIX = self._orig_eprefix
34088
34089
34090 class ResolverPlaygroundTestCase:
34091 diff --cc lib/portage/util/__init__.py
34092 index 8c2f96f56,5ade7f660..11a7d0677
34093 --- a/lib/portage/util/__init__.py
34094 +++ b/lib/portage/util/__init__.py
34095 @@@ -904,918 -1053,970 +1054,983 @@@ def varexpand(mystring, mydict=None, er
34096 # broken and removed, but can still be imported
34097 pickle_write = None
34098
34099 +
34100 def pickle_read(filename, default=None, debug=0):
34101 - if not os.access(filename, os.R_OK):
34102 - writemsg(_("pickle_read(): File not readable. '") + filename + "'\n", 1)
34103 - return default
34104 - data = None
34105 - try:
34106 - myf = open(_unicode_encode(filename,
34107 - encoding=_encodings['fs'], errors='strict'), 'rb')
34108 - mypickle = pickle.Unpickler(myf)
34109 - data = mypickle.load()
34110 - myf.close()
34111 - del mypickle, myf
34112 - writemsg(_("pickle_read(): Loaded pickle. '") + filename + "'\n", 1)
34113 - except SystemExit as e:
34114 - raise
34115 - except Exception as e:
34116 - writemsg(_("!!! Failed to load pickle: ") + str(e) + "\n", 1)
34117 - data = default
34118 - return data
34119 + if not os.access(filename, os.R_OK):
34120 + writemsg(_("pickle_read(): File not readable. '") + filename + "'\n", 1)
34121 + return default
34122 + data = None
34123 + try:
34124 + myf = open(
34125 + _unicode_encode(filename, encoding=_encodings["fs"], errors="strict"), "rb"
34126 + )
34127 + mypickle = pickle.Unpickler(myf)
34128 + data = mypickle.load()
34129 + myf.close()
34130 + del mypickle, myf
34131 + writemsg(_("pickle_read(): Loaded pickle. '") + filename + "'\n", 1)
34132 + except SystemExit as e:
34133 + raise
34134 + except Exception as e:
34135 + writemsg(_("!!! Failed to load pickle: ") + str(e) + "\n", 1)
34136 + data = default
34137 + return data
34138 +
34139
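pickle_read's contract is best-effort: it returns default rather than raising when the file is missing, unreadable, or corrupt (only SystemExit propagates). A usage sketch with a hypothetical cache path:

    cache = pickle_read("/var/cache/edb/example.pickle", default={})
    # cache is {} if the file could not be read or unpickled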
34140 def dump_traceback(msg, noiselevel=1):
34141 - info = sys.exc_info()
34142 - if not info[2]:
34143 - stack = traceback.extract_stack()[:-1]
34144 - error = None
34145 - else:
34146 - stack = traceback.extract_tb(info[2])
34147 - error = str(info[1])
34148 - writemsg("\n====================================\n", noiselevel=noiselevel)
34149 - writemsg("%s\n\n" % msg, noiselevel=noiselevel)
34150 - for line in traceback.format_list(stack):
34151 - writemsg(line, noiselevel=noiselevel)
34152 - if error:
34153 - writemsg(error+"\n", noiselevel=noiselevel)
34154 - writemsg("====================================\n\n", noiselevel=noiselevel)
34155 + info = sys.exc_info()
34156 + if not info[2]:
34157 + stack = traceback.extract_stack()[:-1]
34158 + error = None
34159 + else:
34160 + stack = traceback.extract_tb(info[2])
34161 + error = str(info[1])
34162 + writemsg("\n====================================\n", noiselevel=noiselevel)
34163 + writemsg("%s\n\n" % msg, noiselevel=noiselevel)
34164 + for line in traceback.format_list(stack):
34165 + writemsg(line, noiselevel=noiselevel)
34166 + if error:
34167 + writemsg(error + "\n", noiselevel=noiselevel)
34168 + writemsg("====================================\n\n", noiselevel=noiselevel)
34169 +
34170
34171 class cmp_sort_key:
34172 - """
34173 - In python-3.0 the list.sort() method no longer has a "cmp" keyword
34174 - argument. This class acts as an adapter which converts a cmp function
34175 - into one that's suitable for use as the "key" keyword argument to
34176 - list.sort(), making it easier to port code for python-3.0 compatibility.
34177 - It works by generating key objects which use the given cmp function to
34178 - implement their __lt__ method.
34179 -
34180 - Beginning with Python 2.7 and 3.2, equivalent functionality is provided
34181 - by functools.cmp_to_key().
34182 - """
34183 - __slots__ = ("_cmp_func",)
34184 + """
34185 + In python-3.0 the list.sort() method no longer has a "cmp" keyword
34186 + argument. This class acts as an adapter which converts a cmp function
34187 + into one that's suitable for use as the "key" keyword argument to
34188 + list.sort(), making it easier to port code for python-3.0 compatibility.
34189 + It works by generating key objects which use the given cmp function to
34190 + implement their __lt__ method.
34191 +
34192 + Beginning with Python 2.7 and 3.2, equivalent functionality is provided
34193 + by functools.cmp_to_key().
34194 + """
34195 +
34196 + __slots__ = ("_cmp_func",)
34197
34198 - def __init__(self, cmp_func):
34199 - """
34200 - @type cmp_func: callable which takes 2 positional arguments
34201 - @param cmp_func: A cmp function.
34202 - """
34203 - self._cmp_func = cmp_func
34204 + def __init__(self, cmp_func):
34205 + """
34206 + @type cmp_func: callable which takes 2 positional arguments
34207 + @param cmp_func: A cmp function.
34208 + """
34209 + self._cmp_func = cmp_func
34210
34211 - def __call__(self, lhs):
34212 - return self._cmp_key(self._cmp_func, lhs)
34213 + def __call__(self, lhs):
34214 + return self._cmp_key(self._cmp_func, lhs)
34215
34216 - class _cmp_key:
34217 - __slots__ = ("_cmp_func", "_obj")
34218 + class _cmp_key:
34219 + __slots__ = ("_cmp_func", "_obj")
34220
34221 - def __init__(self, cmp_func, obj):
34222 - self._cmp_func = cmp_func
34223 - self._obj = obj
34224 + def __init__(self, cmp_func, obj):
34225 + self._cmp_func = cmp_func
34226 + self._obj = obj
34227 +
34228 + def __lt__(self, other):
34229 + if other.__class__ is not self.__class__:
34230 + raise TypeError(
34231 + "Expected type %s, got %s" % (self.__class__, other.__class__)
34232 + )
34233 + return self._cmp_func(self._obj, other._obj) < 0
34234
34235 - def __lt__(self, other):
34236 - if other.__class__ is not self.__class__:
34237 - raise TypeError("Expected type %s, got %s" % \
34238 - (self.__class__, other.__class__))
34239 - return self._cmp_func(self._obj, other._obj) < 0
34240
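As the docstring notes, this adapter fills the same role as functools.cmp_to_key(); a minimal usage sketch:

    # Sort numeric strings with a classic cmp-style function.
    def numeric_cmp(a, b):
        # cmp contract: negative, zero, or positive
        return (int(a) > int(b)) - (int(a) < int(b))

    sorted(["10", "2", "1"], key=cmp_sort_key(numeric_cmp))
    # -> ["1", "2", "10"]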
34241 def unique_array(s):
34242 - """lifted from python cookbook, credit: Tim Peters
34243 - Return a list of the elements in s in arbitrary order, sans duplicates"""
34244 - n = len(s)
34245 - # assume all elements are hashable, if so, it's linear
34246 - try:
34247 - return list(set(s))
34248 - except TypeError:
34249 - pass
34250 -
34251 - # so much for linear. abuse sort.
34252 - try:
34253 - t = list(s)
34254 - t.sort()
34255 - except TypeError:
34256 - pass
34257 - else:
34258 - assert n > 0
34259 - last = t[0]
34260 - lasti = i = 1
34261 - while i < n:
34262 - if t[i] != last:
34263 - t[lasti] = last = t[i]
34264 - lasti += 1
34265 - i += 1
34266 - return t[:lasti]
34267 -
34268 - # blah. back to original portage.unique_array
34269 - u = []
34270 - for x in s:
34271 - if x not in u:
34272 - u.append(x)
34273 - return u
34274 + """lifted from python cookbook, credit: Tim Peters
34275 + Return a list of the elements in s in arbitrary order, sans duplicates"""
34276 + n = len(s)
34277 + # assume all elements are hashable, if so, it's linear
34278 + try:
34279 + return list(set(s))
34280 + except TypeError:
34281 + pass
34282 +
34283 + # so much for linear. abuse sort.
34284 + try:
34285 + t = list(s)
34286 + t.sort()
34287 + except TypeError:
34288 + pass
34289 + else:
34290 + assert n > 0
34291 + last = t[0]
34292 + lasti = i = 1
34293 + while i < n:
34294 + if t[i] != last:
34295 + t[lasti] = last = t[i]
34296 + lasti += 1
34297 + i += 1
34298 + return t[:lasti]
34299 +
34300 + # blah. back to original portage.unique_array
34301 + u = []
34302 + for x in s:
34303 + if x not in u:
34304 + u.append(x)
34305 + return u
34306 +
34307
34308 def unique_everseen(iterable, key=None):
34309 - """
34310 - List unique elements, preserving order. Remember all elements ever seen.
34311 - Taken from itertools documentation.
34312 - """
34313 - # unique_everseen('AAAABBBCCDAABBB') --> A B C D
34314 - # unique_everseen('ABBCcAD', str.lower) --> A B C D
34315 - seen = set()
34316 - seen_add = seen.add
34317 - if key is None:
34318 - for element in filterfalse(seen.__contains__, iterable):
34319 - seen_add(element)
34320 - yield element
34321 - else:
34322 - for element in iterable:
34323 - k = key(element)
34324 - if k not in seen:
34325 - seen_add(k)
34326 - yield element
34327 + """
34328 + List unique elements, preserving order. Remember all elements ever seen.
34329 + Taken from itertools documentation.
34330 + """
34331 + # unique_everseen('AAAABBBCCDAABBB') --> A B C D
34332 + # unique_everseen('ABBCcAD', str.lower) --> A B C D
34333 + seen = set()
34334 + seen_add = seen.add
34335 + if key is None:
34336 + for element in filterfalse(seen.__contains__, iterable):
34337 + seen_add(element)
34338 + yield element
34339 + else:
34340 + for element in iterable:
34341 + k = key(element)
34342 + if k not in seen:
34343 + seen_add(k)
34344 + yield element
34345 +
34346
34347 def _do_stat(filename, follow_links=True):
34348 - try:
34349 - if follow_links:
34350 - return os.stat(filename)
34351 - return os.lstat(filename)
34352 - except OSError as oe:
34353 - func_call = "stat('%s')" % filename
34354 - if oe.errno == errno.EPERM:
34355 - raise OperationNotPermitted(func_call)
34356 - if oe.errno == errno.EACCES:
34357 - raise PermissionDenied(func_call)
34358 - if oe.errno == errno.ENOENT:
34359 - raise FileNotFound(filename)
34360 - raise
34361 -
34362 - def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
34363 - stat_cached=None, follow_links=True):
34364 - """Apply user, group, and mode bits to a file if the existing bits do not
34365 - already match. The default behavior is to force an exact match of mode
34366 - bits. When mask=0 is specified, mode bits on the target file are allowed
34367 - to be a superset of the mode argument (via logical OR). When mask>0, the
34368 - mode bits that the target file is allowed to have are restricted via
34369 - logical XOR.
34370 - Returns True if the permissions were modified and False otherwise."""
34371 -
34372 - modified = False
34373 -
34374 - # Since Python 3.4, chown requires int type (no proxies).
34375 - uid = int(uid)
34376 - gid = int(gid)
34377 -
34378 - if stat_cached is None:
34379 - stat_cached = _do_stat(filename, follow_links=follow_links)
34380 -
34381 - if (uid != -1 and uid != stat_cached.st_uid) or \
34382 - (gid != -1 and gid != stat_cached.st_gid):
34383 - try:
34384 - if follow_links:
34385 - os.chown(filename, uid, gid)
34386 - else:
34387 - portage.data.lchown(filename, uid, gid)
34388 - modified = True
34389 - except OSError as oe:
34390 - func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
34391 - if oe.errno == errno.EPERM:
34392 - raise OperationNotPermitted(func_call)
34393 - elif oe.errno == errno.EACCES:
34394 - raise PermissionDenied(func_call)
34395 - elif oe.errno == errno.EROFS:
34396 - raise ReadOnlyFileSystem(func_call)
34397 - elif oe.errno == errno.ENOENT:
34398 - raise FileNotFound(filename)
34399 - else:
34400 - raise
34401 -
34402 - new_mode = -1
34403 - st_mode = stat_cached.st_mode & 0o7777 # protect from unwanted bits
34404 - if mask >= 0:
34405 - if mode == -1:
34406 - mode = 0 # Don't add any mode bits when mode is unspecified.
34407 - else:
34408 - mode = mode & 0o7777
34409 - if (mode & st_mode != mode) or \
34410 - ((mask ^ st_mode) & st_mode != st_mode):
34411 - new_mode = mode | st_mode
34412 - new_mode = (mask ^ new_mode) & new_mode
34413 - elif mode != -1:
34414 - mode = mode & 0o7777 # protect from unwanted bits
34415 - if mode != st_mode:
34416 - new_mode = mode
34417 -
34418 - # The chown system call may clear S_ISUID and S_ISGID
34419 - # bits, so those bits are restored if necessary.
34420 - if modified and new_mode == -1 and \
34421 - (st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
34422 - if mode == -1:
34423 - new_mode = st_mode
34424 - else:
34425 - mode = mode & 0o7777
34426 - if mask >= 0:
34427 - new_mode = mode | st_mode
34428 - new_mode = (mask ^ new_mode) & new_mode
34429 - else:
34430 - new_mode = mode
34431 - if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
34432 - new_mode = -1
34433 -
34434 - if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
34435 - # Mode doesn't matter for symlinks.
34436 - new_mode = -1
34437 -
34438 - if new_mode != -1:
34439 - try:
34440 - os.chmod(filename, new_mode)
34441 - modified = True
34442 - except OSError as oe:
34443 - func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
34444 - if oe.errno == errno.EPERM:
34445 - raise OperationNotPermitted(func_call)
34446 - elif oe.errno == errno.EACCES:
34447 - raise PermissionDenied(func_call)
34448 - elif oe.errno == errno.EROFS:
34449 - raise ReadOnlyFileSystem(func_call)
34450 - elif oe.errno == errno.ENOENT:
34451 - raise FileNotFound(filename)
34452 - raise
34453 - return modified
34454 + try:
34455 + if follow_links:
34456 + return os.stat(filename)
34457 + return os.lstat(filename)
34458 + except OSError as oe:
34459 + func_call = "stat('%s')" % filename
34460 + if oe.errno == errno.EPERM:
34461 + raise OperationNotPermitted(func_call)
34462 + if oe.errno == errno.EACCES:
34463 + raise PermissionDenied(func_call)
34464 + if oe.errno == errno.ENOENT:
34465 + raise FileNotFound(filename)
34466 + raise
34467 +
34468 +
34469 + def apply_permissions(
34470 + filename, uid=-1, gid=-1, mode=-1, mask=-1, stat_cached=None, follow_links=True
34471 + ):
34472 + """Apply user, group, and mode bits to a file if the existing bits do not
34473 + already match. The default behavior is to force an exact match of mode
34474 + bits. When mask=0 is specified, mode bits on the target file are allowed
34475 + to be a superset of the mode argument (via logical OR). When mask>0, the
34476 + mode bits that the target file is allowed to have are restricted via
34477 + logical XOR.
34478 + Returns True if the permissions were modified and False otherwise."""
34479 +
34480 + modified = False
34481 +
34482 + # Since Python 3.4, chown requires int type (no proxies).
34483 + uid = int(uid)
34484 + gid = int(gid)
34485 +
34486 + if stat_cached is None:
34487 + stat_cached = _do_stat(filename, follow_links=follow_links)
34488 +
34489 + if (uid != -1 and uid != stat_cached.st_uid) or (
34490 + gid != -1 and gid != stat_cached.st_gid
34491 + ):
34492 + try:
34493 + if follow_links:
34494 + os.chown(filename, uid, gid)
34495 + else:
34496 + portage.data.lchown(filename, uid, gid)
34497 + modified = True
34498 + except OSError as oe:
34499 + func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
34500 + if oe.errno == errno.EPERM:
34501 + raise OperationNotPermitted(func_call)
34502 + elif oe.errno == errno.EACCES:
34503 + raise PermissionDenied(func_call)
34504 + elif oe.errno == errno.EROFS:
34505 + raise ReadOnlyFileSystem(func_call)
34506 + elif oe.errno == errno.ENOENT:
34507 + raise FileNotFound(filename)
34508 + else:
34509 + raise
34510 +
34511 + new_mode = -1
34512 + st_mode = stat_cached.st_mode & 0o7777 # protect from unwanted bits
34513 + if mask >= 0:
34514 + if mode == -1:
34515 + mode = 0 # Don't add any mode bits when mode is unspecified.
34516 + else:
34517 + mode = mode & 0o7777
34518 + if (mode & st_mode != mode) or ((mask ^ st_mode) & st_mode != st_mode):
34519 + new_mode = mode | st_mode
34520 + new_mode = (mask ^ new_mode) & new_mode
34521 + elif mode != -1:
34522 + mode = mode & 0o7777 # protect from unwanted bits
34523 + if mode != st_mode:
34524 + new_mode = mode
34525 +
34526 + # The chown system call may clear S_ISUID and S_ISGID
34527 + # bits, so those bits are restored if necessary.
34528 + if (
34529 + modified
34530 + and new_mode == -1
34531 + and (st_mode & stat.S_ISUID or st_mode & stat.S_ISGID)
34532 + ):
34533 + if mode == -1:
34534 + new_mode = st_mode
34535 + else:
34536 + mode = mode & 0o7777
34537 + if mask >= 0:
34538 + new_mode = mode | st_mode
34539 + new_mode = (mask ^ new_mode) & new_mode
34540 + else:
34541 + new_mode = mode
34542 + if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
34543 + new_mode = -1
34544 +
34545 + if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
34546 + # Mode doesn't matter for symlinks.
34547 + new_mode = -1
34548 +
34549 + if new_mode != -1:
34550 + try:
34551 + os.chmod(filename, new_mode)
34552 + modified = True
34553 + except OSError as oe:
34554 + func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
34555 + if oe.errno == errno.EPERM:
34556 + raise OperationNotPermitted(func_call)
34557 + elif oe.errno == errno.EACCES:
34558 + raise PermissionDenied(func_call)
34559 + elif oe.errno == errno.EROFS:
34560 + raise ReadOnlyFileSystem(func_call)
34561 + elif oe.errno == errno.ENOENT:
34562 + raise FileNotFound(filename)
34563 + raise
34564 + return modified
34565 +
34566
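The mask arithmetic above reduces to new_mode & ~mask: bits named in mask are stripped from the union of the requested and existing bits. A worked example with plain integers (no portage calls involved):

    st_mode = 0o2775            # existing: setgid, rwxrwxr-x
    mode, mask = 0o644, 0o022   # request rw-r--r--, forbid g+w and o+w

    new_mode = mode | st_mode                 # 0o2775: superset is allowed
    new_mode = (mask ^ new_mode) & new_mode   # 0o2755: masked bits cleared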
34567 def apply_stat_permissions(filename, newstat, **kwargs):
34568 - """A wrapper around apply_secpass_permissions that gets
34569 - uid, gid, and mode from a stat object"""
34570 - return apply_secpass_permissions(filename, uid=newstat.st_uid, gid=newstat.st_gid,
34571 - mode=newstat.st_mode, **kwargs)
34572 -
34573 - def apply_recursive_permissions(top, uid=-1, gid=-1,
34574 - dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
34575 - """A wrapper around apply_secpass_permissions that applies permissions
34576 - recursively. If optional argument onerror is specified, it should be a
34577 - function; it will be called with one argument, a PortageException instance.
34578 - Returns True if all permissions are applied and False if some are left
34579 - unapplied."""
34580 -
34581 - # Avoid issues with circular symbolic links, as in bug #339670.
34582 - follow_links = False
34583 -
34584 - if onerror is None:
34585 - # Default behavior is to dump errors to stderr so they won't
34586 - # go unnoticed. Callers can pass in a quiet instance.
34587 - def onerror(e):
34588 - if isinstance(e, OperationNotPermitted):
34589 - writemsg(_("Operation Not Permitted: %s\n") % str(e),
34590 - noiselevel=-1)
34591 - elif isinstance(e, FileNotFound):
34592 - writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
34593 - else:
34594 - raise
34595 -
34596 - # For bug 554084, always apply permissions to a directory before
34597 - # that directory is traversed.
34598 - all_applied = True
34599 -
34600 - try:
34601 - stat_cached = _do_stat(top, follow_links=follow_links)
34602 - except FileNotFound:
34603 - # backward compatibility
34604 - return True
34605 -
34606 - if stat.S_ISDIR(stat_cached.st_mode):
34607 - mode = dirmode
34608 - mask = dirmask
34609 - else:
34610 - mode = filemode
34611 - mask = filemask
34612 -
34613 - try:
34614 - applied = apply_secpass_permissions(top,
34615 - uid=uid, gid=gid, mode=mode, mask=mask,
34616 - stat_cached=stat_cached, follow_links=follow_links)
34617 - if not applied:
34618 - all_applied = False
34619 - except PortageException as e:
34620 - all_applied = False
34621 - onerror(e)
34622 -
34623 - for dirpath, dirnames, filenames in os.walk(top):
34624 - for name, mode, mask in chain(
34625 - ((x, filemode, filemask) for x in filenames),
34626 - ((x, dirmode, dirmask) for x in dirnames)):
34627 - try:
34628 - applied = apply_secpass_permissions(os.path.join(dirpath, name),
34629 - uid=uid, gid=gid, mode=mode, mask=mask,
34630 - follow_links=follow_links)
34631 - if not applied:
34632 - all_applied = False
34633 - except PortageException as e:
34634 - # Ignore InvalidLocation exceptions such as FileNotFound
34635 - # and DirectoryNotFound since sometimes things disappear,
34636 - # like when adjusting permissions on DISTCC_DIR.
34637 - if not isinstance(e, portage.exception.InvalidLocation):
34638 - all_applied = False
34639 - onerror(e)
34640 - return all_applied
34641 -
34642 - def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
34643 - stat_cached=None, follow_links=True):
34644 - """A wrapper around apply_permissions that uses secpass and simple
34645 - logic to apply as much of the permissions as possible without
34646 - generating an obviously avoidable permission exception. Despite
34647 - attempts to avoid an exception, it's possible that one will be raised
34648 - anyway, so be prepared.
34649 - Returns True if all permissions are applied and False if some are left
34650 - unapplied."""
34651 -
34652 - if stat_cached is None:
34653 - stat_cached = _do_stat(filename, follow_links=follow_links)
34654 -
34655 - all_applied = True
34656 -
34657 - # Avoid accessing portage.data.secpass when possible, since
34658 - # it triggers config loading (undesirable for chmod-lite).
34659 - if (uid != -1 or gid != -1) and portage.data.secpass < 2:
34660 -
34661 - if uid != -1 and \
34662 - uid != stat_cached.st_uid:
34663 - all_applied = False
34664 - uid = -1
34665 -
34666 - if gid != -1 and \
34667 - gid != stat_cached.st_gid and \
34668 - gid not in os.getgroups():
34669 - all_applied = False
34670 - gid = -1
34671 -
34672 - apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
34673 - stat_cached=stat_cached, follow_links=follow_links)
34674 - return all_applied
34675 + """A wrapper around apply_secpass_permissions that gets
34676 + uid, gid, and mode from a stat object"""
34677 + return apply_secpass_permissions(
34678 + filename, uid=newstat.st_uid, gid=newstat.st_gid, mode=newstat.st_mode, **kwargs
34679 + )
34680 +
34681 +
34682 + def apply_recursive_permissions(
34683 + top, uid=-1, gid=-1, dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None
34684 + ):
34685 + """A wrapper around apply_secpass_permissions that applies permissions
34686 + recursively. If optional argument onerror is specified, it should be a
34687 + function; it will be called with one argument, a PortageException instance.
34688 + Returns True if all permissions are applied and False if some are left
34689 + unapplied."""
34690 +
34691 + # Avoid issues with circular symbolic links, as in bug #339670.
34692 + follow_links = False
34693 +
34694 + if onerror is None:
34695 + # Default behavior is to dump errors to stderr so they won't
34696 + # go unnoticed. Callers can pass in a quiet instance.
34697 + def onerror(e):
34698 + if isinstance(e, OperationNotPermitted):
34699 + writemsg(_("Operation Not Permitted: %s\n") % str(e), noiselevel=-1)
34700 + elif isinstance(e, FileNotFound):
34701 + writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
34702 + else:
34703 + raise
34704 +
34705 + # For bug 554084, always apply permissions to a directory before
34706 + # that directory is traversed.
34707 + all_applied = True
34708 +
34709 + try:
34710 + stat_cached = _do_stat(top, follow_links=follow_links)
34711 + except FileNotFound:
34712 + # backward compatibility
34713 + return True
34714 +
34715 + if stat.S_ISDIR(stat_cached.st_mode):
34716 + mode = dirmode
34717 + mask = dirmask
34718 + else:
34719 + mode = filemode
34720 + mask = filemask
34721 +
34722 + try:
34723 + applied = apply_secpass_permissions(
34724 + top,
34725 + uid=uid,
34726 + gid=gid,
34727 + mode=mode,
34728 + mask=mask,
34729 + stat_cached=stat_cached,
34730 + follow_links=follow_links,
34731 + )
34732 + if not applied:
34733 + all_applied = False
34734 + except PortageException as e:
34735 + all_applied = False
34736 + onerror(e)
34737 +
34738 + for dirpath, dirnames, filenames in os.walk(top):
34739 + for name, mode, mask in chain(
34740 + ((x, filemode, filemask) for x in filenames),
34741 + ((x, dirmode, dirmask) for x in dirnames),
34742 + ):
34743 + try:
34744 + applied = apply_secpass_permissions(
34745 + os.path.join(dirpath, name),
34746 + uid=uid,
34747 + gid=gid,
34748 + mode=mode,
34749 + mask=mask,
34750 + follow_links=follow_links,
34751 + )
34752 + if not applied:
34753 + all_applied = False
34754 + except PortageException as e:
34755 + # Ignore InvalidLocation exceptions such as FileNotFound
34756 + # and DirectoryNotFound since sometimes things disappear,
34757 + # like when adjusting permissions on DISTCC_DIR.
34758 + if not isinstance(e, portage.exception.InvalidLocation):
34759 + all_applied = False
34760 + onerror(e)
34761 + return all_applied
34762 +
34763 +
34764 + def apply_secpass_permissions(
34765 + filename, uid=-1, gid=-1, mode=-1, mask=-1, stat_cached=None, follow_links=True
34766 + ):
34767 + """A wrapper around apply_permissions that uses secpass and simple
34768 + logic to apply as much of the permissions as possible without
34769 + generating an obviously avoidable permission exception. Despite
34770 + attempts to avoid an exception, it's possible that one will be raised
34771 + anyway, so be prepared.
34772 + Returns True if all permissions are applied and False if some are left
34773 + unapplied."""
34774 +
34775 + if stat_cached is None:
34776 + stat_cached = _do_stat(filename, follow_links=follow_links)
34777 +
34778 + all_applied = True
34779 +
34780 + # Avoid accessing portage.data.secpass when possible, since
34781 + # it triggers config loading (undesirable for chmod-lite).
34782 + if (uid != -1 or gid != -1) and portage.data.secpass < 2:
34783 +
34784 + if uid != -1 and uid != stat_cached.st_uid:
34785 + all_applied = False
34786 + uid = -1
34787 +
34788 + if gid != -1 and gid != stat_cached.st_gid and gid not in os.getgroups():
34789 + all_applied = False
34790 + gid = -1
34791 +
34792 + apply_permissions(
34793 + filename,
34794 + uid=uid,
34795 + gid=gid,
34796 + mode=mode,
34797 + mask=mask,
34798 + stat_cached=stat_cached,
34799 + follow_links=follow_links,
34800 + )
34801 + return all_applied
34802 +
34803
34804 class atomic_ofstream(AbstractContextManager, ObjectProxy):
34805 - """Write a file atomically via os.rename(). Atomic replacement prevents
34806 - interprocess interference and prevents corruption of the target
34807 - file when the write is interrupted (for example, when an 'out of space'
34808 - error occurs)."""
34809 -
34810 - def __init__(self, filename, mode='w', follow_links=True, **kargs):
34811 - """Opens a temporary filename.pid in the same directory as filename."""
34812 - ObjectProxy.__init__(self)
34813 - object.__setattr__(self, '_aborted', False)
34814 - if 'b' in mode:
34815 - open_func = open
34816 - else:
34817 - open_func = io.open
34818 - kargs.setdefault('encoding', _encodings['content'])
34819 - kargs.setdefault('errors', 'backslashreplace')
34820 -
34821 - if follow_links:
34822 - canonical_path = os.path.realpath(filename)
34823 - object.__setattr__(self, '_real_name', canonical_path)
34824 - tmp_name = "%s.%i" % (canonical_path, portage.getpid())
34825 - try:
34826 - object.__setattr__(self, '_file',
34827 - open_func(_unicode_encode(tmp_name,
34828 - encoding=_encodings['fs'], errors='strict'),
34829 - mode=mode, **kargs))
34830 - return
34831 - except IOError as e:
34832 - if canonical_path == filename:
34833 - raise
34834 - # Ignore this error, since it's irrelevant
34835 - # and the below open call will produce a
34836 - # new error if necessary.
34837 -
34838 - object.__setattr__(self, '_real_name', filename)
34839 - tmp_name = "%s.%i" % (filename, portage.getpid())
34840 - object.__setattr__(self, '_file',
34841 - open_func(_unicode_encode(tmp_name,
34842 - encoding=_encodings['fs'], errors='strict'),
34843 - mode=mode, **kargs))
34844 -
34845 - def __exit__(self, exc_type, exc_val, exc_tb):
34846 - if exc_type is not None:
34847 - self.abort()
34848 - else:
34849 - self.close()
34850 -
34851 - def _get_target(self):
34852 - return object.__getattribute__(self, '_file')
34853 -
34854 - def __getattribute__(self, attr):
34855 - if attr in ('close', 'abort', '__del__'):
34856 - return object.__getattribute__(self, attr)
34857 - return getattr(object.__getattribute__(self, '_file'), attr)
34858 -
34859 - def close(self):
34860 - """Closes the temporary file, copies permissions (if possible),
34861 - and performs the atomic replacement via os.rename(). If the abort()
34862 - method has been called, then the temp file is closed and removed."""
34863 - f = object.__getattribute__(self, '_file')
34864 - real_name = object.__getattribute__(self, '_real_name')
34865 - if not f.closed:
34866 - try:
34867 - f.close()
34868 - if not object.__getattribute__(self, '_aborted'):
34869 - try:
34870 - apply_stat_permissions(f.name, os.stat(real_name))
34871 - except OperationNotPermitted:
34872 - pass
34873 - except FileNotFound:
34874 - pass
34875 - except OSError as oe: # from the above os.stat call
34876 - if oe.errno in (errno.ENOENT, errno.EPERM):
34877 - pass
34878 - else:
34879 - raise
34880 - os.rename(f.name, real_name)
34881 - finally:
34882 - # Make sure we cleanup the temp file
34883 - # even if an exception is raised.
34884 - try:
34885 - os.unlink(f.name)
34886 - except OSError as oe:
34887 - pass
34888 -
34889 - def abort(self):
34890 - """If an error occurs while writing the file, the user should
34891 - call this method in order to leave the target file unchanged.
34892 - This will call close() automatically."""
34893 - if not object.__getattribute__(self, '_aborted'):
34894 - object.__setattr__(self, '_aborted', True)
34895 - self.close()
34896 -
34897 - def __del__(self):
34898 - """If the user does not explicitly call close(), it is
34899 - assumed that an error has occurred, so we abort()."""
34900 - try:
34901 - f = object.__getattribute__(self, '_file')
34902 - except AttributeError:
34903 - pass
34904 - else:
34905 - if not f.closed:
34906 - self.abort()
34907 - # ensure destructor from the base class is called
34908 - base_destructor = getattr(ObjectProxy, '__del__', None)
34909 - if base_destructor is not None:
34910 - base_destructor(self)
34911 + """Write a file atomically via os.rename(). Atomic replacement prevents
34912 + interprocess interference and prevents corruption of the target
34913 + file when the write is interrupted (for example, when an 'out of space'
34914 + error occurs)."""
34915 +
34916 + def __init__(self, filename, mode="w", follow_links=True, **kargs):
34917 + """Opens a temporary filename.pid in the same directory as filename."""
34918 + ObjectProxy.__init__(self)
34919 + object.__setattr__(self, "_aborted", False)
34920 + if "b" in mode:
34921 + open_func = open
34922 + else:
34923 + open_func = io.open
34924 + kargs.setdefault("encoding", _encodings["content"])
34925 + kargs.setdefault("errors", "backslashreplace")
34926 +
34927 + if follow_links:
34928 + canonical_path = os.path.realpath(filename)
34929 + object.__setattr__(self, "_real_name", canonical_path)
34930 + tmp_name = "%s.%i" % (canonical_path, portage.getpid())
34931 + try:
34932 + object.__setattr__(
34933 + self,
34934 + "_file",
34935 + open_func(
34936 + _unicode_encode(
34937 + tmp_name, encoding=_encodings["fs"], errors="strict"
34938 + ),
34939 + mode=mode,
34940 + **kargs
34941 + ),
34942 + )
34943 + return
34944 + except IOError as e:
34945 + if canonical_path == filename:
34946 + raise
34947 + # Ignore this error, since it's irrelevant
34948 + # and the below open call will produce a
34949 + # new error if necessary.
34950 +
34951 + object.__setattr__(self, "_real_name", filename)
34952 + tmp_name = "%s.%i" % (filename, portage.getpid())
34953 + object.__setattr__(
34954 + self,
34955 + "_file",
34956 + open_func(
34957 + _unicode_encode(tmp_name, encoding=_encodings["fs"], errors="strict"),
34958 + mode=mode,
34959 + **kargs
34960 + ),
34961 + )
34962 +
34963 + def __exit__(self, exc_type, exc_val, exc_tb):
34964 + if exc_type is not None:
34965 + self.abort()
34966 + else:
34967 + self.close()
34968 +
34969 + def _get_target(self):
34970 + return object.__getattribute__(self, "_file")
34971 +
34972 + def __getattribute__(self, attr):
34973 + if attr in ("close", "abort", "__del__"):
34974 + return object.__getattribute__(self, attr)
34975 + return getattr(object.__getattribute__(self, "_file"), attr)
34976 +
34977 + def close(self):
34978 + """Closes the temporary file, copies permissions (if possible),
34979 + and performs the atomic replacement via os.rename(). If the abort()
34980 + method has been called, then the temp file is closed and removed."""
34981 + f = object.__getattribute__(self, "_file")
34982 + real_name = object.__getattribute__(self, "_real_name")
34983 + if not f.closed:
34984 + try:
34985 + f.close()
34986 + if not object.__getattribute__(self, "_aborted"):
34987 + try:
34988 + apply_stat_permissions(f.name, os.stat(real_name))
34989 + except OperationNotPermitted:
34990 + pass
34991 + except FileNotFound:
34992 + pass
34993 + except OSError as oe: # from the above os.stat call
34994 + if oe.errno in (errno.ENOENT, errno.EPERM):
34995 + pass
34996 + else:
34997 + raise
34998 + os.rename(f.name, real_name)
34999 + finally:
35000 +                # Make sure we clean up the temp file
35001 + # even if an exception is raised.
35002 + try:
35003 + os.unlink(f.name)
35004 + except OSError as oe:
35005 + pass
35006 +
35007 + def abort(self):
35008 + """If an error occurs while writing the file, the user should
35009 + call this method in order to leave the target file unchanged.
35010 + This will call close() automatically."""
35011 + if not object.__getattribute__(self, "_aborted"):
35012 + object.__setattr__(self, "_aborted", True)
35013 + self.close()
35014 +
35015 + def __del__(self):
35016 + """If the user does not explicitly call close(), it is
35017 + assumed that an error has occurred, so we abort()."""
35018 + try:
35019 + f = object.__getattribute__(self, "_file")
35020 + except AttributeError:
35021 + pass
35022 + else:
35023 + if not f.closed:
35024 + self.abort()
35025 + # ensure destructor from the base class is called
35026 + base_destructor = getattr(ObjectProxy, "__del__", None)
35027 + if base_destructor is not None:
35028 + base_destructor(self)
35029 +
35030
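atomic_ofstream is portage's wrapper around the classic write-to-temp-then-rename idiom. A stdlib-only sketch of the same technique (hypothetical helper; no permission copying or symlink handling):

    import os

    def write_atomically(path, data):
        tmp = "%s.%i" % (path, os.getpid())  # temp file in the same directory
        try:
            with open(tmp, "w") as f:
                f.write(data)
            os.rename(tmp, path)  # atomic on POSIX within one filesystem
        except OSError:
            try:
                os.unlink(tmp)  # leave the target untouched on failure
            except OSError:
                pass
            raise

Keeping the temp file in the target's directory matters: os.rename() is only atomic when source and destination live on the same filesystem.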
35031 def write_atomic(file_path, content, **kwargs):
35032 - f = None
35033 - try:
35034 - f = atomic_ofstream(file_path, **kwargs)
35035 - f.write(content)
35036 - f.close()
35037 - except (IOError, OSError) as e:
35038 - if f:
35039 - f.abort()
35040 - func_call = "write_atomic('%s')" % file_path
35041 - if e.errno == errno.EPERM:
35042 - raise OperationNotPermitted(func_call)
35043 - elif e.errno == errno.EACCES:
35044 - raise PermissionDenied(func_call)
35045 - elif e.errno == errno.EROFS:
35046 - raise ReadOnlyFileSystem(func_call)
35047 - elif e.errno == errno.ENOENT:
35048 - raise FileNotFound(file_path)
35049 - else:
35050 - raise
35051 + f = None
35052 + try:
35053 + f = atomic_ofstream(file_path, **kwargs)
35054 + f.write(content)
35055 + f.close()
35056 + except (IOError, OSError) as e:
35057 + if f:
35058 + f.abort()
35059 + func_call = "write_atomic('%s')" % file_path
35060 + if e.errno == errno.EPERM:
35061 + raise OperationNotPermitted(func_call)
35062 + elif e.errno == errno.EACCES:
35063 + raise PermissionDenied(func_call)
35064 + elif e.errno == errno.EROFS:
35065 + raise ReadOnlyFileSystem(func_call)
35066 + elif e.errno == errno.ENOENT:
35067 + raise FileNotFound(file_path)
35068 + else:
35069 + raise
35070
35071 - def ensure_dirs(dir_path, **kwargs):
35072 - """Create a directory and call apply_permissions.
35073 - Returns True if a directory is created or the permissions needed to be
35074 - modified, and False otherwise.
35075
35076 - This function's handling of EEXIST errors makes it useful for atomic
35077 - directory creation, in which multiple processes may be competing to
35078 - create the same directory.
35079 - """
35080 + def ensure_dirs(dir_path, **kwargs):
35081 + """Create a directory and call apply_permissions.
35082 + Returns True if a directory is created or the permissions needed to be
35083 + modified, and False otherwise.
35084 +
35085 + This function's handling of EEXIST errors makes it useful for atomic
35086 + directory creation, in which multiple processes may be competing to
35087 + create the same directory.
35088 + """
35089 +
35090 + created_dir = False
35091 +
35092 + try:
35093 + os.makedirs(dir_path)
35094 + created_dir = True
35095 + except OSError as oe:
35096 + func_call = "makedirs('%s')" % dir_path
35097 + if oe.errno in (errno.EEXIST,):
35098 + pass
35099 + else:
35100 + if os.path.isdir(dir_path):
35101 + # NOTE: DragonFly raises EPERM for makedir('/')
35102 + # and that is supposed to be ignored here.
35103 + # Also, sometimes mkdir raises EISDIR on FreeBSD
35104 + # and we want to ignore that too (bug #187518).
35105 + pass
35106 + elif oe.errno == errno.EPERM:
35107 + raise OperationNotPermitted(func_call)
35108 + elif oe.errno == errno.EACCES:
35109 + raise PermissionDenied(func_call)
35110 + elif oe.errno == errno.EROFS:
35111 + raise ReadOnlyFileSystem(func_call)
35112 + else:
35113 + raise
35114 + if kwargs:
35115 + perms_modified = apply_permissions(dir_path, **kwargs)
35116 + else:
35117 + perms_modified = False
35118 + return created_dir or perms_modified
35119
35120 - created_dir = False
35121 -
35122 - try:
35123 - os.makedirs(dir_path)
35124 - created_dir = True
35125 - except OSError as oe:
35126 - func_call = "makedirs('%s')" % dir_path
35127 - if oe.errno in (errno.EEXIST,):
35128 - pass
35129 - else:
35130 - if os.path.isdir(dir_path):
35131 - # NOTE: DragonFly raises EPERM for makedir('/')
35132 - # and that is supposed to be ignored here.
35133 - # Also, sometimes mkdir raises EISDIR on FreeBSD
35134 - # and we want to ignore that too (bug #187518).
35135 - pass
35136 - elif oe.errno == errno.EPERM:
35137 - raise OperationNotPermitted(func_call)
35138 - elif oe.errno == errno.EACCES:
35139 - raise PermissionDenied(func_call)
35140 - elif oe.errno == errno.EROFS:
35141 - raise ReadOnlyFileSystem(func_call)
35142 - else:
35143 - raise
35144 - if kwargs:
35145 - perms_modified = apply_permissions(dir_path, **kwargs)
35146 - else:
35147 - perms_modified = False
35148 - return created_dir or perms_modified
35149
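The EEXIST handling above is what makes ensure_dirs safe when several processes race to create the same directory; the underlying idiom, stripped to stdlib calls:

    import errno, os

    def make_dir_racefree(path):
        try:
            os.makedirs(path)
            return True  # this process created the directory
        except OSError as e:
            if e.errno == errno.EEXIST or os.path.isdir(path):
                return False  # another process (or an earlier run) won the race
            raise

On Python 3, os.makedirs(path, exist_ok=True) expresses the same idea, but ensure_dirs keeps the explicit branch so it can also translate EPERM/EACCES/EROFS into portage exceptions.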
35150 class LazyItemsDict(UserDict):
35151 - """A mapping object that behaves like a standard dict except that it allows
35152 - for lazy initialization of values via callable objects. Lazy items can be
35153 - overwritten and deleted just as normal items."""
35154 -
35155 - __slots__ = ('lazy_items',)
35156 -
35157 - def __init__(self, *args, **kwargs):
35158 -
35159 - self.lazy_items = {}
35160 - UserDict.__init__(self, *args, **kwargs)
35161 -
35162 - def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
35163 - """Add a lazy item for the given key. When the item is requested,
35164 - value_callable will be called with *pargs and **kwargs arguments."""
35165 - self.lazy_items[item_key] = \
35166 - self._LazyItem(value_callable, pargs, kwargs, False)
35167 - # make it show up in self.keys(), etc...
35168 - UserDict.__setitem__(self, item_key, None)
35169 -
35170 - def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
35171 - """This is like addLazyItem except value_callable will only be called
35172 - a maximum of 1 time and the result will be cached for future requests."""
35173 - self.lazy_items[item_key] = \
35174 - self._LazyItem(value_callable, pargs, kwargs, True)
35175 - # make it show up in self.keys(), etc...
35176 - UserDict.__setitem__(self, item_key, None)
35177 -
35178 - def update(self, *args, **kwargs):
35179 - if len(args) > 1:
35180 - raise TypeError(
35181 - "expected at most 1 positional argument, got " + \
35182 - repr(len(args)))
35183 - if args:
35184 - map_obj = args[0]
35185 - else:
35186 - map_obj = None
35187 - if map_obj is None:
35188 - pass
35189 - elif isinstance(map_obj, LazyItemsDict):
35190 - for k in map_obj:
35191 - if k in map_obj.lazy_items:
35192 - UserDict.__setitem__(self, k, None)
35193 - else:
35194 - UserDict.__setitem__(self, k, map_obj[k])
35195 - self.lazy_items.update(map_obj.lazy_items)
35196 - else:
35197 - UserDict.update(self, map_obj)
35198 - if kwargs:
35199 - UserDict.update(self, kwargs)
35200 -
35201 - def __getitem__(self, item_key):
35202 - if item_key in self.lazy_items:
35203 - lazy_item = self.lazy_items[item_key]
35204 - pargs = lazy_item.pargs
35205 - if pargs is None:
35206 - pargs = ()
35207 - kwargs = lazy_item.kwargs
35208 - if kwargs is None:
35209 - kwargs = {}
35210 - result = lazy_item.func(*pargs, **kwargs)
35211 - if lazy_item.singleton:
35212 - self[item_key] = result
35213 - return result
35214 -
35215 - return UserDict.__getitem__(self, item_key)
35216 -
35217 - def __setitem__(self, item_key, value):
35218 - if item_key in self.lazy_items:
35219 - del self.lazy_items[item_key]
35220 - UserDict.__setitem__(self, item_key, value)
35221 -
35222 - def __delitem__(self, item_key):
35223 - if item_key in self.lazy_items:
35224 - del self.lazy_items[item_key]
35225 - UserDict.__delitem__(self, item_key)
35226 -
35227 - def clear(self):
35228 - self.lazy_items.clear()
35229 - UserDict.clear(self)
35230 -
35231 - def copy(self):
35232 - return self.__copy__()
35233 -
35234 - def __copy__(self):
35235 - return self.__class__(self)
35236 -
35237 - def __deepcopy__(self, memo=None):
35238 - """
35239 - This forces evaluation of each contained lazy item, and deepcopy of
35240 - the result. A TypeError is raised if any contained lazy item is not
35241 - a singleton, since it is not necessarily possible for the behavior
35242 - of this type of item to be safely preserved.
35243 - """
35244 - if memo is None:
35245 - memo = {}
35246 - result = self.__class__()
35247 - memo[id(self)] = result
35248 - for k in self:
35249 - k_copy = deepcopy(k, memo)
35250 - lazy_item = self.lazy_items.get(k)
35251 - if lazy_item is not None:
35252 - if not lazy_item.singleton:
35253 - raise TypeError("LazyItemsDict " + \
35254 - "deepcopy is unsafe with lazy items that are " + \
35255 - "not singletons: key=%s value=%s" % (k, lazy_item,))
35256 - UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
35257 - return result
35258 -
35259 - class _LazyItem:
35260 -
35261 - __slots__ = ('func', 'pargs', 'kwargs', 'singleton')
35262 -
35263 - def __init__(self, func, pargs, kwargs, singleton):
35264 -
35265 - if not pargs:
35266 - pargs = None
35267 - if not kwargs:
35268 - kwargs = None
35269 -
35270 - self.func = func
35271 - self.pargs = pargs
35272 - self.kwargs = kwargs
35273 - self.singleton = singleton
35274 -
35275 - def __copy__(self):
35276 - return self.__class__(self.func, self.pargs,
35277 - self.kwargs, self.singleton)
35278 -
35279 - def __deepcopy__(self, memo=None):
35280 - """
35281 - Override this since the default implementation can fail silently,
35282 - leaving some attributes unset.
35283 - """
35284 - if memo is None:
35285 - memo = {}
35286 - result = self.__copy__()
35287 - memo[id(self)] = result
35288 - result.func = deepcopy(self.func, memo)
35289 - result.pargs = deepcopy(self.pargs, memo)
35290 - result.kwargs = deepcopy(self.kwargs, memo)
35291 - result.singleton = deepcopy(self.singleton, memo)
35292 - return result
35293 + """A mapping object that behaves like a standard dict except that it allows
35294 + for lazy initialization of values via callable objects. Lazy items can be
35295 + overwritten and deleted just as normal items."""
35296 +
35297 + __slots__ = ("lazy_items",)
35298 +
35299 + def __init__(self, *args, **kwargs):
35300 +
35301 + self.lazy_items = {}
35302 + UserDict.__init__(self, *args, **kwargs)
35303 +
35304 + def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
35305 + """Add a lazy item for the given key. When the item is requested,
35306 + value_callable will be called with *pargs and **kwargs arguments."""
35307 + self.lazy_items[item_key] = self._LazyItem(value_callable, pargs, kwargs, False)
35308 + # make it show up in self.keys(), etc...
35309 + UserDict.__setitem__(self, item_key, None)
35310 +
35311 + def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
35312 + """This is like addLazyItem except value_callable will only be called
35313 + a maximum of 1 time and the result will be cached for future requests."""
35314 + self.lazy_items[item_key] = self._LazyItem(value_callable, pargs, kwargs, True)
35315 + # make it show up in self.keys(), etc...
35316 + UserDict.__setitem__(self, item_key, None)
35317 +
35318 + def update(self, *args, **kwargs):
35319 + if len(args) > 1:
35320 + raise TypeError(
35321 + "expected at most 1 positional argument, got " + repr(len(args))
35322 + )
35323 + if args:
35324 + map_obj = args[0]
35325 + else:
35326 + map_obj = None
35327 + if map_obj is None:
35328 + pass
35329 + elif isinstance(map_obj, LazyItemsDict):
35330 + for k in map_obj:
35331 + if k in map_obj.lazy_items:
35332 + UserDict.__setitem__(self, k, None)
35333 + else:
35334 + UserDict.__setitem__(self, k, map_obj[k])
35335 + self.lazy_items.update(map_obj.lazy_items)
35336 + else:
35337 + UserDict.update(self, map_obj)
35338 + if kwargs:
35339 + UserDict.update(self, kwargs)
35340 +
35341 + def __getitem__(self, item_key):
35342 + if item_key in self.lazy_items:
35343 + lazy_item = self.lazy_items[item_key]
35344 + pargs = lazy_item.pargs
35345 + if pargs is None:
35346 + pargs = ()
35347 + kwargs = lazy_item.kwargs
35348 + if kwargs is None:
35349 + kwargs = {}
35350 + result = lazy_item.func(*pargs, **kwargs)
35351 + if lazy_item.singleton:
35352 + self[item_key] = result
35353 + return result
35354 +
35355 + return UserDict.__getitem__(self, item_key)
35356 +
35357 + def __setitem__(self, item_key, value):
35358 + if item_key in self.lazy_items:
35359 + del self.lazy_items[item_key]
35360 + UserDict.__setitem__(self, item_key, value)
35361 +
35362 + def __delitem__(self, item_key):
35363 + if item_key in self.lazy_items:
35364 + del self.lazy_items[item_key]
35365 + UserDict.__delitem__(self, item_key)
35366 +
35367 + def clear(self):
35368 + self.lazy_items.clear()
35369 + UserDict.clear(self)
35370 +
35371 + def copy(self):
35372 + return self.__copy__()
35373 +
35374 + def __copy__(self):
35375 + return self.__class__(self)
35376 +
35377 + def __deepcopy__(self, memo=None):
35378 + """
35379 + This forces evaluation of each contained lazy item, and deepcopy of
35380 + the result. A TypeError is raised if any contained lazy item is not
35381 + a singleton, since it is not necessarily possible for the behavior
35382 + of this type of item to be safely preserved.
35383 + """
35384 + if memo is None:
35385 + memo = {}
35386 + result = self.__class__()
35387 + memo[id(self)] = result
35388 + for k in self:
35389 + k_copy = deepcopy(k, memo)
35390 + lazy_item = self.lazy_items.get(k)
35391 + if lazy_item is not None:
35392 + if not lazy_item.singleton:
35393 + raise TypeError(
35394 + "LazyItemsDict "
35395 + + "deepcopy is unsafe with lazy items that are "
35396 + + "not singletons: key=%s value=%s"
35397 + % (
35398 + k,
35399 + lazy_item,
35400 + )
35401 + )
35402 + UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
35403 + return result
35404 +
35405 + class _LazyItem:
35406 +
35407 + __slots__ = ("func", "pargs", "kwargs", "singleton")
35408 +
35409 + def __init__(self, func, pargs, kwargs, singleton):
35410 +
35411 + if not pargs:
35412 + pargs = None
35413 + if not kwargs:
35414 + kwargs = None
35415 +
35416 + self.func = func
35417 + self.pargs = pargs
35418 + self.kwargs = kwargs
35419 + self.singleton = singleton
35420 +
35421 + def __copy__(self):
35422 + return self.__class__(self.func, self.pargs, self.kwargs, self.singleton)
35423 +
35424 + def __deepcopy__(self, memo=None):
35425 + """
35426 + Override this since the default implementation can fail silently,
35427 + leaving some attributes unset.
35428 + """
35429 + if memo is None:
35430 + memo = {}
35431 + result = self.__copy__()
35432 + memo[id(self)] = result
35433 + result.func = deepcopy(self.func, memo)
35434 + result.pargs = deepcopy(self.pargs, memo)
35435 + result.kwargs = deepcopy(self.kwargs, memo)
35436 + result.singleton = deepcopy(self.singleton, memo)
35437 + return result
35438 +
35439
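As a quick illustration of the reformatted class above: a plain lazy item is re-evaluated on every lookup, while a lazy singleton is computed once and cached; ordinary assignment or deletion discards the pending callable. The names below are illustrative only:

    from portage.util import LazyItemsDict

    def compute(name):
        print("computing %s" % name)
        return name.upper()

    d = LazyItemsDict()
    d.addLazyItem("a", compute, "a")       # compute() runs on every d["a"]
    d.addLazySingleton("b", compute, "b")  # compute() runs once, result cached
    d["b"], d["b"]                         # prints "computing b" a single time
    d["a"] = "A"                           # plain assignment drops the lazy item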
35440 class ConfigProtect:
35441 - def __init__(self, myroot, protect_list, mask_list,
35442 - case_insensitive=False):
35443 - self.myroot = myroot
35444 - self.protect_list = protect_list
35445 - self.mask_list = mask_list
35446 - self.case_insensitive = case_insensitive
35447 - self.updateprotect()
35448 -
35449 - def updateprotect(self):
35450 - """Update internal state for isprotected() calls. Nonexistent paths
35451 - are ignored."""
35452 -
35453 - os = _os_merge
35454 -
35455 - self.protect = []
35456 - self._dirs = set()
35457 - for x in self.protect_list:
35458 - ppath = normalize_path(
35459 - os.path.join(self.myroot, x.lstrip(os.path.sep)))
35460 - # Protect files that don't exist (bug #523684). If the
35461 - # parent directory doesn't exist, we can safely skip it.
35462 - if os.path.isdir(os.path.dirname(ppath)):
35463 - self.protect.append(ppath)
35464 - try:
35465 - if stat.S_ISDIR(os.stat(ppath).st_mode):
35466 - self._dirs.add(ppath)
35467 - except OSError:
35468 - pass
35469 -
35470 - self.protectmask = []
35471 - for x in self.mask_list:
35472 - ppath = normalize_path(
35473 - os.path.join(self.myroot, x.lstrip(os.path.sep)))
35474 - if self.case_insensitive:
35475 - ppath = ppath.lower()
35476 - try:
35477 - """Use lstat so that anything, even a broken symlink can be
35478 - protected."""
35479 - if stat.S_ISDIR(os.lstat(ppath).st_mode):
35480 - self._dirs.add(ppath)
35481 - self.protectmask.append(ppath)
35482 - """Now use stat in case this is a symlink to a directory."""
35483 - if stat.S_ISDIR(os.stat(ppath).st_mode):
35484 - self._dirs.add(ppath)
35485 - except OSError:
35486 - # If it doesn't exist, there's no need to mask it.
35487 - pass
35488 -
35489 - def isprotected(self, obj):
35490 - """Returns True if obj is protected, False otherwise. The caller must
35491 - ensure that obj is normalized with a single leading slash. A trailing
35492 - slash is optional for directories."""
35493 - masked = 0
35494 - protected = 0
35495 - sep = os.path.sep
35496 - if self.case_insensitive:
35497 - obj = obj.lower()
35498 - for ppath in self.protect:
35499 - if len(ppath) > masked and obj.startswith(ppath):
35500 - if ppath in self._dirs:
35501 - if obj != ppath and not obj.startswith(ppath + sep):
35502 - # /etc/foo does not match /etc/foobaz
35503 - continue
35504 - elif obj != ppath:
35505 - # force exact match when CONFIG_PROTECT lists a
35506 - # non-directory
35507 - continue
35508 - protected = len(ppath)
35509 - #config file management
35510 - for pmpath in self.protectmask:
35511 - if len(pmpath) >= protected and obj.startswith(pmpath):
35512 - if pmpath in self._dirs:
35513 - if obj != pmpath and \
35514 - not obj.startswith(pmpath + sep):
35515 - # /etc/foo does not match /etc/foobaz
35516 - continue
35517 - elif obj != pmpath:
35518 - # force exact match when CONFIG_PROTECT_MASK lists
35519 - # a non-directory
35520 - continue
35521 - #skip, it's in the mask
35522 - masked = len(pmpath)
35523 - return protected > masked
35524 + def __init__(self, myroot, protect_list, mask_list, case_insensitive=False):
35525 + self.myroot = myroot
35526 + self.protect_list = protect_list
35527 + self.mask_list = mask_list
35528 + self.case_insensitive = case_insensitive
35529 + self.updateprotect()
35530 +
35531 + def updateprotect(self):
35532 + """Update internal state for isprotected() calls. Nonexistent paths
35533 + are ignored."""
35534 +
35535 + os = _os_merge
35536 +
35537 + self.protect = []
35538 + self._dirs = set()
35539 + for x in self.protect_list:
35540 + ppath = normalize_path(os.path.join(self.myroot, x.lstrip(os.path.sep)))
35541 + # Protect files that don't exist (bug #523684). If the
35542 + # parent directory doesn't exist, we can safely skip it.
35543 + if os.path.isdir(os.path.dirname(ppath)):
35544 + self.protect.append(ppath)
35545 + try:
35546 + if stat.S_ISDIR(os.stat(ppath).st_mode):
35547 + self._dirs.add(ppath)
35548 + except OSError:
35549 + pass
35550 +
35551 + self.protectmask = []
35552 + for x in self.mask_list:
35553 + ppath = normalize_path(os.path.join(self.myroot, x.lstrip(os.path.sep)))
35554 + if self.case_insensitive:
35555 + ppath = ppath.lower()
35556 + try:
35557 + """Use lstat so that anything, even a broken symlink can be
35558 + protected."""
35559 + if stat.S_ISDIR(os.lstat(ppath).st_mode):
35560 + self._dirs.add(ppath)
35561 + self.protectmask.append(ppath)
35562 + """Now use stat in case this is a symlink to a directory."""
35563 + if stat.S_ISDIR(os.stat(ppath).st_mode):
35564 + self._dirs.add(ppath)
35565 + except OSError:
35566 + # If it doesn't exist, there's no need to mask it.
35567 + pass
35568 +
35569 + def isprotected(self, obj):
35570 + """Returns True if obj is protected, False otherwise. The caller must
35571 + ensure that obj is normalized with a single leading slash. A trailing
35572 + slash is optional for directories."""
35573 + masked = 0
35574 + protected = 0
35575 + sep = os.path.sep
35576 + if self.case_insensitive:
35577 + obj = obj.lower()
35578 + for ppath in self.protect:
35579 + if len(ppath) > masked and obj.startswith(ppath):
35580 + if ppath in self._dirs:
35581 + if obj != ppath and not obj.startswith(ppath + sep):
35582 + # /etc/foo does not match /etc/foobaz
35583 + continue
35584 + elif obj != ppath:
35585 + # force exact match when CONFIG_PROTECT lists a
35586 + # non-directory
35587 + continue
35588 + protected = len(ppath)
35589 + # config file management
35590 + for pmpath in self.protectmask:
35591 + if len(pmpath) >= protected and obj.startswith(pmpath):
35592 + if pmpath in self._dirs:
35593 + if obj != pmpath and not obj.startswith(pmpath + sep):
35594 + # /etc/foo does not match /etc/foobaz
35595 + continue
35596 + elif obj != pmpath:
35597 + # force exact match when CONFIG_PROTECT_MASK lists
35598 + # a non-directory
35599 + continue
35600 + # skip, it's in the mask
35601 + masked = len(pmpath)
35602 + return protected > masked
35603 +
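In short, isprotected() above compares longest matches: a CONFIG_PROTECT entry protects a path unless an equally long or longer CONFIG_PROTECT_MASK entry also covers it, with exact-match handling for non-directory entries. A rough standalone sketch of that comparison, assuming directory entries only:

    import os

    def is_protected(obj, protect, mask):
        # Longest-prefix comparison, as in ConfigProtect.isprotected().
        sep = os.path.sep

        def match_len(paths):
            best = 0
            for p in paths:
                if obj == p or obj.startswith(p + sep):
                    best = max(best, len(p))
            return best

        return match_len(protect) > match_len(mask)

    # /etc/foo is protected, but its subtree /etc/foo/bar is masked again:
    print(is_protected("/etc/foo/baz", ["/etc/foo"], ["/etc/foo/bar"]))    # True
    print(is_protected("/etc/foo/bar/x", ["/etc/foo"], ["/etc/foo/bar"]))  # False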
35604
35605 def new_protect_filename(mydest, newmd5=None, force=False):
35606 - """Resolves a config-protect filename for merging, optionally
35607 - using the last filename if the md5 matches. If force is True,
35608 - then a new filename will be generated even if mydest does not
35609 - exist yet.
35610 - (dest,md5) ==> 'string' --- path_to_target_filename
35611 - (dest) ==> ('next', 'highest') --- next_target and most-recent_target
35612 - """
35613 + """Resolves a config-protect filename for merging, optionally
35614 + using the last filename if the md5 matches. If force is True,
35615 + then a new filename will be generated even if mydest does not
35616 + exist yet.
35617 + (dest,md5) ==> 'string' --- path_to_target_filename
35618 + (dest) ==> ('next', 'highest') --- next_target and most-recent_target
35619 + """
35620 +
35621 + # config protection filename format:
35622 + # ._cfg0000_foo
35623 + # 0123456789012
35624 +
35625 + os = _os_merge
35626 +
35627 + prot_num = -1
35628 + last_pfile = ""
35629 +
35630 + if not force and not os.path.exists(mydest):
35631 + return mydest
35632 +
35633 + real_filename = os.path.basename(mydest)
35634 + real_dirname = os.path.dirname(mydest)
35635 + for pfile in os.listdir(real_dirname):
35636 + if pfile[0:5] != "._cfg":
35637 + continue
35638 + if pfile[10:] != real_filename:
35639 + continue
35640 + try:
35641 + new_prot_num = int(pfile[5:9])
35642 + if new_prot_num > prot_num:
35643 + prot_num = new_prot_num
35644 + last_pfile = pfile
35645 + except ValueError:
35646 + continue
35647 + prot_num = prot_num + 1
35648 +
35649 + new_pfile = normalize_path(
35650 + os.path.join(
35651 + real_dirname, "._cfg" + str(prot_num).zfill(4) + "_" + real_filename
35652 + )
35653 + )
35654 + old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
35655 + if last_pfile and newmd5:
35656 + try:
35657 + old_pfile_st = os.lstat(old_pfile)
35658 + except OSError as e:
35659 + if e.errno != errno.ENOENT:
35660 + raise
35661 + else:
35662 + if stat.S_ISLNK(old_pfile_st.st_mode):
35663 + try:
35664 + # Read symlink target as bytes, in case the
35665 + # target path has a bad encoding.
35666 + pfile_link = os.readlink(
35667 + _unicode_encode(
35668 + old_pfile, encoding=_encodings["merge"], errors="strict"
35669 + )
35670 + )
35671 + except OSError as e:
35672 + if e.errno != errno.ENOENT:
35673 + raise
35674 + else:
35675 + pfile_link = _unicode_decode(
35676 + pfile_link, encoding=_encodings["merge"], errors="replace"
35677 + )
35678 + if pfile_link == newmd5:
35679 + return old_pfile
35680 + else:
35681 + try:
35682 + last_pfile_md5 = portage.checksum._perform_md5_merge(old_pfile)
35683 + except FileNotFound:
35684 + # The file suddenly disappeared or it's a
35685 + # broken symlink.
35686 + pass
35687 + else:
35688 + if last_pfile_md5 == newmd5:
35689 + return old_pfile
35690 + return new_pfile
35691
35692 - # config protection filename format:
35693 - # ._cfg0000_foo
35694 - # 0123456789012
35695 -
35696 - os = _os_merge
35697 -
35698 - prot_num = -1
35699 - last_pfile = ""
35700 -
35701 - if not force and \
35702 - not os.path.exists(mydest):
35703 - return mydest
35704 -
35705 - real_filename = os.path.basename(mydest)
35706 - real_dirname = os.path.dirname(mydest)
35707 - for pfile in os.listdir(real_dirname):
35708 - if pfile[0:5] != "._cfg":
35709 - continue
35710 - if pfile[10:] != real_filename:
35711 - continue
35712 - try:
35713 - new_prot_num = int(pfile[5:9])
35714 - if new_prot_num > prot_num:
35715 - prot_num = new_prot_num
35716 - last_pfile = pfile
35717 - except ValueError:
35718 - continue
35719 - prot_num = prot_num + 1
35720 -
35721 - new_pfile = normalize_path(os.path.join(real_dirname,
35722 - "._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
35723 - old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
35724 - if last_pfile and newmd5:
35725 - try:
35726 - old_pfile_st = os.lstat(old_pfile)
35727 - except OSError as e:
35728 - if e.errno != errno.ENOENT:
35729 - raise
35730 - else:
35731 - if stat.S_ISLNK(old_pfile_st.st_mode):
35732 - try:
35733 - # Read symlink target as bytes, in case the
35734 - # target path has a bad encoding.
35735 - pfile_link = os.readlink(_unicode_encode(old_pfile,
35736 - encoding=_encodings['merge'], errors='strict'))
35737 - except OSError:
35738 - if e.errno != errno.ENOENT:
35739 - raise
35740 - else:
35741 - pfile_link = _unicode_decode(pfile_link,
35742 - encoding=_encodings['merge'], errors='replace')
35743 - if pfile_link == newmd5:
35744 - return old_pfile
35745 - else:
35746 - try:
35747 - last_pfile_md5 = \
35748 - portage.checksum._perform_md5_merge(old_pfile)
35749 - except FileNotFound:
35750 - # The file suddenly disappeared or it's a
35751 - # broken symlink.
35752 - pass
35753 - else:
35754 - if last_pfile_md5 == newmd5:
35755 - return old_pfile
35756 - return new_pfile
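The '._cfg####_' scheme used by new_protect_filename() above keeps the zero-padded update counter in characters 5 through 8, with the original name starting at index 10, so successive staged updates of a protected file named conf would be ._cfg0000_conf, ._cfg0001_conf, and so on. The slicing in the function can be checked directly:

    pfile = "._cfg0007_conf"
    assert pfile[0:5] == "._cfg"   # marker
    assert int(pfile[5:9]) == 7    # update counter
    assert pfile[10:] == "conf"    # original filename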
35757
35758 def find_updated_config_files(target_root, config_protect):
35759 - """
35760 - Return a tuple of configuration files that needs to be updated.
35761 - The tuple contains lists organized like this:
35762 - [protected_dir, file_list]
35763 - If the protected config isn't a protected_dir but a procted_file, list is:
35764 - [protected_file, None]
35765 - If no configuration files needs to be updated, None is returned
35766 - """
35767 + """
35768 + Yield tuples of configuration files that need to be updated.
35769 + Each tuple is organized like this:
35770 + [protected_dir, file_list]
35771 + If the protected config isn't a protected_dir but a protected_file, the tuple is:
35772 + [protected_file, None]
35773 + If no configuration files need to be updated, nothing is yielded.
35774 + """
35775 +
35776 + encoding = _encodings["fs"]
35777 +
35778 + if config_protect:
35779 + # directories with some protect files in them
35780 + for x in config_protect:
35781 + files = []
35782 +
35783 + x = os.path.join(target_root, x.lstrip(os.path.sep))
35784 + if not os.access(x, os.W_OK):
35785 + continue
35786 + try:
35787 + mymode = os.lstat(x).st_mode
35788 + except OSError:
35789 + continue
35790 +
35791 + if stat.S_ISLNK(mymode):
35792 + # We want to treat it like a directory if it
35793 + # is a symlink to an existing directory.
35794 + try:
35795 + real_mode = os.stat(x).st_mode
35796 + if stat.S_ISDIR(real_mode):
35797 + mymode = real_mode
35798 + except OSError:
35799 + pass
35800 +
35801 + if stat.S_ISDIR(mymode):
35802 + mycommand = (
35803 + "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
35804 + )
35805 + else:
35806 + mycommand = (
35807 + "find '%s' -maxdepth 1 -name '._cfg????_%s'"
35808 + % os.path.split(x.rstrip(os.path.sep))
35809 + )
35810 + mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
35811 + cmd = shlex_split(mycommand)
35812 +
35813 + cmd = [
35814 + _unicode_encode(arg, encoding=encoding, errors="strict") for arg in cmd
35815 + ]
35816 + proc = subprocess.Popen(
35817 + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
35818 + )
35819 + output = _unicode_decode(proc.communicate()[0], encoding=encoding)
35820 + status = proc.wait()
35821 + if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
35822 + files = output.split("\0")
35823 + # split always produces an empty string as the last element
35824 + if files and not files[-1]:
35825 + del files[-1]
35826 + if files:
35827 + if stat.S_ISDIR(mymode):
35828 + yield (x, files)
35829 + else:
35830 + yield (x, None)
35831 +
35832 +
35833 + _ld_so_include_re = re.compile(r"^include\s+(\S.*)")
35834
35835 - encoding = _encodings['fs']
35836 -
35837 - if config_protect:
35838 - # directories with some protect files in them
35839 - for x in config_protect:
35840 - files = []
35841 -
35842 - x = os.path.join(target_root, x.lstrip(os.path.sep))
35843 - if not os.access(x, os.W_OK):
35844 - continue
35845 - try:
35846 - mymode = os.lstat(x).st_mode
35847 - except OSError:
35848 - continue
35849 -
35850 - if stat.S_ISLNK(mymode):
35851 - # We want to treat it like a directory if it
35852 - # is a symlink to an existing directory.
35853 - try:
35854 - real_mode = os.stat(x).st_mode
35855 - if stat.S_ISDIR(real_mode):
35856 - mymode = real_mode
35857 - except OSError:
35858 - pass
35859 -
35860 - if stat.S_ISDIR(mymode):
35861 - mycommand = \
35862 - "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
35863 - else:
35864 - mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
35865 - os.path.split(x.rstrip(os.path.sep))
35866 - mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
35867 - cmd = shlex_split(mycommand)
35868 -
35869 - cmd = [_unicode_encode(arg, encoding=encoding, errors='strict')
35870 - for arg in cmd]
35871 - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
35872 - stderr=subprocess.STDOUT)
35873 - output = _unicode_decode(proc.communicate()[0], encoding=encoding)
35874 - status = proc.wait()
35875 - if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
35876 - files = output.split('\0')
35877 - # split always produces an empty string as the last element
35878 - if files and not files[-1]:
35879 - del files[-1]
35880 - if files:
35881 - if stat.S_ISDIR(mymode):
35882 - yield (x, files)
35883 - else:
35884 - yield (x, None)
35885 -
35886 - _ld_so_include_re = re.compile(r'^include\s+(\S.*)')
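A consumer of find_updated_config_files() iterates over the generator, receiving (directory, file_list) pairs for protected directories and (path, None) for individually protected files. A sketch with hypothetical arguments:

    config_protect = ["/etc"]  # illustrative CONFIG_PROTECT entry
    for path, files in find_updated_config_files("/", config_protect):
        if files is None:
            print("pending update for protected file: %s" % path)
        else:
            print("protected dir %s has %d pending update(s)" % (path, len(files)))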
35887
35888 def getlibpaths(root, env=None):
35889 - def read_ld_so_conf(path):
35890 - for l in grabfile(path):
35891 - include_match = _ld_so_include_re.match(l)
35892 - if include_match is not None:
35893 - subpath = os.path.join(os.path.dirname(path),
35894 - include_match.group(1))
35895 - for p in glob.glob(subpath):
35896 - for r in read_ld_so_conf(p):
35897 - yield r
35898 - else:
35899 - yield l
35900 -
35901 - """ Return a list of paths that are used for library lookups """
35902 - if env is None:
35903 - env = os.environ
35904 -
35905 - # PREFIX HACK: LD_LIBRARY_PATH isn't portable, and considered
35906 - # harmfull, so better not use it. We don't need any host OS lib
35907 - # paths either, so do Prefix case.
35908 - if EPREFIX != '':
35909 - rval = []
35910 - rval.append(EPREFIX + "/usr/lib")
35911 - rval.append(EPREFIX + "/lib")
35912 - # we don't know the CHOST here, so it's a bit hard to guess
35913 - # where GCC's and ld's libs are. Though, GCC's libs should be
35914 - # in lib and usr/lib, binutils' libs rarely used
35915 - else:
35916 - # the following is based on the information from ld.so(8)
35917 - rval = env.get("LD_LIBRARY_PATH", "").split(":")
35918 - rval.extend(read_ld_so_conf(os.path.join(root, "etc", "ld.so.conf")))
35919 - rval.append("/usr/lib")
35920 - rval.append("/lib")
35921 -
35922 - return [normalize_path(x) for x in rval if x]
35923 + def read_ld_so_conf(path):
35924 + for l in grabfile(path):
35925 + include_match = _ld_so_include_re.match(l)
35926 + if include_match is not None:
35927 + subpath = os.path.join(os.path.dirname(path), include_match.group(1))
35928 + for p in glob.glob(subpath):
35929 + for r in read_ld_so_conf(p):
35930 + yield r
35931 + else:
35932 + yield l
35933 +
35934 + """ Return a list of paths that are used for library lookups """
35935 + if env is None:
35936 + env = os.environ
35937 - # the following is based on the information from ld.so(8)
35938 - rval = env.get("LD_LIBRARY_PATH", "").split(":")
35939 - rval.extend(read_ld_so_conf(os.path.join(root, "etc", "ld.so.conf")))
35940 - rval.append("/usr/lib")
35941 - rval.append("/lib")
35942 ++ # BEGIN PREFIX LOCAL:
35943 ++ # LD_LIBRARY_PATH isn't portable, and considered harmful, so better
35944 ++ # not to use it. We don't need any host OS lib paths either, so
35945 ++ # handle the Prefix case.
35946 ++ if EPREFIX != '':
35947 ++ rval = []
35948 ++ rval.append(EPREFIX + "/usr/lib")
35949 ++ rval.append(EPREFIX + "/lib")
35950 ++ # we don't know the CHOST here, so it's a bit hard to guess
35951 ++ # where GCC's and ld's libs are. GCC's libs should be in lib
35952 ++ # and usr/lib, though, and binutils' libs are rarely used.
35953 ++ else:
35954 ++ # END PREFIX LOCAL
35955 ++ # the following is based on the information from ld.so(8)
35956 ++ rval = env.get("LD_LIBRARY_PATH", "").split(":")
35957 ++ rval.extend(read_ld_so_conf(os.path.join(root, "etc", "ld.so.conf")))
35958 ++ rval.append("/usr/lib")
35959 ++ rval.append("/lib")
35960 +
35961 + return [normalize_path(x) for x in rval if x]
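For reference, the include handling in read_ld_so_conf() mirrors the stock ld.so.conf layout on Linux, where the top-level file pulls in per-package fragments through a glob that is resolved relative to the including file's directory:

    # /etc/ld.so.conf (typical contents)
    include ld.so.conf.d/*.conf

The regex captures everything after the 'include' keyword:

    import re

    _ld_so_include_re = re.compile(r"^include\s+(\S.*)")
    m = _ld_so_include_re.match("include ld.so.conf.d/*.conf")
    print(m.group(1))  # ld.so.conf.d/*.conf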
35962 diff --cc lib/portage/util/_info_files.py
35963 index de44b0fdc,528b273d9..2a8d277b3
35964 --- a/lib/portage/util/_info_files.py
35965 +++ b/lib/portage/util/_info_files.py
35966 @@@ -9,131 -9,132 +9,136 @@@ import subproces
35967
35968 import portage
35969 from portage import os
35970 +from portage.const import EPREFIX
35971
35972 +
35973 def chk_updated_info_files(root, infodirs, prev_mtimes):
35974
35975 - if os.path.exists(EPREFIX + "/usr/bin/install-info"):
35976 - out = portage.output.EOutput()
35977 - regen_infodirs = []
35978 - for z in infodirs:
35979 - if z == '':
35980 - continue
35981 - inforoot = portage.util.normalize_path(root + EPREFIX + z)
35982 - if os.path.isdir(inforoot) and \
35983 - not [x for x in os.listdir(inforoot) \
35984 - if x.startswith('.keepinfodir')]:
35985 - infomtime = os.stat(inforoot)[stat.ST_MTIME]
35986 - if inforoot not in prev_mtimes or \
35987 - prev_mtimes[inforoot] != infomtime:
35988 - regen_infodirs.append(inforoot)
35989 - if os.path.exists("/usr/bin/install-info"):
35990 ++ # PREFIX LOCAL
35991 ++ if os.path.exists(EPREFIX + "/usr/bin/install-info"):
35992 + out = portage.output.EOutput()
35993 + regen_infodirs = []
35994 + for z in infodirs:
35995 + if z == "":
35996 + continue
35997 - inforoot = portage.util.normalize_path(root + z)
35998 ++ # PREFIX LOCAL
35999 ++ inforoot = portage.util.normalize_path(root + EPREFIX + z)
36000 + if os.path.isdir(inforoot) and not [
36001 + x for x in os.listdir(inforoot) if x.startswith(".keepinfodir")
36002 + ]:
36003 + infomtime = os.stat(inforoot)[stat.ST_MTIME]
36004 + if inforoot not in prev_mtimes or prev_mtimes[inforoot] != infomtime:
36005 + regen_infodirs.append(inforoot)
36006
36007 - if not regen_infodirs:
36008 - portage.util.writemsg_stdout("\n")
36009 - if portage.util.noiselimit >= 0:
36010 - out.einfo("GNU info directory index is up-to-date.")
36011 - else:
36012 - portage.util.writemsg_stdout("\n")
36013 - if portage.util.noiselimit >= 0:
36014 - out.einfo("Regenerating GNU info directory index...")
36015 + if not regen_infodirs:
36016 + portage.util.writemsg_stdout("\n")
36017 + if portage.util.noiselimit >= 0:
36018 + out.einfo("GNU info directory index is up-to-date.")
36019 + else:
36020 + portage.util.writemsg_stdout("\n")
36021 + if portage.util.noiselimit >= 0:
36022 + out.einfo("Regenerating GNU info directory index...")
36023
36024 - dir_extensions = ("", ".gz", ".bz2")
36025 - icount = 0
36026 - badcount = 0
36027 - errmsg = ""
36028 - for inforoot in regen_infodirs:
36029 - if inforoot == '':
36030 - continue
36031 + dir_extensions = ("", ".gz", ".bz2")
36032 + icount = 0
36033 + badcount = 0
36034 + errmsg = ""
36035 + for inforoot in regen_infodirs:
36036 + if inforoot == "":
36037 + continue
36038
36039 - if not os.path.isdir(inforoot) or \
36040 - not os.access(inforoot, os.W_OK):
36041 - continue
36042 + if not os.path.isdir(inforoot) or not os.access(inforoot, os.W_OK):
36043 + continue
36044
36045 - file_list = os.listdir(inforoot)
36046 - file_list.sort()
36047 - dir_file = os.path.join(inforoot, "dir")
36048 - moved_old_dir = False
36049 - processed_count = 0
36050 - for x in file_list:
36051 - if x.startswith(".") or \
36052 - os.path.isdir(os.path.join(inforoot, x)):
36053 - continue
36054 - if x.startswith("dir"):
36055 - skip = False
36056 - for ext in dir_extensions:
36057 - if x == "dir" + ext or \
36058 - x == "dir" + ext + ".old":
36059 - skip = True
36060 - break
36061 - if skip:
36062 - continue
36063 - if processed_count == 0:
36064 - for ext in dir_extensions:
36065 - try:
36066 - os.rename(dir_file + ext, dir_file + ext + ".old")
36067 - moved_old_dir = True
36068 - except EnvironmentError as e:
36069 - if e.errno != errno.ENOENT:
36070 - raise
36071 - del e
36072 - processed_count += 1
36073 - try:
36074 - proc = subprocess.Popen(
36075 - ['%s/usr/bin/install-info' % EPREFIX,
36076 - '--dir-file=%s' % os.path.join(inforoot, "dir"),
36077 - os.path.join(inforoot, x)],
36078 - env=dict(os.environ, LANG="C", LANGUAGE="C"),
36079 - stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
36080 - except OSError:
36081 - myso = None
36082 - else:
36083 - myso = portage._unicode_decode(
36084 - proc.communicate()[0]).rstrip("\n")
36085 - proc.wait()
36086 - existsstr = "already exists, for file `"
36087 - if myso:
36088 - if re.search(existsstr, myso):
36089 - # Already exists... Don't increment the count for this.
36090 - pass
36091 - elif myso[:44] == "install-info: warning: no info dir entry in ":
36092 - # This info file doesn't contain a DIR-header: install-info produces this
36093 - # (harmless) warning (the --quiet switch doesn't seem to work).
36094 - # Don't increment the count for this.
36095 - pass
36096 - else:
36097 - badcount += 1
36098 - errmsg += myso + "\n"
36099 - icount += 1
36100 + file_list = os.listdir(inforoot)
36101 + file_list.sort()
36102 + dir_file = os.path.join(inforoot, "dir")
36103 + moved_old_dir = False
36104 + processed_count = 0
36105 + for x in file_list:
36106 + if x.startswith(".") or os.path.isdir(os.path.join(inforoot, x)):
36107 + continue
36108 + if x.startswith("dir"):
36109 + skip = False
36110 + for ext in dir_extensions:
36111 + if x == "dir" + ext or x == "dir" + ext + ".old":
36112 + skip = True
36113 + break
36114 + if skip:
36115 + continue
36116 + if processed_count == 0:
36117 + for ext in dir_extensions:
36118 + try:
36119 + os.rename(dir_file + ext, dir_file + ext + ".old")
36120 + moved_old_dir = True
36121 + except EnvironmentError as e:
36122 + if e.errno != errno.ENOENT:
36123 + raise
36124 + del e
36125 + processed_count += 1
36126 + try:
36127 + proc = subprocess.Popen(
36128 + [
36129 - "/usr/bin/install-info",
36130 ++ # PREFIX LOCAL
36131 ++ "%s/usr/bin/install-info", EPREFIX,
36132 + "--dir-file=%s" % os.path.join(inforoot, "dir"),
36133 + os.path.join(inforoot, x),
36134 + ],
36135 + env=dict(os.environ, LANG="C", LANGUAGE="C"),
36136 + stdout=subprocess.PIPE,
36137 + stderr=subprocess.STDOUT,
36138 + )
36139 + except OSError:
36140 + myso = None
36141 + else:
36142 + myso = portage._unicode_decode(proc.communicate()[0]).rstrip(
36143 + "\n"
36144 + )
36145 + proc.wait()
36146 + existsstr = "already exists, for file `"
36147 + if myso:
36148 + if re.search(existsstr, myso):
36149 + # Already exists... Don't increment the count for this.
36150 + pass
36151 + elif (
36152 + myso[:44] == "install-info: warning: no info dir entry in "
36153 + ):
36154 + # This info file doesn't contain a DIR-header: install-info produces this
36155 + # (harmless) warning (the --quiet switch doesn't seem to work).
36156 + # Don't increment the count for this.
36157 + pass
36158 + else:
36159 + badcount += 1
36160 + errmsg += myso + "\n"
36161 + icount += 1
36162
36163 - if moved_old_dir and not os.path.exists(dir_file):
36164 - # We didn't generate a new dir file, so put the old file
36165 - # back where it was originally found.
36166 - for ext in dir_extensions:
36167 - try:
36168 - os.rename(dir_file + ext + ".old", dir_file + ext)
36169 - except EnvironmentError as e:
36170 - if e.errno != errno.ENOENT:
36171 - raise
36172 - del e
36173 + if moved_old_dir and not os.path.exists(dir_file):
36174 + # We didn't generate a new dir file, so put the old file
36175 + # back where it was originally found.
36176 + for ext in dir_extensions:
36177 + try:
36178 + os.rename(dir_file + ext + ".old", dir_file + ext)
36179 + except EnvironmentError as e:
36180 + if e.errno != errno.ENOENT:
36181 + raise
36182 + del e
36183
36184 - # Clean dir.old cruft so that they don't prevent
36185 - # unmerge of otherwise empty directories.
36186 - for ext in dir_extensions:
36187 - try:
36188 - os.unlink(dir_file + ext + ".old")
36189 - except EnvironmentError as e:
36190 - if e.errno != errno.ENOENT:
36191 - raise
36192 - del e
36193 + # Clean dir.old cruft so that they don't prevent
36194 + # unmerge of otherwise empty directories.
36195 + for ext in dir_extensions:
36196 + try:
36197 + os.unlink(dir_file + ext + ".old")
36198 + except EnvironmentError as e:
36199 + if e.errno != errno.ENOENT:
36200 + raise
36201 + del e
36202
36203 - #update mtime so we can potentially avoid regenerating.
36204 - prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]
36205 + # update mtime so we can potentially avoid regenerating.
36206 + prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]
36207
36208 - if badcount:
36209 - out.eerror("Processed %d info files; %d errors." % \
36210 - (icount, badcount))
36211 - portage.util.writemsg_level(errmsg,
36212 - level=logging.ERROR, noiselevel=-1)
36213 - else:
36214 - if icount > 0 and portage.util.noiselimit >= 0:
36215 - out.einfo("Processed %d info files." % (icount,))
36216 + if badcount:
36217 + out.eerror("Processed %d info files; %d errors." % (icount, badcount))
36218 + portage.util.writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
36219 + else:
36220 + if icount > 0 and portage.util.noiselimit >= 0:
36221 + out.einfo("Processed %d info files." % (icount,))
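Under a Prefix install the helper binary lives inside EPREFIX, so the command assembled above comes out as shown in this sketch (the inforoot and info file are illustrative):

    import os
    import subprocess

    from portage.const import EPREFIX

    inforoot = os.path.join(EPREFIX, "usr/share/info")  # hypothetical inforoot
    cmd = [
        "%s/usr/bin/install-info" % EPREFIX,
        "--dir-file=%s" % os.path.join(inforoot, "dir"),
        os.path.join(inforoot, "portage.info"),  # hypothetical info file
    ]
    proc = subprocess.Popen(
        cmd,
        env=dict(os.environ, LANG="C", LANGUAGE="C"),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    output = proc.communicate()[0]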
36222 diff --cc lib/portage/util/_pty.py
36223 index a92f57543,e58f95e0a..40ab1a8db
36224 --- a/lib/portage/util/_pty.py
36225 +++ b/lib/portage/util/_pty.py
36226 @@@ -9,70 -9,68 +9,68 @@@ from portage import o
36227 from portage.output import get_term_size, set_term_size
36228 from portage.util import writemsg
36229
36230 -# Disable the use of openpty on Solaris as it seems Python's openpty
36231 -# implementation doesn't play nice on Solaris with Portage's
36232 -# behaviour causing hangs/deadlocks.
36233 +# Disable the use of openpty on Solaris (and others) as it seems Python's
36234 +# openpty implementation doesn't play nice with Portage's behaviour,
36235 +# causing hangs/deadlocks.
36236 # Additional note for the future: on Interix, pipes do NOT work, so
36237 # _disable_openpty on Interix must *never* be True
36238 -_disable_openpty = platform.system() in ("SunOS",)
36239 +_disable_openpty = platform.system() in ("AIX", "FreeMiNT", "HP-UX", "SunOS")
36240
36241 - _fbsd_test_pty = platform.system() == 'FreeBSD'
36242 + _fbsd_test_pty = platform.system() == "FreeBSD"
36243 +
36244
36245 def _create_pty_or_pipe(copy_term_size=None):
36246 - """
36247 - Try to create a pty and if then fails then create a normal
36248 - pipe instead.
36249 + """
36250 + Try to create a pty and, if that fails, create a normal
36251 + pipe instead.
36252
36253 - @param copy_term_size: If a tty file descriptor is given
36254 - then the term size will be copied to the pty.
36255 - @type copy_term_size: int
36256 - @rtype: tuple
36257 - @return: A tuple of (is_pty, master_fd, slave_fd) where
36258 - is_pty is True if a pty was successfully allocated, and
36259 - False if a normal pipe was allocated.
36260 - """
36261 + @param copy_term_size: If a tty file descriptor is given
36262 + then the term size will be copied to the pty.
36263 + @type copy_term_size: int
36264 + @rtype: tuple
36265 + @return: A tuple of (is_pty, master_fd, slave_fd) where
36266 + is_pty is True if a pty was successfully allocated, and
36267 + False if a normal pipe was allocated.
36268 + """
36269
36270 - got_pty = False
36271 + got_pty = False
36272
36273 - global _disable_openpty, _fbsd_test_pty
36274 + global _disable_openpty, _fbsd_test_pty
36275
36276 - if _fbsd_test_pty and not _disable_openpty:
36277 - # Test for python openpty breakage after freebsd7 to freebsd8
36278 - # upgrade, which results in a 'Function not implemented' error
36279 - # and the process being killed.
36280 - pid = os.fork()
36281 - if pid == 0:
36282 - pty.openpty()
36283 - os._exit(os.EX_OK)
36284 - pid, status = os.waitpid(pid, 0)
36285 - if (status & 0xff) == 140:
36286 - _disable_openpty = True
36287 - _fbsd_test_pty = False
36288 + if _fbsd_test_pty and not _disable_openpty:
36289 + # Test for python openpty breakage after freebsd7 to freebsd8
36290 + # upgrade, which results in a 'Function not implemented' error
36291 + # and the process being killed.
36292 + pid = os.fork()
36293 + if pid == 0:
36294 + pty.openpty()
36295 + os._exit(os.EX_OK)
36296 + pid, status = os.waitpid(pid, 0)
36297 + if (status & 0xFF) == 140:
36298 + _disable_openpty = True
36299 + _fbsd_test_pty = False
36300
36301 - if _disable_openpty:
36302 - master_fd, slave_fd = os.pipe()
36303 - else:
36304 - try:
36305 - master_fd, slave_fd = pty.openpty()
36306 - got_pty = True
36307 - except EnvironmentError as e:
36308 - _disable_openpty = True
36309 - writemsg("openpty failed: '%s'\n" % str(e),
36310 - noiselevel=-1)
36311 - del e
36312 - master_fd, slave_fd = os.pipe()
36313 + if _disable_openpty:
36314 + master_fd, slave_fd = os.pipe()
36315 + else:
36316 + try:
36317 + master_fd, slave_fd = pty.openpty()
36318 + got_pty = True
36319 + except EnvironmentError as e:
36320 + _disable_openpty = True
36321 + writemsg("openpty failed: '%s'\n" % str(e), noiselevel=-1)
36322 + del e
36323 + master_fd, slave_fd = os.pipe()
36324
36325 - if got_pty:
36326 - # Disable post-processing of output since otherwise weird
36327 - # things like \n -> \r\n transformations may occur.
36328 - mode = termios.tcgetattr(slave_fd)
36329 - mode[1] &= ~termios.OPOST
36330 - termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
36331 + if got_pty:
36332 + # Disable post-processing of output since otherwise weird
36333 + # things like \n -> \r\n transformations may occur.
36334 + mode = termios.tcgetattr(slave_fd)
36335 + mode[1] &= ~termios.OPOST
36336 + termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
36337
36338 - if got_pty and \
36339 - copy_term_size is not None and \
36340 - os.isatty(copy_term_size):
36341 - rows, columns = get_term_size()
36342 - set_term_size(rows, columns, slave_fd)
36343 + if got_pty and copy_term_size is not None and os.isatty(copy_term_size):
36344 + rows, columns = get_term_size()
36345 + set_term_size(rows, columns, slave_fd)
36346
36347 - return (got_pty, master_fd, slave_fd)
36348 + return (got_pty, master_fd, slave_fd)
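Callers hand the slave descriptor to a child process and read its output through the master end, regardless of whether a real pty or a plain pipe was allocated. A minimal sketch using the private helper above:

    import os

    from portage.util._pty import _create_pty_or_pipe

    got_pty, master_fd, slave_fd = _create_pty_or_pipe(copy_term_size=1)
    pid = os.fork()
    if pid == 0:
        os.dup2(slave_fd, 1)  # child writes to the slave end
        os.close(master_fd)
        os.write(1, b"hello from child\n")
        os._exit(os.EX_OK)
    os.close(slave_fd)
    print(os.read(master_fd, 4096))  # parent reads from the master end
    os.waitpid(pid, 0)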
36349 diff --cc lib/portage/util/env_update.py
36350 index 31aacc292,bb0ebf84c..b7f27dfb7
36351 --- a/lib/portage/util/env_update.py
36352 +++ b/lib/portage/util/env_update.py
36353 @@@ -23,374 -28,419 +28,422 @@@ from portage.dbapi.vartree import vartr
36354 from portage.package.ebuild.config import config
36355
36356
36357 - def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
36358 - env=None, writemsg_level=None, vardbapi=None):
36359 - """
36360 - Parse /etc/env.d and use it to generate /etc/profile.env, csh.env,
36361 - ld.so.conf, and prelink.conf. Finally, run ldconfig. When ldconfig is
36362 - called, its -X option will be used in order to avoid potential
36363 - interference with installed soname symlinks that are required for
36364 - correct operation of FEATURES=preserve-libs for downgrade operations.
36365 - It's not necessary for ldconfig to create soname symlinks, since
36366 - portage will use NEEDED.ELF.2 data to automatically create them
36367 - after src_install if they happen to be missing.
36368 - @param makelinks: True if ldconfig should be called, False otherwise
36369 - @param target_root: root that is passed to the ldconfig -r option,
36370 - defaults to portage.settings["ROOT"].
36371 - @type target_root: String (Path)
36372 - """
36373 - if vardbapi is None:
36374 - if isinstance(env, config):
36375 - vardbapi = vartree(settings=env).dbapi
36376 - else:
36377 - if target_root is None:
36378 - eprefix = portage.settings["EPREFIX"]
36379 - target_root = portage.settings["ROOT"]
36380 - target_eroot = portage.settings['EROOT']
36381 - else:
36382 - eprefix = portage.const.EPREFIX
36383 - target_eroot = os.path.join(target_root,
36384 - eprefix.lstrip(os.sep))
36385 - target_eroot = target_eroot.rstrip(os.sep) + os.sep
36386 - if hasattr(portage, "db") and target_eroot in portage.db:
36387 - vardbapi = portage.db[target_eroot]["vartree"].dbapi
36388 - else:
36389 - settings = config(config_root=target_root,
36390 - target_root=target_root, eprefix=eprefix)
36391 - target_root = settings["ROOT"]
36392 - if env is None:
36393 - env = settings
36394 - vardbapi = vartree(settings=settings).dbapi
36395 -
36396 - # Lock the config memory file to prevent symlink creation
36397 - # in merge_contents from overlapping with env-update.
36398 - vardbapi._fs_lock()
36399 - try:
36400 - return _env_update(makelinks, target_root, prev_mtimes, contents,
36401 - env, writemsg_level)
36402 - finally:
36403 - vardbapi._fs_unlock()
36404 -
36405 - def _env_update(makelinks, target_root, prev_mtimes, contents, env,
36406 - writemsg_level):
36407 - if writemsg_level is None:
36408 - writemsg_level = portage.util.writemsg_level
36409 - if target_root is None:
36410 - target_root = portage.settings["ROOT"]
36411 - if prev_mtimes is None:
36412 - prev_mtimes = portage.mtimedb["ldpath"]
36413 - if env is None:
36414 - settings = portage.settings
36415 - else:
36416 - settings = env
36417 -
36418 - eprefix = settings.get("EPREFIX", portage.const.EPREFIX)
36419 - eprefix_lstrip = eprefix.lstrip(os.sep)
36420 - eroot = normalize_path(os.path.join(target_root, eprefix_lstrip)).rstrip(os.sep) + os.sep
36421 - envd_dir = os.path.join(eroot, "etc", "env.d")
36422 - ensure_dirs(envd_dir, mode=0o755)
36423 - fns = listdir(envd_dir, EmptyOnError=1)
36424 - fns.sort()
36425 - templist = []
36426 - for x in fns:
36427 - if len(x) < 3:
36428 - continue
36429 - if not x[0].isdigit() or not x[1].isdigit():
36430 - continue
36431 - if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
36432 - continue
36433 - templist.append(x)
36434 - fns = templist
36435 - del templist
36436 -
36437 - space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
36438 - colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
36439 - "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
36440 - "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
36441 - "PYTHONPATH", "ROOTPATH"])
36442 -
36443 - config_list = []
36444 -
36445 - for x in fns:
36446 - file_path = os.path.join(envd_dir, x)
36447 - try:
36448 - myconfig = getconfig(file_path, expand=False)
36449 - except ParseError as e:
36450 - writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
36451 - del e
36452 - continue
36453 - if myconfig is None:
36454 - # broken symlink or file removed by a concurrent process
36455 - writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
36456 - continue
36457 -
36458 - config_list.append(myconfig)
36459 - if "SPACE_SEPARATED" in myconfig:
36460 - space_separated.update(myconfig["SPACE_SEPARATED"].split())
36461 - del myconfig["SPACE_SEPARATED"]
36462 - if "COLON_SEPARATED" in myconfig:
36463 - colon_separated.update(myconfig["COLON_SEPARATED"].split())
36464 - del myconfig["COLON_SEPARATED"]
36465 -
36466 - env = {}
36467 - specials = {}
36468 - for var in space_separated:
36469 - mylist = []
36470 - for myconfig in config_list:
36471 - if var in myconfig:
36472 - for item in myconfig[var].split():
36473 - if item and not item in mylist:
36474 - mylist.append(item)
36475 - del myconfig[var] # prepare for env.update(myconfig)
36476 - if mylist:
36477 - env[var] = " ".join(mylist)
36478 - specials[var] = mylist
36479 -
36480 - for var in colon_separated:
36481 - mylist = []
36482 - for myconfig in config_list:
36483 - if var in myconfig:
36484 - for item in myconfig[var].split(":"):
36485 - if item and not item in mylist:
36486 - mylist.append(item)
36487 - del myconfig[var] # prepare for env.update(myconfig)
36488 - if mylist:
36489 - env[var] = ":".join(mylist)
36490 - specials[var] = mylist
36491 -
36492 - for myconfig in config_list:
36493 - """Cumulative variables have already been deleted from myconfig so that
36494 - they won't be overwritten by this dict.update call."""
36495 - env.update(myconfig)
36496 -
36497 - ldsoconf_path = os.path.join(eroot, "etc", "ld.so.conf")
36498 - try:
36499 - myld = io.open(_unicode_encode(ldsoconf_path,
36500 - encoding=_encodings['fs'], errors='strict'),
36501 - mode='r', encoding=_encodings['content'], errors='replace')
36502 - myldlines = myld.readlines()
36503 - myld.close()
36504 - oldld = []
36505 - for x in myldlines:
36506 - #each line has at least one char (a newline)
36507 - if x[:1] == "#":
36508 - continue
36509 - oldld.append(x[:-1])
36510 - except (IOError, OSError) as e:
36511 - if e.errno != errno.ENOENT:
36512 - raise
36513 - oldld = None
36514 -
36515 - newld = specials["LDPATH"]
36516 - if oldld != newld:
36517 - #ld.so.conf needs updating and ldconfig needs to be run
36518 - myfd = atomic_ofstream(ldsoconf_path)
36519 - myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
36520 - myfd.write("# contents of /etc/env.d directory\n")
36521 - for x in specials["LDPATH"]:
36522 - myfd.write(x + "\n")
36523 - myfd.close()
36524 -
36525 - potential_lib_dirs = set()
36526 - for lib_dir_glob in ('usr/lib*', 'lib*'):
36527 - x = os.path.join(eroot, lib_dir_glob)
36528 - for y in glob.glob(_unicode_encode(x,
36529 - encoding=_encodings['fs'], errors='strict')):
36530 - try:
36531 - y = _unicode_decode(y,
36532 - encoding=_encodings['fs'], errors='strict')
36533 - except UnicodeDecodeError:
36534 - continue
36535 - if os.path.basename(y) != 'libexec':
36536 - potential_lib_dirs.add(y[len(eroot):])
36537 -
36538 - # Update prelink.conf if we are prelink-enabled
36539 - if prelink_capable:
36540 - prelink_d = os.path.join(eroot, 'etc', 'prelink.conf.d')
36541 - ensure_dirs(prelink_d)
36542 - newprelink = atomic_ofstream(os.path.join(prelink_d, 'portage.conf'))
36543 - newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
36544 - newprelink.write("# contents of /etc/env.d directory\n")
36545 -
36546 - for x in sorted(potential_lib_dirs) + ['bin', 'sbin']:
36547 - newprelink.write('-l /%s\n' % (x,))
36548 - prelink_paths = set()
36549 - prelink_paths |= set(specials.get('LDPATH', []))
36550 - prelink_paths |= set(specials.get('PATH', []))
36551 - prelink_paths |= set(specials.get('PRELINK_PATH', []))
36552 - prelink_path_mask = specials.get('PRELINK_PATH_MASK', [])
36553 - for x in prelink_paths:
36554 - if not x:
36555 - continue
36556 - if x[-1:] != '/':
36557 - x += "/"
36558 - plmasked = 0
36559 - for y in prelink_path_mask:
36560 - if not y:
36561 - continue
36562 - if y[-1] != '/':
36563 - y += "/"
36564 - if y == x[0:len(y)]:
36565 - plmasked = 1
36566 - break
36567 - if not plmasked:
36568 - newprelink.write("-h %s\n" % (x,))
36569 - for x in prelink_path_mask:
36570 - newprelink.write("-b %s\n" % (x,))
36571 - newprelink.close()
36572 -
36573 - # Migration code path. If /etc/prelink.conf was generated by us, then
36574 - # point it to the new stuff until the prelink package re-installs.
36575 - prelink_conf = os.path.join(eroot, 'etc', 'prelink.conf')
36576 - try:
36577 - with open(_unicode_encode(prelink_conf,
36578 - encoding=_encodings['fs'], errors='strict'), 'rb') as f:
36579 - if f.readline() == b'# prelink.conf autogenerated by env-update; make all changes to\n':
36580 - f = atomic_ofstream(prelink_conf)
36581 - f.write('-c /etc/prelink.conf.d/*.conf\n')
36582 - f.close()
36583 - except IOError as e:
36584 - if e.errno != errno.ENOENT:
36585 - raise
36586 -
36587 - current_time = int(time.time())
36588 - mtime_changed = False
36589 -
36590 - lib_dirs = set()
36591 - for lib_dir in set(specials['LDPATH']) | potential_lib_dirs:
36592 - x = os.path.join(eroot, lib_dir.lstrip(os.sep))
36593 - try:
36594 - newldpathtime = os.stat(x)[stat.ST_MTIME]
36595 - lib_dirs.add(normalize_path(x))
36596 - except OSError as oe:
36597 - if oe.errno == errno.ENOENT:
36598 - try:
36599 - del prev_mtimes[x]
36600 - except KeyError:
36601 - pass
36602 - # ignore this path because it doesn't exist
36603 - continue
36604 - raise
36605 - if newldpathtime == current_time:
36606 - # Reset mtime to avoid the potential ambiguity of times that
36607 - # differ by less than 1 second.
36608 - newldpathtime -= 1
36609 - os.utime(x, (newldpathtime, newldpathtime))
36610 - prev_mtimes[x] = newldpathtime
36611 - mtime_changed = True
36612 - elif x in prev_mtimes:
36613 - if prev_mtimes[x] == newldpathtime:
36614 - pass
36615 - else:
36616 - prev_mtimes[x] = newldpathtime
36617 - mtime_changed = True
36618 - else:
36619 - prev_mtimes[x] = newldpathtime
36620 - mtime_changed = True
36621 -
36622 - if makelinks and \
36623 - not mtime_changed and \
36624 - contents is not None:
36625 - libdir_contents_changed = False
36626 - for mypath, mydata in contents.items():
36627 - if mydata[0] not in ("obj", "sym"):
36628 - continue
36629 - head, tail = os.path.split(mypath)
36630 - if head in lib_dirs:
36631 - libdir_contents_changed = True
36632 - break
36633 - if not libdir_contents_changed:
36634 - makelinks = False
36635 -
36636 - if "CHOST" in settings and "CBUILD" in settings and \
36637 - settings["CHOST"] != settings["CBUILD"]:
36638 - ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])
36639 - else:
36640 - ldconfig = os.path.join(eroot, "sbin", "ldconfig")
36641 -
36642 - if ldconfig is None:
36643 - pass
36644 - elif not (os.access(ldconfig, os.X_OK) and os.path.isfile(ldconfig)):
36645 - ldconfig = None
36646 -
36647 - # Only run ldconfig as needed
36648 - if makelinks and ldconfig:
36649 - # ldconfig has very different behaviour between FreeBSD and Linux
36650 - if ostype == "Linux" or ostype.lower().endswith("gnu"):
36651 - # We can't update links if we haven't cleaned other versions first, as
36652 - # an older package installed ON TOP of a newer version will cause ldconfig
36653 - # to overwrite the symlinks we just made. -X means no links. After 'clean'
36654 - # we can safely create links.
36655 - writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
36656 - (target_root,))
36657 - os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
36658 - elif ostype in ("FreeBSD", "DragonFly"):
36659 - writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
36660 - target_root)
36661 - os.system(("cd / ; %s -elf -i " + \
36662 - "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
36663 - (ldconfig, target_root, target_root))
36664 -
36665 - del specials["LDPATH"]
36666 -
36667 - notice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
36668 - notice += "# DO NOT EDIT THIS FILE."
36669 - penvnotice = notice + " CHANGES TO STARTUP PROFILES\n"
36670 - cenvnotice = penvnotice[:]
36671 - penvnotice += "# GO INTO " + eprefix + "/etc/profile NOT /etc/profile.env\n\n"
36672 - cenvnotice += "# GO INTO " + eprefix + "/etc/csh.cshrc NOT /etc/csh.env\n\n"
36673 -
36674 - #create /etc/profile.env for bash support
36675 - profile_env_path = os.path.join(eroot, "etc", "profile.env")
36676 - with atomic_ofstream(profile_env_path) as outfile:
36677 - outfile.write(penvnotice)
36678 -
36679 - env_keys = [x for x in env if x != "LDPATH"]
36680 - env_keys.sort()
36681 - for k in env_keys:
36682 - v = env[k]
36683 - if v.startswith('$') and not v.startswith('${'):
36684 - outfile.write("export %s=$'%s'\n" % (k, v[1:]))
36685 - else:
36686 - outfile.write("export %s='%s'\n" % (k, v))
36687 -
36688 - # Create the systemd user environment configuration file
36689 - # /etc/environment.d/10-gentoo-env.conf with the
36690 - # environment configuration from /etc/env.d.
36691 - systemd_environment_dir = os.path.join(eroot, "etc", "environment.d")
36692 - os.makedirs(systemd_environment_dir, exist_ok=True)
36693 -
36694 - systemd_gentoo_env_path = os.path.join(systemd_environment_dir,
36695 - "10-gentoo-env.conf")
36696 - with atomic_ofstream(systemd_gentoo_env_path) as systemd_gentoo_env:
36697 - senvnotice = notice + "\n\n"
36698 - systemd_gentoo_env.write(senvnotice)
36699 -
36700 - for env_key in env_keys:
36701 - # Skip PATH since this makes it impossible to use
36702 - # "systemctl --user import-environment PATH".
36703 - if env_key == 'PATH':
36704 - continue
36705 -
36706 - env_key_value = env[env_key]
36707 -
36708 - # Skip variables with the empty string
36709 - # as value. Those sometimes appear in
36710 - # profile.env (e.g. "export GCC_SPECS=''"),
36711 - # but are invalid in systemd's syntax.
36712 - if not env_key_value:
36713 - continue
36714 -
36715 - # Transform into systemd environment.d
36716 - # conf syntax, basically shell variable
36717 - # assignment (without "export ").
36718 - line = f"{env_key}={env_key_value}\n"
36719 -
36720 - systemd_gentoo_env.write(line)
36721 -
36722 - #create /etc/csh.env for (t)csh support
36723 - outfile = atomic_ofstream(os.path.join(eroot, "etc", "csh.env"))
36724 - outfile.write(cenvnotice)
36725 - for x in env_keys:
36726 - outfile.write("setenv %s '%s'\n" % (x, env[x]))
36727 - outfile.close()
36728 + def env_update(
36729 + makelinks=1,
36730 + target_root=None,
36731 + prev_mtimes=None,
36732 + contents=None,
36733 + env=None,
36734 + writemsg_level=None,
36735 + vardbapi=None,
36736 + ):
36737 + """
36738 + Parse /etc/env.d and use it to generate /etc/profile.env, csh.env,
36739 + ld.so.conf, and prelink.conf. Finally, run ldconfig. When ldconfig is
36740 + called, its -X option will be used in order to avoid potential
36741 + interference with installed soname symlinks that are required for
36742 + correct operation of FEATURES=preserve-libs for downgrade operations.
36743 + It's not necessary for ldconfig to create soname symlinks, since
36744 + portage will use NEEDED.ELF.2 data to automatically create them
36745 + after src_install if they happen to be missing.
36746 + @param makelinks: True if ldconfig should be called, False otherwise
36747 + @param target_root: root that is passed to the ldconfig -r option,
36748 + defaults to portage.settings["ROOT"].
36749 + @type target_root: String (Path)
36750 + """
36751 + if vardbapi is None:
36752 + if isinstance(env, config):
36753 + vardbapi = vartree(settings=env).dbapi
36754 + else:
36755 + if target_root is None:
36756 + eprefix = portage.settings["EPREFIX"]
36757 + target_root = portage.settings["ROOT"]
36758 + target_eroot = portage.settings["EROOT"]
36759 + else:
36760 + eprefix = portage.const.EPREFIX
36761 + target_eroot = os.path.join(target_root, eprefix.lstrip(os.sep))
36762 + target_eroot = target_eroot.rstrip(os.sep) + os.sep
36763 + if hasattr(portage, "db") and target_eroot in portage.db:
36764 + vardbapi = portage.db[target_eroot]["vartree"].dbapi
36765 + else:
36766 + settings = config(
36767 + config_root=target_root, target_root=target_root, eprefix=eprefix
36768 + )
36769 + target_root = settings["ROOT"]
36770 + if env is None:
36771 + env = settings
36772 + vardbapi = vartree(settings=settings).dbapi
36773 +
36774 + # Lock the config memory file to prevent symlink creation
36775 + # in merge_contents from overlapping with env-update.
36776 + vardbapi._fs_lock()
36777 + try:
36778 + return _env_update(
36779 + makelinks, target_root, prev_mtimes, contents, env, writemsg_level
36780 + )
36781 + finally:
36782 + vardbapi._fs_unlock()
36783 +
36784 +
36785 + def _env_update(makelinks, target_root, prev_mtimes, contents, env, writemsg_level):
36786 + if writemsg_level is None:
36787 + writemsg_level = portage.util.writemsg_level
36788 + if target_root is None:
36789 + target_root = portage.settings["ROOT"]
36790 + if prev_mtimes is None:
36791 + prev_mtimes = portage.mtimedb["ldpath"]
36792 + if env is None:
36793 + settings = portage.settings
36794 + else:
36795 + settings = env
36796 +
36797 - eprefix = settings.get("EPREFIX", "")
36798 ++ # PREFIX LOCAL
36799 ++ eprefix = settings.get("EPREFIX", portage.const.EPREFIX)
36800 + eprefix_lstrip = eprefix.lstrip(os.sep)
36801 + eroot = (
36802 + normalize_path(os.path.join(target_root, eprefix_lstrip)).rstrip(os.sep)
36803 + + os.sep
36804 + )
36805 + envd_dir = os.path.join(eroot, "etc", "env.d")
36806 + ensure_dirs(envd_dir, mode=0o755)
36807 + fns = listdir(envd_dir, EmptyOnError=1)
36808 + fns.sort()
36809 + templist = []
36810 + for x in fns:
36811 + if len(x) < 3:
36812 + continue
36813 + if not x[0].isdigit() or not x[1].isdigit():
36814 + continue
36815 + if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
36816 + continue
36817 + templist.append(x)
36818 + fns = templist
36819 + del templist
36820 +
36821 + space_separated = {"CONFIG_PROTECT", "CONFIG_PROTECT_MASK"}
36822 + colon_separated = {
36823 + "ADA_INCLUDE_PATH",
36824 + "ADA_OBJECTS_PATH",
36825 + "CLASSPATH",
36826 + "INFODIR",
36827 + "INFOPATH",
36828 + "KDEDIRS",
36829 + "LDPATH",
36830 + "MANPATH",
36831 + "PATH",
36832 + "PKG_CONFIG_PATH",
36833 + "PRELINK_PATH",
36834 + "PRELINK_PATH_MASK",
36835 + "PYTHONPATH",
36836 + "ROOTPATH",
36837 + }
36840 +
36841 + config_list = []
36842 +
36843 + for x in fns:
36844 + file_path = os.path.join(envd_dir, x)
36845 + try:
36846 + myconfig = getconfig(file_path, expand=False)
36847 + except ParseError as e:
36848 + writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
36849 + del e
36850 + continue
36851 + if myconfig is None:
36852 + # broken symlink or file removed by a concurrent process
36853 + writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
36854 + continue
36855 +
36856 + config_list.append(myconfig)
36857 + if "SPACE_SEPARATED" in myconfig:
36858 + space_separated.update(myconfig["SPACE_SEPARATED"].split())
36859 + del myconfig["SPACE_SEPARATED"]
36860 + if "COLON_SEPARATED" in myconfig:
36861 + colon_separated.update(myconfig["COLON_SEPARATED"].split())
36862 + del myconfig["COLON_SEPARATED"]
36863 +
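A sketch of the two directive keys in practice; this /etc/env.d/99local
file and its variable names are hypothetical:

    LDPATH="/opt/mytool/lib"
    PATH="/opt/mytool/bin"
    COLON_SEPARATED="MYTOOL_PLUGIN_PATH"
    MYTOOL_PLUGIN_PATH="/opt/mytool/plugins"

getconfig() returns each file as a dict; COLON_SEPARATED extends the
merge set above and is then deleted, so only MYTOOL_PLUGIN_PATH itself
reaches profile.env, merged colon-style across all env.d files.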
36864 + env = {}
36865 + specials = {}
36866 + for var in space_separated:
36867 + mylist = []
36868 + for myconfig in config_list:
36869 + if var in myconfig:
36870 + for item in myconfig[var].split():
36871 + if item and item not in mylist:
36872 + mylist.append(item)
36873 + del myconfig[var] # prepare for env.update(myconfig)
36874 + if mylist:
36875 + env[var] = " ".join(mylist)
36876 + specials[var] = mylist
36877 +
36878 + for var in colon_separated:
36879 + mylist = []
36880 + for myconfig in config_list:
36881 + if var in myconfig:
36882 + for item in myconfig[var].split(":"):
36883 + if item and item not in mylist:
36884 + mylist.append(item)
36885 + del myconfig[var] # prepare for env.update(myconfig)
36886 + if mylist:
36887 + env[var] = ":".join(mylist)
36888 + specials[var] = mylist
36889 +
36890 + for myconfig in config_list:
36891 + """Cumulative variables have already been deleted from myconfig so that
36892 + they won't be overwritten by this dict.update call."""
36893 + env.update(myconfig)
36894 +
36895 + ldsoconf_path = os.path.join(eroot, "etc", "ld.so.conf")
36896 + try:
36897 + myld = io.open(
36898 + _unicode_encode(ldsoconf_path, encoding=_encodings["fs"], errors="strict"),
36899 + mode="r",
36900 + encoding=_encodings["content"],
36901 + errors="replace",
36902 + )
36903 + myldlines = myld.readlines()
36904 + myld.close()
36905 + oldld = []
36906 + for x in myldlines:
36907 + # each line has at least one char (a newline)
36908 + if x[:1] == "#":
36909 + continue
36910 + oldld.append(x[:-1])
36911 + except OSError as e:
36912 + if e.errno != errno.ENOENT:
36913 + raise
36914 + oldld = None
36915 +
36916 + newld = specials["LDPATH"]
36917 + if oldld != newld:
36918 + # ld.so.conf needs updating and ldconfig needs to be run
36919 + myfd = atomic_ofstream(ldsoconf_path)
36920 + myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
36921 + myfd.write("# contents of /etc/env.d directory\n")
36922 + for x in specials["LDPATH"]:
36923 + myfd.write(x + "\n")
36924 + myfd.close()
36925 +
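For two hypothetical LDPATH entries, the regenerated ld.so.conf would
read:

    # ld.so.conf autogenerated by env-update; make all changes to
    # contents of /etc/env.d directory
    /usr/lib64
    /opt/mytool/lib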
36926 + potential_lib_dirs = set()
36927 + for lib_dir_glob in ("usr/lib*", "lib*"):
36928 + x = os.path.join(eroot, lib_dir_glob)
36929 + for y in glob.glob(
36930 + _unicode_encode(x, encoding=_encodings["fs"], errors="strict")
36931 + ):
36932 + try:
36933 + y = _unicode_decode(y, encoding=_encodings["fs"], errors="strict")
36934 + except UnicodeDecodeError:
36935 + continue
36936 + if os.path.basename(y) != "libexec":
36937 + potential_lib_dirs.add(y[len(eroot) :])
36938 +
36939 + # Update prelink.conf if we are prelink-enabled
36940 + if prelink_capable:
36941 + prelink_d = os.path.join(eroot, "etc", "prelink.conf.d")
36942 + ensure_dirs(prelink_d)
36943 + newprelink = atomic_ofstream(os.path.join(prelink_d, "portage.conf"))
36944 + newprelink.write(
36945 + "# prelink.conf autogenerated by env-update; make all changes to\n"
36946 + )
36947 + newprelink.write("# contents of /etc/env.d directory\n")
36948 +
36949 + for x in sorted(potential_lib_dirs) + ["bin", "sbin"]:
36950 + newprelink.write("-l /%s\n" % (x,))
36951 + prelink_paths = set()
36952 + prelink_paths |= set(specials.get("LDPATH", []))
36953 + prelink_paths |= set(specials.get("PATH", []))
36954 + prelink_paths |= set(specials.get("PRELINK_PATH", []))
36955 + prelink_path_mask = specials.get("PRELINK_PATH_MASK", [])
36956 + for x in prelink_paths:
36957 + if not x:
36958 + continue
36959 + if not x.endswith("/"):
36960 + x += "/"
36961 + plmasked = False
36962 + for y in prelink_path_mask:
36963 + if not y:
36964 + continue
36965 + if not y.endswith("/"):
36966 + y += "/"
36967 + if x.startswith(y):
36968 + plmasked = True
36969 + break
36970 + if not plmasked:
36971 + newprelink.write("-h %s\n" % (x,))
36972 + for x in prelink_path_mask:
36973 + newprelink.write("-b %s\n" % (x,))
36974 + newprelink.close()
36975 +
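With hypothetical paths, the generated portage.conf carries -l lines
for the library dirs plus bin and sbin, -h lines for unmasked search
paths, and -b lines for the masks:

    # prelink.conf autogenerated by env-update; make all changes to
    # contents of /etc/env.d directory
    -l /lib64
    -l /usr/lib64
    -l /bin
    -l /sbin
    -h /usr/local/bin/
    -b /opt/masked/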
36976 + # Migration code path. If /etc/prelink.conf was generated by us, then
36977 + # point it to the new stuff until the prelink package re-installs.
36978 + prelink_conf = os.path.join(eroot, "etc", "prelink.conf")
36979 + try:
36980 + with open(
36981 + _unicode_encode(
36982 + prelink_conf, encoding=_encodings["fs"], errors="strict"
36983 + ),
36984 + "rb",
36985 + ) as f:
36986 + if (
36987 + f.readline()
36988 + == b"# prelink.conf autogenerated by env-update; make all changes to\n"
36989 + ):
36990 + f = atomic_ofstream(prelink_conf)
36991 + f.write("-c /etc/prelink.conf.d/*.conf\n")
36992 + f.close()
36993 + except OSError as e:
36994 + if e.errno != errno.ENOENT:
36995 + raise
36996 +
36997 + current_time = int(time.time())
36998 + mtime_changed = False
36999 +
37000 + lib_dirs = set()
37001 + for lib_dir in set(specials["LDPATH"]) | potential_lib_dirs:
37002 + x = os.path.join(eroot, lib_dir.lstrip(os.sep))
37003 + try:
37004 + newldpathtime = os.stat(x)[stat.ST_MTIME]
37005 + lib_dirs.add(normalize_path(x))
37006 + except OSError as oe:
37007 + if oe.errno == errno.ENOENT:
37008 + try:
37009 + del prev_mtimes[x]
37010 + except KeyError:
37011 + pass
37012 + # ignore this path because it doesn't exist
37013 + continue
37014 + raise
37015 + if newldpathtime == current_time:
37016 + # Reset mtime to avoid the potential ambiguity of times that
37017 + # differ by less than 1 second.
37018 + newldpathtime -= 1
37019 + os.utime(x, (newldpathtime, newldpathtime))
37020 + prev_mtimes[x] = newldpathtime
37021 + mtime_changed = True
37022 + elif prev_mtimes.get(x) != newldpathtime:
37023 + prev_mtimes[x] = newldpathtime
37024 + mtime_changed = True
37031 +
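The one-second rollback above exists because ST_MTIME has whole-second
resolution: a directory modified again within the same second would
still compare equal to the cached value and the change would go
unnoticed. A minimal sketch of the trick, using a hypothetical path:

    import os
    import stat
    import time

    path = "/tmp/some-libdir"  # hypothetical
    os.makedirs(path, exist_ok=True)
    mtime = os.stat(path)[stat.ST_MTIME]  # whole seconds, as an int
    if mtime == int(time.time()):
        # Push the mtime one second back so any write later in this
        # same second is guaranteed to yield a newer timestamp.
        os.utime(path, (mtime - 1, mtime - 1))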
37032 + if makelinks and not mtime_changed and contents is not None:
37033 + libdir_contents_changed = False
37034 + for mypath, mydata in contents.items():
37035 + if mydata[0] not in ("obj", "sym"):
37036 + continue
37037 + head, tail = os.path.split(mypath)
37038 + if head in lib_dirs:
37039 + libdir_contents_changed = True
37040 + break
37041 + if not libdir_contents_changed:
37042 + makelinks = False
37043 +
37044 + if (
37045 + "CHOST" in settings
37046 + and "CBUILD" in settings
37047 + and settings["CHOST"] != settings["CBUILD"]
37048 + ):
37049 + ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])
37050 + else:
37051 + ldconfig = os.path.join(eroot, "sbin", "ldconfig")
37052 +
37053 + if ldconfig is None:
37054 + pass
37055 + elif not (os.access(ldconfig, os.X_OK) and os.path.isfile(ldconfig)):
37056 + ldconfig = None
37057 +
37058 + # Only run ldconfig as needed
37059 + if makelinks and ldconfig:
37060 + # ldconfig has very different behaviour between FreeBSD and Linux
37061 + if ostype == "Linux" or ostype.lower().endswith("gnu"):
37062 + # We can't update links if we haven't cleaned other versions first, as
37063 + # an older package installed ON TOP of a newer version will cause ldconfig
37064 + # to overwrite the symlinks we just made. -X means no links. After 'clean'
37065 + # we can safely create links.
37066 + writemsg_level(
37067 + _(">>> Regenerating %setc/ld.so.cache...\n") % (target_root,)
37068 + )
37069 + os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
37070 + elif ostype in ("FreeBSD", "DragonFly"):
37071 + writemsg_level(
37072 + _(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % target_root
37073 + )
37074 + os.system(
37075 + (
37076 + "cd / ; %s -elf -i "
37077 + + "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'"
37078 + )
37079 + % (ldconfig, target_root, target_root)
37080 + )
37081 +
37082 + del specials["LDPATH"]
37083 +
37084 + notice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
37085 + notice += "# DO NOT EDIT THIS FILE."
37086 + penvnotice = notice + " CHANGES TO STARTUP PROFILES\n"
37087 + cenvnotice = penvnotice[:]
37088 - penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
37089 - cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
37090 ++ # BEGIN PREFIX LOCAL
37091 ++ penvnotice += f"# GO INTO {eprefix}/etc/profile NOT {eprefix}/etc/profile.env\n\n"
37092 ++ cenvnotice += f"# GO INTO {eprefix}/etc/csh.cshrc NOT {eprefix}/etc/csh.env\n\n"
37093 ++ # END PREFIX LOCAL
37094 +
37095 + # create /etc/profile.env for bash support
37096 + profile_env_path = os.path.join(eroot, "etc", "profile.env")
37097 + with atomic_ofstream(profile_env_path) as outfile:
37098 + outfile.write(penvnotice)
37099 +
37100 + env_keys = [x for x in env if x != "LDPATH"]
37101 + env_keys.sort()
37102 + for k in env_keys:
37103 + v = env[k]
37104 + if v.startswith("$") and not v.startswith("${"):
37105 + outfile.write("export %s=$'%s'\n" % (k, v[1:]))
37106 + else:
37107 + outfile.write("export %s='%s'\n" % (k, v))
37108 +
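The resulting profile.env consists of plain export lines with
single-quoted values (the examples here are hypothetical):

    export CONFIG_PROTECT='/etc'
    export MANPATH='/usr/local/share/man:/usr/share/man'
    export ROOTPATH='/usr/sbin:/usr/bin:/sbin:/bin'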
37109 + # Create the systemd user environment configuration file
37110 + # /etc/environment.d/10-gentoo-env.conf with the
37111 + # environment configuration from /etc/env.d.
37112 + systemd_environment_dir = os.path.join(eroot, "etc", "environment.d")
37113 + os.makedirs(systemd_environment_dir, exist_ok=True)
37114 +
37115 + systemd_gentoo_env_path = os.path.join(
37116 + systemd_environment_dir, "10-gentoo-env.conf"
37117 + )
37118 + with atomic_ofstream(systemd_gentoo_env_path) as systemd_gentoo_env:
37119 + senvnotice = notice + "\n\n"
37120 + systemd_gentoo_env.write(senvnotice)
37121 +
37122 + for env_key in env_keys:
37123 + # Skip PATH since this makes it impossible to use
37124 + # "systemctl --user import-environment PATH".
37125 + if env_key == "PATH":
37126 + continue
37127 +
37128 + env_key_value = env[env_key]
37129 +
37130 + # Skip variables with the empty string
37131 + # as value. Those sometimes appear in
37132 + # profile.env (e.g. "export GCC_SPECS=''"),
37133 + # but are invalid in systemd's syntax.
37134 + if not env_key_value:
37135 + continue
37136 +
37137 + # Transform into systemd environment.d
37138 + # conf syntax, basically shell variable
37139 + # assignment (without "export ").
37140 + line = f"{env_key}={env_key_value}\n"
37141 +
37142 + systemd_gentoo_env.write(line)
37143 +
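A matching 10-gentoo-env.conf holds bare KEY=value assignments, with
PATH and empty-valued variables filtered out (hypothetical values):

    LANG=en_US.UTF-8
    EDITOR=/usr/bin/nano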
37144 + # create /etc/csh.env for (t)csh support
37145 + outfile = atomic_ofstream(os.path.join(eroot, "etc", "csh.env"))
37146 + outfile.write(cenvnotice)
37147 + for x in env_keys:
37148 + outfile.write("setenv %s '%s'\n" % (x, env[x]))
37149 + outfile.close()
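And the csh.env counterpart renders the same keys in setenv syntax
(hypothetical values):

    setenv LANG 'en_US.UTF-8'
    setenv EDITOR '/usr/bin/nano'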