author     Brian Dolbec <dolsen@gentoo.org>  2014-03-26 06:52:42 -0700
committer  Brian Dolbec <dolsen@gentoo.org>  2014-03-26 06:52:42 -0700
commit     276611668673967d8dd76caebb1e427dd10b3ff2 (patch)
tree       479b3b5dccbc0dfd96c617908eb21946e6785eeb
parent     Merge branch 'master' of git+ssh://git.overlays.gentoo.org/proj/portage into ... (diff)
download   portage-public_api.tar.gz
           portage-public_api.tar.bz2
           portage-public_api.zip

Merge branch 'master' of git+ssh://git.overlays.gentoo.org/proj/portage into public_api (public_api)
-rw-r--r--  .gitignore  5
-rw-r--r--  .portage_not_installed  0
-rw-r--r--  DEVELOPING  37
-rw-r--r--  Makefile  33
-rw-r--r--  NEWS  22
-rw-r--r--  README  49
-rw-r--r--  RELEASE-NOTES  57
-rwxr-xr-x  bin/archive-conf  56
-rwxr-xr-x  bin/banned-helper  6
-rw-r--r--  bin/bashrc-functions.sh  63
-rwxr-xr-x  bin/binhost-snapshot  31
-rwxr-xr-x  bin/check-implicit-pointer-usage.py  2
-rwxr-xr-x  bin/chpathtool.py  78
-rwxr-xr-x  bin/clean_locks  24
-rwxr-xr-x  bin/dispatch-conf  36
-rwxr-xr-x  bin/dohtml.py  56
-rw-r--r--  bin/eapi.sh  145
-rwxr-xr-x  bin/ebuild  105
l---------  bin/ebuild-helpers/4/dodoc  1
l---------  bin/ebuild-helpers/4/dohard  1
l---------  bin/ebuild-helpers/4/dosed  1
l---------  bin/ebuild-helpers/4/prepalldocs  1
-rwxr-xr-x  bin/ebuild-helpers/bsd/sed (renamed from bin/ebuild-helpers/sed)  14
-rwxr-xr-x  bin/ebuild-helpers/dobin  13
-rwxr-xr-x  bin/ebuild-helpers/doconfd  2
-rwxr-xr-x  bin/ebuild-helpers/dodir  9
-rwxr-xr-x  bin/ebuild-helpers/dodoc  20
-rwxr-xr-x  bin/ebuild-helpers/doenvd  2
-rwxr-xr-x  bin/ebuild-helpers/doexe  16
-rwxr-xr-x  bin/ebuild-helpers/dohard  14
-rwxr-xr-x  bin/ebuild-helpers/doheader  19
-rwxr-xr-x  bin/ebuild-helpers/dohtml  11
-rwxr-xr-x  bin/ebuild-helpers/doinfo  15
-rwxr-xr-x  bin/ebuild-helpers/doinitd  2
-rwxr-xr-x  bin/ebuild-helpers/doins  47
-rwxr-xr-x  bin/ebuild-helpers/dolib  13
-rwxr-xr-x  bin/ebuild-helpers/doman  13
-rwxr-xr-x  bin/ebuild-helpers/domo  11
-rwxr-xr-x  bin/ebuild-helpers/dosbin  13
-rwxr-xr-x  bin/ebuild-helpers/dosed  14
-rwxr-xr-x  bin/ebuild-helpers/dosym  9
-rwxr-xr-x  bin/ebuild-helpers/ecompress  14
-rwxr-xr-x  bin/ebuild-helpers/ecompressdir  79
-rwxr-xr-x  bin/ebuild-helpers/emake  2
-rwxr-xr-x  bin/ebuild-helpers/fowners  12
-rwxr-xr-x  bin/ebuild-helpers/fperms  9
-rwxr-xr-x  bin/ebuild-helpers/keepdir  20
l--------- [-rwxr-xr-x]  bin/ebuild-helpers/newbin  23
l--------- [-rwxr-xr-x]  bin/ebuild-helpers/newconfd  23
l--------- [-rwxr-xr-x]  bin/ebuild-helpers/newdoc  23
l--------- [-rwxr-xr-x]  bin/ebuild-helpers/newenvd  23
l--------- [-rwxr-xr-x]  bin/ebuild-helpers/newexe  23
l---------  bin/ebuild-helpers/newheader  1
l--------- [-rwxr-xr-x]  bin/ebuild-helpers/newinitd  23
-rwxr-xr-x  bin/ebuild-helpers/newins  67
l--------- [-rwxr-xr-x]  bin/ebuild-helpers/newlib.a  23
l--------- [-rwxr-xr-x]  bin/ebuild-helpers/newlib.so  23
l--------- [-rwxr-xr-x]  bin/ebuild-helpers/newman  23
l--------- [-rwxr-xr-x]  bin/ebuild-helpers/newsbin  23
-rwxr-xr-x  bin/ebuild-helpers/portageq  6
-rwxr-xr-x  bin/ebuild-helpers/prepall  7
-rwxr-xr-x  bin/ebuild-helpers/prepalldocs  14
-rwxr-xr-x  bin/ebuild-helpers/prepallinfo  7
-rwxr-xr-x  bin/ebuild-helpers/prepallman  14
-rwxr-xr-x  bin/ebuild-helpers/prepallstrip  9
-rwxr-xr-x  bin/ebuild-helpers/prepinfo  11
-rwxr-xr-x  bin/ebuild-helpers/preplib  31
-rwxr-xr-x  bin/ebuild-helpers/prepman  16
-rwxr-xr-x  bin/ebuild-helpers/prepstrip  205
l---------  bin/ebuild-helpers/unprivileged/chgrp  1
-rwxr-xr-x  bin/ebuild-helpers/unprivileged/chown  41
-rwxr-xr-x  bin/ebuild-helpers/xattr/install  12
-rwxr-xr-x  bin/ebuild-ipc  6
-rwxr-xr-x  bin/ebuild-ipc.py  177
-rwxr-xr-x  bin/ebuild.sh  182
-rwxr-xr-x  bin/egencache  490
-rwxr-xr-x  bin/emaint  23
-rwxr-xr-x  bin/emerge  104
-rwxr-xr-x  bin/emerge-webrsync  178
-rwxr-xr-x  bin/emirrordist  13
-rwxr-xr-x  bin/env-update  16
-rwxr-xr-x  bin/etc-update  19
-rwxr-xr-x  bin/filter-bash-environment.py  20
-rwxr-xr-x  bin/fixpackages  24
-rwxr-xr-x  bin/glsa-check  198
-rw-r--r--  bin/helper-functions.sh  71
-rwxr-xr-x  bin/install.py  253
-rw-r--r--  bin/isolated-functions.sh  168
-rwxr-xr-x  bin/lock-helper.py  5
-rwxr-xr-x  bin/misc-functions.sh  356
-rw-r--r--  bin/phase-functions.sh  412
-rw-r--r--  bin/phase-helpers.sh  610
-rwxr-xr-x  bin/portageq  665
-rwxr-xr-x  bin/quickpkg  37
-rwxr-xr-x  bin/regenworld  16
-rwxr-xr-x  bin/repoman  1687
-rw-r--r--  bin/save-ebuild-env.sh  67
-rwxr-xr-x  bin/xattr-helper.py  190
-rwxr-xr-x  bin/xpak-helper.py  11
-rw-r--r--  cnf/dispatch-conf.conf  1
-rw-r--r--  cnf/make.conf.example (renamed from cnf/make.conf)  21
-rw-r--r--  cnf/make.conf.example.alpha.diff (renamed from cnf/make.conf.alpha.diff)  18
-rw-r--r--  cnf/make.conf.example.amd64-fbsd.diff (renamed from cnf/make.conf.amd64-fbsd.diff)  18
-rw-r--r--  cnf/make.conf.example.amd64.diff (renamed from cnf/make.conf.amd64.diff)  18
-rw-r--r--  cnf/make.conf.example.arm.diff (renamed from cnf/make.conf.arm.diff)  12
-rw-r--r--  cnf/make.conf.example.hppa.diff (renamed from cnf/make.conf.hppa.diff)  28
-rw-r--r--  cnf/make.conf.example.ia64.diff (renamed from cnf/make.conf.ia64.diff)  10
-rw-r--r--  cnf/make.conf.example.m68k.diff (renamed from cnf/make.conf.m68k.diff)  14
-rw-r--r--  cnf/make.conf.example.mips.diff (renamed from cnf/make.conf.mips.diff)  18
-rw-r--r--  cnf/make.conf.example.ppc.diff (renamed from cnf/make.conf.ppc.diff)  26
-rw-r--r--  cnf/make.conf.example.ppc64.diff (renamed from cnf/make.conf.ppc64.diff)  24
-rw-r--r--  cnf/make.conf.example.s390.diff (renamed from cnf/make.conf.s390.diff)  10
-rw-r--r--  cnf/make.conf.example.sh.diff (renamed from cnf/make.conf.sh.diff)  17
-rw-r--r--  cnf/make.conf.example.sparc-fbsd.diff (renamed from cnf/make.conf.sparc-fbsd.diff)  12
-rw-r--r--  cnf/make.conf.example.sparc.diff (renamed from cnf/make.conf.sparc.diff)  18
-rw-r--r--  cnf/make.conf.example.x86-fbsd.diff (renamed from cnf/make.conf.x86-fbsd.diff)  18
-rw-r--r--  cnf/make.conf.example.x86.diff (renamed from cnf/make.conf.x86.diff)  18
-rw-r--r--  cnf/make.globals  44
-rw-r--r--  cnf/metadata.dtd  7
-rw-r--r--  cnf/repos.conf  7
-rw-r--r--  cnf/sets/portage.conf  2
-rw-r--r--  doc/config/sets.docbook  5
-rw-r--r--  doc/package/ebuild.docbook  3
-rw-r--r--  doc/package/ebuild/eapi/4-python.docbook  44
-rw-r--r--  doc/package/ebuild/eapi/4-slot-abi.docbook  12
-rw-r--r--  doc/package/ebuild/eapi/5-hdepend.docbook  32
-rw-r--r--  doc/package/ebuild/eapi/5-progress.docbook  247
-rw-r--r--  doc/package/ebuild/eapi/5.docbook  232
-rw-r--r--  doc/portage.docbook  3
-rw-r--r--  doc/qa.docbook  2
-rw-r--r--  make.conf-repatch.sh  40
-rwxr-xr-x  make.conf.example-repatch.sh  41
-rw-r--r--  man/color.map.5  18
-rw-r--r--  man/dispatch-conf.1  77
-rw-r--r--  man/ebuild.1  36
-rw-r--r--  man/ebuild.5  1173
-rw-r--r--  man/egencache.1  78
-rw-r--r--  man/emaint.1  15
-rw-r--r--  man/emerge.1  423
-rw-r--r--  man/emirrordist.1  148
-rw-r--r--  man/env-update.1  21
-rw-r--r--  man/etc-update.1  46
-rw-r--r--  man/make.conf.5  326
-rw-r--r--  man/portage.5  664
-rw-r--r--  man/quickpkg.1  34
-rw-r--r--  man/repoman.1  139
-rw-r--r--  man/ru/color.map.5  217
-rw-r--r--  man/ru/dispatch-conf.1  100
-rw-r--r--  man/ru/ebuild.1  249
-rw-r--r--  man/ru/env-update.1  35
-rw-r--r--  man/ru/etc-update.1  63
-rw-r--r--  man/ru/fixpackages.1  22
-rw-r--r--  man/xpak.5  5
-rwxr-xr-x  misc/emerge-delta-webrsync  809
-rwxr-xr-x  mkrelease.sh  91
-rw-r--r--  pym/_emerge/AbstractDepPriority.py  5
-rw-r--r--  pym/_emerge/AbstractEbuildProcess.py  58
-rw-r--r--  pym/_emerge/AbstractPollTask.py  2
-rw-r--r--  pym/_emerge/AsynchronousLock.py  66
-rw-r--r--  pym/_emerge/AsynchronousTask.py  14
-rw-r--r--  pym/_emerge/Binpkg.py  7
-rw-r--r--  pym/_emerge/BinpkgExtractorAsync.py  15
-rw-r--r--  pym/_emerge/BinpkgFetcher.py  18
-rw-r--r--  pym/_emerge/BinpkgVerifier.py  143
-rw-r--r--  pym/_emerge/BlockerCache.py  10
-rw-r--r--  pym/_emerge/BlockerDB.py  12
-rw-r--r--  pym/_emerge/CompositeTask.py  4
-rw-r--r--  pym/_emerge/DepPriority.py  29
-rw-r--r--  pym/_emerge/DepPrioritySatisfiedRange.py  24
-rw-r--r--  pym/_emerge/DependencyArg.py  10
-rw-r--r--  pym/_emerge/EbuildBuild.py  36
-rw-r--r--  pym/_emerge/EbuildBuildDir.py  11
-rw-r--r--  pym/_emerge/EbuildExecuter.py  13
-rw-r--r--  pym/_emerge/EbuildFetcher.py  68
-rw-r--r--  pym/_emerge/EbuildMetadataPhase.py  66
-rw-r--r--  pym/_emerge/EbuildPhase.py  63
-rw-r--r--  pym/_emerge/EbuildProcess.py  12
-rw-r--r--  pym/_emerge/EbuildSpawnProcess.py  10
-rw-r--r--  pym/_emerge/FakeVartree.py  123
-rw-r--r--  pym/_emerge/FifoIpcDaemon.py  43
-rw-r--r--  pym/_emerge/JobStatusDisplay.py  44
-rw-r--r--  pym/_emerge/MergeListItem.py  18
-rw-r--r--  pym/_emerge/MetadataRegen.py  93
-rw-r--r--  pym/_emerge/MiscFunctionsProcess.py  7
-rw-r--r--  pym/_emerge/Package.py  317
-rw-r--r--  pym/_emerge/PackageMerge.py  7
-rw-r--r--  pym/_emerge/PackageUninstall.py  6
-rw-r--r--  pym/_emerge/PackageVirtualDbapi.py  4
-rw-r--r--  pym/_emerge/PipeReader.py  37
-rw-r--r--  pym/_emerge/PollScheduler.py  129
-rw-r--r--  pym/_emerge/QueueScheduler.py  105
-rw-r--r--  pym/_emerge/RootConfig.py  13
-rw-r--r--  pym/_emerge/Scheduler.py  240
-rw-r--r--  pym/_emerge/SpawnProcess.py  269
-rw-r--r--  pym/_emerge/SubProcess.py  30
-rw-r--r--  pym/_emerge/Task.py  9
-rw-r--r--  pym/_emerge/TaskScheduler.py  26
-rw-r--r--  pym/_emerge/UnmergeDepPriority.py  27
-rw-r--r--  pym/_emerge/UseFlagDisplay.py  10
-rw-r--r--  pym/_emerge/actions.py  1704
-rw-r--r--  pym/_emerge/chk_updated_cfg_files.py  42
-rw-r--r--  pym/_emerge/clear_caches.py  4
-rw-r--r--  pym/_emerge/countdown.py  18
-rw-r--r--  pym/_emerge/create_depgraph_params.py  23
-rw-r--r--  pym/_emerge/create_world_atom.py  25
-rw-r--r--  pym/_emerge/depgraph.py  2451
-rw-r--r--  pym/_emerge/emergelog.py  12
-rw-r--r--  pym/_emerge/getloadavg.py  5
-rw-r--r--  pym/_emerge/help.py  10
-rw-r--r--  pym/_emerge/is_valid_package_atom.py  7
-rw-r--r--  pym/_emerge/main.py  1297
-rw-r--r--  pym/_emerge/post_emerge.py  165
-rw-r--r--  pym/_emerge/resolver/backtracking.py  38
-rw-r--r--  pym/_emerge/resolver/circular_dependency.py  24
-rw-r--r--  pym/_emerge/resolver/output.py  537
-rw-r--r--  pym/_emerge/resolver/output_helpers.py  95
-rw-r--r--  pym/_emerge/resolver/package_tracker.py  301
-rw-r--r--  pym/_emerge/resolver/slot_collision.py  230
-rw-r--r--  pym/_emerge/search.py  4
-rw-r--r--  pym/_emerge/stdout_spinner.py  13
-rw-r--r--  pym/_emerge/unmerge.py  5
-rw-r--r--  pym/portage/__init__.py  211
-rw-r--r--  pym/portage/_emirrordist/Config.py  132
-rw-r--r--  pym/portage/_emirrordist/DeletionIterator.py  83
-rw-r--r--  pym/portage/_emirrordist/DeletionTask.py  129
-rw-r--r--  pym/portage/_emirrordist/FetchIterator.py  147
-rw-r--r--  pym/portage/_emirrordist/FetchTask.py  629
-rw-r--r--  pym/portage/_emirrordist/MirrorDistTask.py  219
-rw-r--r--  pym/portage/_emirrordist/__init__.py  2
-rw-r--r--  pym/portage/_emirrordist/main.py  463
-rw-r--r--  pym/portage/_global_updates.py  238
-rw-r--r--  pym/portage/_legacy_globals.py  3
-rw-r--r--  pym/portage/_selinux.py  55
-rw-r--r--  pym/portage/_sets/__init__.py  30
-rw-r--r--  pym/portage/_sets/base.py  7
-rw-r--r--  pym/portage/_sets/dbapi.py  111
-rw-r--r--  pym/portage/_sets/files.py  10
-rw-r--r--  pym/portage/_sets/libs.py  17
-rw-r--r--  pym/portage/_sets/security.py  4
-rw-r--r--  pym/portage/cache/ebuild_xattr.py  2
-rw-r--r--  pym/portage/cache/flat_hash.py  32
-rw-r--r--  pym/portage/cache/flat_list.py  134
-rw-r--r--  pym/portage/cache/fs_template.py  6
-rw-r--r--  pym/portage/cache/mappings.py  6
-rw-r--r--  pym/portage/cache/metadata.py  6
-rw-r--r--  pym/portage/cache/sqlite.py  41
-rw-r--r--  pym/portage/cache/template.py  14
-rw-r--r--  pym/portage/checksum.py  100
-rw-r--r--  pym/portage/const.py  198
-rw-r--r--  pym/portage/cvstree.py  274
-rw-r--r--  pym/portage/data.py  76
-rw-r--r--  pym/portage/dbapi/_MergeProcess.py  214
-rw-r--r--  pym/portage/dbapi/_SyncfsProcess.py  53
-rw-r--r--  pym/portage/dbapi/__init__.py  110
-rw-r--r--  pym/portage/dbapi/_expand_new_virt.py  12
-rw-r--r--  pym/portage/dbapi/_similar_name_search.py  57
-rw-r--r--  pym/portage/dbapi/bintree.py  338
-rw-r--r--  pym/portage/dbapi/cpv_expand.py  4
-rw-r--r--  pym/portage/dbapi/dep_expand.py  6
-rw-r--r--  pym/portage/dbapi/porttree.py  144
-rw-r--r--  pym/portage/dbapi/vartree.py  604
-rw-r--r--  pym/portage/dbapi/virtual.py  7
-rw-r--r--  pym/portage/debug.py  10
-rw-r--r--  pym/portage/dep/__init__.py  309
-rw-r--r--  pym/portage/dep/_slot_operator.py (renamed from pym/portage/dep/_slot_abi.py)  53
-rw-r--r--  pym/portage/dep/dep_check.py  113
-rw-r--r--  pym/portage/dispatch_conf.py  326
-rw-r--r--  pym/portage/eapi.py  64
-rw-r--r--  pym/portage/eclass_cache.py  26
-rw-r--r--  pym/portage/elog/__init__.py  3
-rw-r--r--  pym/portage/elog/mod_echo.py  3
-rw-r--r--  pym/portage/elog/mod_save.py  24
-rw-r--r--  pym/portage/elog/mod_save_summary.py  40
-rw-r--r--  pym/portage/elog/mod_syslog.py  13
-rw-r--r--  pym/portage/emaint/__init__.py  4
-rw-r--r--  pym/portage/emaint/defaults.py  11
-rw-r--r--  pym/portage/emaint/main.py  157
-rw-r--r--  pym/portage/emaint/module.py  8
-rw-r--r--  pym/portage/emaint/modules/__init__.py  4
-rw-r--r--  pym/portage/emaint/modules/binhost/__init__.py  8
-rw-r--r--  pym/portage/emaint/modules/binhost/binhost.py  12
-rw-r--r--  pym/portage/emaint/modules/config/__init__.py  8
-rw-r--r--  pym/portage/emaint/modules/config/config.py  66
-rw-r--r--  pym/portage/emaint/modules/logs/__init__.py  22
-rw-r--r--  pym/portage/emaint/modules/logs/logs.py  17
-rw-r--r--  pym/portage/emaint/modules/move/__init__.py  9
-rw-r--r--  pym/portage/emaint/modules/move/move.py  42
-rw-r--r--  pym/portage/emaint/modules/resume/__init__.py  6
-rw-r--r--  pym/portage/emaint/modules/world/__init__.py  8
-rw-r--r--  pym/portage/env/loaders.py  26
-rw-r--r--  pym/portage/exception.py  54
-rw-r--r--  pym/portage/getbinpkg.py  255
-rw-r--r--  pym/portage/glsa.py  313
-rw-r--r--  pym/portage/localization.py  17
-rw-r--r--  pym/portage/locks.py  104
-rw-r--r--  pym/portage/mail.py  7
-rw-r--r--  pym/portage/manifest.py  114
-rw-r--r--  pym/portage/news.py  10
-rw-r--r--  pym/portage/output.py  43
-rw-r--r--  pym/portage/package/ebuild/_config/KeywordsManager.py  56
-rw-r--r--  pym/portage/package/ebuild/_config/LocationsManager.py  135
-rw-r--r--  pym/portage/package/ebuild/_config/MaskManager.py  33
-rw-r--r--  pym/portage/package/ebuild/_config/UseManager.py  290
-rw-r--r--  pym/portage/package/ebuild/_config/special_env_vars.py  56
-rw-r--r--  pym/portage/package/ebuild/_config/unpack_dependencies.py  38
-rw-r--r--  pym/portage/package/ebuild/_ipc/QueryCommand.py  91
-rw-r--r--  pym/portage/package/ebuild/_metadata_invalid.py (renamed from pym/portage/package/ebuild/_eapi_invalid.py)  13
-rw-r--r--  pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py  43
-rw-r--r--  pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py  93
-rw-r--r--  pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py  186
-rw-r--r--  pym/portage/package/ebuild/_parallel_manifest/__init__.py  2
-rw-r--r--  pym/portage/package/ebuild/_spawn_nofetch.py  23
-rw-r--r--  pym/portage/package/ebuild/config.py  610
-rw-r--r--  pym/portage/package/ebuild/deprecated_profile_check.py  63
-rw-r--r--  pym/portage/package/ebuild/digestcheck.py  15
-rw-r--r--  pym/portage/package/ebuild/digestgen.py  107
-rw-r--r--  pym/portage/package/ebuild/doebuild.py  546
-rw-r--r--  pym/portage/package/ebuild/fetch.py  84
-rw-r--r--  pym/portage/package/ebuild/getmaskingreason.py  30
-rw-r--r--  pym/portage/package/ebuild/getmaskingstatus.py  32
-rw-r--r--  pym/portage/package/ebuild/prepare_build_dirs.py  8
-rw-r--r--  pym/portage/process.py  333
-rw-r--r--  pym/portage/proxy/lazyimport.py  5
-rw-r--r--  pym/portage/proxy/objectproxy.py  9
-rw-r--r--  pym/portage/repository/config.py  552
-rw-r--r--  pym/portage/tests/__init__.py  93
-rw-r--r--  pym/portage/tests/bin/setup_env.py  54
-rw-r--r--  pym/portage/tests/dbapi/test_fakedbapi.py  10
-rw-r--r--  pym/portage/tests/dbapi/test_portdb_cache.py  183
-rw-r--r--  pym/portage/tests/dep/testAtom.py  267
-rw-r--r--  pym/portage/tests/dep/testCheckRequiredUse.py  192
-rw-r--r--  pym/portage/tests/dep/testStandalone.py  26
-rw-r--r--  pym/portage/tests/dep/test_best_match_to_list.py  44
-rw-r--r--  pym/portage/tests/dep/test_dep_getcpv.py  16
-rw-r--r--  pym/portage/tests/dep/test_dep_getrepo.py  6
-rw-r--r--  pym/portage/tests/dep/test_dep_getslot.py  10
-rw-r--r--  pym/portage/tests/dep/test_dep_getusedeps.py  12
-rw-r--r--  pym/portage/tests/dep/test_get_operator.py  24
-rw-r--r--  pym/portage/tests/dep/test_get_required_use_flags.py  4
-rw-r--r--  pym/portage/tests/dep/test_isjustname.py  14
-rw-r--r--  pym/portage/tests/dep/test_isvalidatom.py  13
-rw-r--r--  pym/portage/tests/dep/test_match_from_list.py  136
-rw-r--r--  pym/portage/tests/dep/test_paren_reduce.py  61
-rw-r--r--  pym/portage/tests/dep/test_use_reduce.py  519
-rw-r--r--  pym/portage/tests/ebuild/test_config.py  27
-rw-r--r--  pym/portage/tests/ebuild/test_doebuild_fd_pipes.py  137
-rw-r--r--  pym/portage/tests/ebuild/test_doebuild_spawn.py  46
-rw-r--r--  pym/portage/tests/ebuild/test_ipc_daemon.py  78
-rw-r--r--  pym/portage/tests/ebuild/test_spawn.py  15
-rw-r--r--  pym/portage/tests/emerge/test_emerge_slot_abi.py  30
-rw-r--r--  pym/portage/tests/emerge/test_simple.py  116
-rw-r--r--  pym/portage/tests/env/config/test_PackageKeywordsFile.py  8
-rw-r--r--  pym/portage/tests/env/config/test_PackageUseFile.py  6
-rw-r--r--  pym/portage/tests/env/config/test_PortageModulesFile.py  11
-rw-r--r--  pym/portage/tests/glsa/__init__.py  2
-rw-r--r--  pym/portage/tests/glsa/__test__  0
-rw-r--r--  pym/portage/tests/glsa/test_security_set.py  144
-rw-r--r--  pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py  4
-rw-r--r--  pym/portage/tests/lint/test_bash_syntax.py  26
-rw-r--r--  pym/portage/tests/lint/test_compile_modules.py  32
-rw-r--r--  pym/portage/tests/lint/test_import_modules.py  2
-rw-r--r--  pym/portage/tests/locks/test_asynchronous_lock.py  10
-rw-r--r--  pym/portage/tests/process/test_PopenProcess.py  85
-rw-r--r--  pym/portage/tests/process/test_PopenProcessBlockingIO.py  63
-rw-r--r--  pym/portage/tests/process/test_poll.py  35
-rw-r--r--  pym/portage/tests/repoman/test_echangelog.py  6
-rw-r--r--  pym/portage/tests/repoman/test_simple.py  83
-rw-r--r--  pym/portage/tests/resolver/ResolverPlayground.py  390
-rw-r--r--  pym/portage/tests/resolver/test_autounmask.py  304
-rw-r--r--  pym/portage/tests/resolver/test_autounmask_multilib_use.py  85
-rw-r--r--  pym/portage/tests/resolver/test_backtracking.py  48
-rw-r--r--  pym/portage/tests/resolver/test_blocker.py  48
-rw-r--r--  pym/portage/tests/resolver/test_complete_graph.py  4
-rw-r--r--  pym/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py  74
-rw-r--r--  pym/portage/tests/resolver/test_depclean.py  100
-rw-r--r--  pym/portage/tests/resolver/test_depclean_order.py  57
-rw-r--r--  pym/portage/tests/resolver/test_depclean_slot_unavailable.py  78
-rw-r--r--  pym/portage/tests/resolver/test_features_test_use.py  68
-rw-r--r--  pym/portage/tests/resolver/test_merge_order.py  35
-rw-r--r--  pym/portage/tests/resolver/test_multirepo.py  88
-rw-r--r--  pym/portage/tests/resolver/test_onlydeps.py  34
-rw-r--r--  pym/portage/tests/resolver/test_or_choices.py  134
-rw-r--r--  pym/portage/tests/resolver/test_package_tracker.py  261
-rw-r--r--  pym/portage/tests/resolver/test_regular_slot_change_without_revbump.py  59
-rw-r--r--  pym/portage/tests/resolver/test_slot_abi.py  111
-rw-r--r--  pym/portage/tests/resolver/test_slot_abi_downgrade.py  8
-rw-r--r--  pym/portage/tests/resolver/test_slot_change_without_revbump.py  69
-rw-r--r--  pym/portage/tests/resolver/test_slot_collisions.py  106
-rw-r--r--  pym/portage/tests/resolver/test_slot_conflict_mask_update.py  41
-rw-r--r--  pym/portage/tests/resolver/test_slot_conflict_rebuild.py  408
-rw-r--r--  pym/portage/tests/resolver/test_slot_conflict_update.py  98
-rw-r--r--  pym/portage/tests/resolver/test_slot_operator_autounmask.py  120
-rw-r--r--  pym/portage/tests/resolver/test_slot_operator_unsatisfied.py  70
-rw-r--r--  pym/portage/tests/resolver/test_slot_operator_unsolved.py  88
-rw-r--r--  pym/portage/tests/resolver/test_targetroot.py  85
-rw-r--r--  pym/portage/tests/resolver/test_unpack_dependencies.py  65
-rw-r--r--  pym/portage/tests/resolver/test_use_aliases.py  131
-rw-r--r--  pym/portage/tests/resolver/test_useflags.py  78
-rwxr-xr-x  pym/portage/tests/runTests  19
-rw-r--r--  pym/portage/tests/unicode/test_string_format.py  52
-rw-r--r--  pym/portage/tests/update/test_move_ent.py  6
-rw-r--r--  pym/portage/tests/update/test_move_slot_ent.py  6
-rw-r--r--  pym/portage/tests/update/test_update_dbentry.py  101
-rw-r--r--  pym/portage/tests/util/test_getconfig.py  31
-rw-r--r--  pym/portage/tests/util/test_stackDictList.py  12
-rw-r--r--  pym/portage/tests/util/test_stackDicts.py  41
-rw-r--r--  pym/portage/tests/util/test_stackLists.py  18
-rw-r--r--  pym/portage/tests/util/test_uniqueArray.py  14
-rw-r--r--  pym/portage/tests/util/test_varExpand.py  80
-rw-r--r--  pym/portage/tests/util/test_whirlpool.py  4
-rw-r--r--  pym/portage/tests/versions/test_cpv_sort_key.py  7
-rw-r--r--  pym/portage/tests/versions/test_vercmp.py  38
-rw-r--r--  pym/portage/update.py  137
-rw-r--r--  pym/portage/util/ExtractKernelVersion.py  6
-rw-r--r--  pym/portage/util/SlotObject.py  1
-rw-r--r--  pym/portage/util/_ShelveUnicodeWrapper.py  45
-rw-r--r--  pym/portage/util/__init__.py  394
-rw-r--r--  pym/portage/util/_argparse.py  42
-rw-r--r--  pym/portage/util/_async/AsyncScheduler.py  102
-rw-r--r--  pym/portage/util/_async/FileCopier.py  17
-rw-r--r--  pym/portage/util/_async/FileDigester.py  73
-rw-r--r--  pym/portage/util/_async/ForkProcess.py  65
-rw-r--r--  pym/portage/util/_async/PipeLogger.py  163
-rw-r--r--  pym/portage/util/_async/PipeReaderBlockingIO.py  91
-rw-r--r--  pym/portage/util/_async/PopenProcess.py  33
-rw-r--r--  pym/portage/util/_async/SchedulerInterface.py  79
-rw-r--r--  pym/portage/util/_async/TaskScheduler.py  20
-rw-r--r--  pym/portage/util/_async/__init__.py  2
-rw-r--r--  pym/portage/util/_async/run_main_scheduler.py  41
-rw-r--r--  pym/portage/util/_ctypes.py  47
-rw-r--r--  pym/portage/util/_desktop_entry.py  85
-rw-r--r--  pym/portage/util/_dyn_libs/LinkageMapELF.py  24
-rw-r--r--  pym/portage/util/_dyn_libs/PreservedLibsRegistry.py  3
-rw-r--r--  pym/portage/util/_dyn_libs/display_preserved_libs.py  98
-rw-r--r--  pym/portage/util/_eventloop/EventLoop.py  364
-rw-r--r--  pym/portage/util/_eventloop/PollSelectAdapter.py  2
-rw-r--r--  pym/portage/util/_get_vm_info.py  80
-rw-r--r--  pym/portage/util/_info_files.py  138
-rw-r--r--  pym/portage/util/_path.py  27
-rw-r--r--  pym/portage/util/_urlopen.py  102
-rw-r--r--  pym/portage/util/digraph.py  46
-rw-r--r--  pym/portage/util/env_update.py  78
-rw-r--r--  pym/portage/util/lafilefixer.py  10
-rw-r--r--  pym/portage/util/listdir.py  128
-rw-r--r--  pym/portage/util/movefile.py  220
-rw-r--r--  pym/portage/util/whirlpool.py  2
-rw-r--r--  pym/portage/util/writeable_check.py  79
-rw-r--r--  pym/portage/versions.py  85
-rw-r--r--  pym/portage/xml/metadata.py  15
-rw-r--r--  pym/portage/xpak.py  8
-rw-r--r--  pym/repoman/checks.py  235
-rw-r--r--  pym/repoman/errors.py  6
-rw-r--r--  pym/repoman/herdbase.py  11
-rw-r--r--  pym/repoman/utilities.py  146
-rwxr-xr-x  runtests.sh  47
-rwxr-xr-x  tabcheck.py  2
456 files changed, 30379 insertions, 13080 deletions
diff --git a/.gitignore b/.gitignore
index 808cc0c58..074bb864d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
*.py[co]
-/pym/portage/public_api.bz2
-/testpath
+__pycache__/
+*.class
+/tags
diff --git a/.portage_not_installed b/.portage_not_installed
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/.portage_not_installed
diff --git a/DEVELOPING b/DEVELOPING
index ebe5d56c3..40b4ca2d4 100644
--- a/DEVELOPING
+++ b/DEVELOPING
@@ -24,7 +24,8 @@ Tabs
----
The current code uses tabs, not spaces. Keep whitespace usage consistent
-between files. New files should use tabs.
+between files. New files should use tabs. Space is sometimes used for
+indentation in Python code. Tab stop should for this reason be set to 4.
Line-Wrapping
-------------
@@ -51,13 +52,13 @@ wrapping is always clear (but you cannot convert spaces as easily as tabwidth).
Comparisons
-----------
-if foo == None
+if foo != None
should be replaced with:
if foo is not None:
-Is not does a reference comparison (address1 = address2 basically) and
+Is not does a reference comparison (address1 = address2 basically) and
the == forces a by value compare (with __eq__())
Dict Lookups
@@ -139,7 +140,7 @@ NO:
Try not to import large numbers of things into the namespace of a module.
I realize this is done all over the place in current code but it really makes it
-a pain to do code reflection when the namespace is cluttered with identifiers
+a pain to do code reflection when the namespace is cluttered with identifiers
from other modules.
YES:
@@ -159,13 +160,29 @@ just COLOR. However it means during introspection of the current namespace
The NO example just imports a set of functions from the output module. It is
somewhat annoying because the import line needs to be modified when functions
are needed and often unused functions are left in the import line until someone
-comes along with a linter to clean up (does not happen often). The color is a
-bit clearer as
+comes along with a linter to clean up (does not happen often).
- print red('blar')
-is shorter than:
+Releases
+--------
- print output.red('blar')
+First update the NEWS and RELEASE-NOTES files and commit.
-Rationale: python -c 'import portage; dir(portage)' (circa 02/2008)
+Second create a git tag for this release:
+ git tag v2.2.8
+
+Then create the tarball and run the tests:
+ ./mkrelease.sh --changelog-rev v2.2.7 --tag --runtests 2.2.8
+Make sure you have all supported python versions installed first
+(see PYTHON_SUPPORTED_VERSIONS in runtests.sh).
+
+Version bump the ebuild and verify it can re-install itself:
+ emerge portage
+ emerge portage
+
+Publish the results (no going back now):
+ - Push the new git tag
+ - Upload the tarball
+ - Commit the new ebuild version
+
+Close the bugs blocking the tracker bug for this release.
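
[The comparison and import guidelines that this DEVELOPING hunk touches condense to a short example. A minimal sketch in plain Python; AlwaysEqual is a made-up class for illustration:]

    foo = None
    assert foo is None                # identity: compares references

    class AlwaysEqual:
        def __eq__(self, other):      # __eq__ can be overridden to lie,
            return True               # which is why "== None" is unreliable

    assert AlwaysEqual() == None      # equality says yes...
    assert AlwaysEqual() is not None  # ...identity correctly says no

    import os.path                    # YES: namespaced, call os.path.join(...)
    # from os.path import join        # NO: clutters the module namespace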
diff --git a/Makefile b/Makefile
index f074dcfa3..92ea19562 100644
--- a/Makefile
+++ b/Makefile
@@ -27,13 +27,14 @@ INSMODE = 0644
EXEMODE = 0755
DIRMODE = 0755
SYSCONFDIR_FILES = etc-update.conf dispatch-conf.conf
-PORTAGE_CONFDIR_FILES = make.globals
+PORTAGE_CONFDIR_FILES = make.conf.example make.globals repos.conf
LOGROTATE_FILES = elog-save-summary
BINDIR_FILES = ebuild egencache emerge emerge-webrsync \
- portageq quickpkg repoman
+ emirrordist portageq quickpkg repoman
SBINDIR_FILES = archive-conf dispatch-conf emaint \
env-update etc-update fixpackages regenworld
DOCS = ChangeLog NEWS RELEASE-NOTES
+LINGUAS ?= $(shell cd "$(srcdir)/man" && find -mindepth 1 -type d)
ifdef PYTHONPATH
PYTHONPATH := $(srcdir)/pym:$(PYTHONPATH)
@@ -50,8 +51,6 @@ docbook:
epydoc:
set -e; \
- # workaround for bug 282760 \
- touch "$(srcdir)/pym/pysqlite2.py"; \
env PYTHONPATH="$(PYTHONPATH)" epydoc \
-o "$(WORKDIR)/epydoc" \
--name $(PN) \
@@ -63,9 +62,7 @@ epydoc:
-e s:^pym/:: \
-e s:/:.:g \
| sort); \
- rm -f "$(srcdir)/pym/pysqlite2.py"* \
- "$(WORKDIR)/epydoc/pysqlite2-"* \
- "$(WORKDIR)/epydoc/api-objects.txt"; \
+ rm -f "$(WORKDIR)/epydoc/api-objects.txt"; \
test:
set -e; \
@@ -81,9 +78,6 @@ install:
cd "$(srcdir)/cnf"; \
install -m$(INSMODE) $(PORTAGE_CONFDIR_FILES) \
"$(DESTDIR)$(portage_confdir)"; \
- install -m$(INSMODE) "$(srcdir)/cnf/make.conf" \
- "$(DESTDIR)$(portage_confdir)/make.conf.example"; \
- \
install -d -m$(DIRMODE) "$(DESTDIR)$(portage_setsdir)"; \
cd "$(S)/cnf/sets"; \
install -m$(INSMODE) *.conf "$(DESTDIR)$(portage_setsdir)"; \
@@ -184,10 +178,18 @@ install:
cd "$(srcdir)"; \
install -m $(INSMODE) $(DOCS) "$(DESTDIR)$(docdir)"; \
\
- for x in 1 5 ; do \
- install -d -m$(DIRMODE) "$(DESTDIR)$(mandir)/man$$x"; \
- cd "$(srcdir)/man"; \
- install -m$(INSMODE) *.$$x "$(DESTDIR)$(mandir)/man$$x"; \
+ for x in "" $(LINGUAS); do \
+ for y in 1 5 ; do \
+ if [ -d "$(srcdir)/man/$$x" ]; then \
+ cd "$(srcdir)/man/$$x"; \
+ files=$$(echo *.$$y); \
+ if [ -z "$$files" ] || [ "$$files" = "*.$$y" ]; then \
+ continue; \
+ fi; \
+ install -d -m$(DIRMODE) "$(DESTDIR)$(mandir)/$$x/man$$y"; \
+ install -m$(INSMODE) *.$$y "$(DESTDIR)$(mandir)/$$x/man$$y"; \
+ fi; \
+ done; \
done; \
\
if [ -f "$(srcdir)/doc/portage.html" ] ; then \
@@ -208,7 +210,6 @@ install:
clean:
set -e; \
$(MAKE) -C "$(srcdir)/doc" clean; \
- rm -rf "$(srcdir)/pym/pysqlite2.py"* \
- "$(WORKDIR)/epydoc"; \
+ rm -rf "$(WORKDIR)/epydoc"; \
.PHONY: all clean docbook epydoc install test
diff --git a/NEWS b/NEWS
index 9a2f24f6d..43d1797c2 100644
--- a/NEWS
+++ b/NEWS
@@ -1,22 +1,34 @@
News (mainly features/major bug fixes)
+portage-2.2.1
+-------------
+
+* Add cgroups, ipc-sandbox, and network-sandbox FEATURES.
+
portage-2.2
-------------
-* Add link level dependency awareness to emerge --depclean and --prune actions
- in order to protect against uninstallation of required libraries.
-* Add support for generic package sets (also see RELEASE-NOTES)
+* Add extended set configuration via /etc/portage/sets.conf. See
+ /usr/share/portage/config/sets/portage.conf for examples.
+
+portage-2.1.11.20
+-------------
+* Add support for EAPI 5. Refer to the PMS EAPI Cheat Sheet, portage's html
+ docs installed with USE=doc, or `man 5 ebuild` for more info about EAPI 5.
* Add support for FEATURES=preserve-libs which preserves libraries when the
sonames change during upgrade or downgrade, and the @preserved-rebuild
package set which rebuilds consumers of preserved libraries.
+* Add link level dependency awareness to emerge --depclean and --prune actions
+ in order to protect against uninstallation of required libraries. Refer to
+ the --depclean-lib-check option in the emerge(1) man page.
portage-2.1.11
-------------
* Add support for experimental EAPI "4-slot-abi". Refer to the corresponding
html documentation that is installed with USE=doc, and also to the emerge(1)
- man page for information about the related --ignore-built-slot-abi-deps and
- --rebuild-if-new-slot-abi options.
+ man page for information about the related --ignore-built-slot-operator-deps and
+ --rebuild-if-new-slot options.
portage-2.1.10
-------------
diff --git a/README b/README
new file mode 100644
index 000000000..5558ddead
--- /dev/null
+++ b/README
@@ -0,0 +1,49 @@
+About Portage
+=============
+
+Portage is a package management system based on ports collections. The
+Package Manager Specification Project (PMS) standardises and documents
+the behaviour of Portage so that the Portage tree can be used by other
+package managers.
+
+
+Dependencies
+============
+
+Python and Bash should be the only hard dependencies. Python 2.6 is the
+minimum supported version.
+
+
+Licensing and Legalese
+=======================
+
+Portage is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+version 2 as published by the Free Software Foundation.
+
+Portage is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Portage; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA.
+
+
+More information
+================
+
+-DEVELOPING contains some code guidelines.
+-LICENSE contains the GNU General Public License version 2.
+-NEWS contains new features/major bug fixes for each version.
+-RELEASE NOTES contains mainly upgrade information for each version.
+-TEST-NOTES contains Portage unit test information.
+
+
+Links
+=====
+Gentoo project page: <http://www.gentoo.org/proj/en/portage/>
+PMS: <https://dev.gentoo.org/~ulm/pms/head/pms.html>
+PMS git repo: <http://git.overlays.gentoo.org/gitweb/?p=proj/pms.git>
diff --git a/RELEASE-NOTES b/RELEASE-NOTES
index 93e67ed5a..72aa21934 100644
--- a/RELEASE-NOTES
+++ b/RELEASE-NOTES
@@ -1,30 +1,53 @@
Release Notes; upgrade information mainly.
Features/major bugfixes are listed in NEWS
+portage-2.2.
+==================================
+* Bug Fixes:
+ - Bug # 450372 Russian translation update.
+ - Bug #497238: Fix unnecessary rebuild caused by equal versions
+ in different repositories.
+ - Bug #501360 Only use Atoms with package_tracker.match
+ - For a complete list of bug fixes, changes, See the Changelog installed at
+ /usr/share/doc/portage-2.2.9/ChangeLog.bz2
+
+portage-2.2.8
+==================================
+* Bug Fixes:
+ - Bug 488972 - sys-apps/portage-2.2.7:
+ "egencache --update --rsync" does not create metadata/timestamp.chk
+ - For a complete list of bug fixes, changes, See the Changelog installed at
+ /usr/share/doc/portage-2.2.8/ChangeLog.bz2
+
portage-2.2
==================================
* Portage now warns if an ebuild repository does not have a name, as several
new features in 2.2 make use of or require named repositories. The repository
name is stored in profiles/repo_name in each repository.
-* Package set support: There are several important notes regarding package
- sets:
- - they may currently only include simple and versioned atoms or other sets,
- use conditionals or any-of constructs aren't possible yet
- - sets can be referenced either in other file-based sets or as argument to
- emerge, but not in ebuilds, config files or other tools at this time.
- - packages won't be unmerged if they are referenced by an installed package
- set (with the exception of the world set, and installed being determined
- by the world_sets file).
-* The "selected" package set, which includes packages listed in
- /var/lib/portage/world, has been extended to include nested sets that may
- be listed /var/lib/portage/world_sets.
+
+portage-2.1.13
+==================================
+
+* FEATURES=userpriv and usersandbox are enabled by default.
+* FEATURES=usersync is enabled by default.
+* New sync-cvs-repo, sync-type and sync-uri attributes in repos.conf replace
+ SYNC variable.
+
+portage-2.1.12
+==================================
+
+* FEATURES=preserve-libs is enabled by default.
+* ACCEPT_RESTRICT variable may be used to mask packages based on RESTRICT.
portage-2.1.11
==================================
* User-defined package sets can now be created by placing files in the
/etc/portage/sets/ directory. Refer to the emerge(1) and portage(5) man
pages for more information.
+* The "selected" package set, which includes packages listed in
+ /var/lib/portage/world, has been extended to include nested sets that may
+ be listed /var/lib/portage/world_sets.
portage-2.1.10.61
==================================
@@ -119,7 +142,7 @@ portage-2.1.6
* The python namespace for portage has been sanitized, all portage related code
is now contained within the portage namespace. External script should be
updated accordingly, though links exist for backward compability.
-* -* support in package.keywords was changed as it was inconsistent with
+* -* support in package.keywords was changed as it was inconsistent with
ACCEPT_KEYWORDS behavior (also see
http://dev.gentoo.org/~genone/docs/KEYWORDS.stupid).
Previously having -* in package.keywords matched packages with KEYWORDS="-*",
@@ -225,7 +248,7 @@ portage-2.1.1
* emerge --search doesn't use regular expressions now anymore by default, so
emerge --search dvd+rw-tools now works as expected. Regular expressions can be enabled
- by prefixing the search string with %.
+ by prefixing the search string with %.
* emerge --depclean algorithm is much safer than the old one.
* emerge --newuse detects changes in IUSE that previously went undetected.
@@ -238,9 +261,9 @@ portage-2.1
by the name of --alphabetical. Adding the option to EMERGE_DEFAULT_OPTS
in make.conf will restore the old behaviour permanently.
* The deprecated --inject has been removed, use /etc/portage/profile/package.provided
-* The deprecated --upgradeonly has been removed, use /etc/portage/package.*
+* The deprecated --upgradeonly has been removed, use /etc/portage/package.*
instead.
-* 'emerge sync' has been deprecated, use 'emerge --sync' instead (same
+* 'emerge sync' has been deprecated, use 'emerge --sync' instead (same
for other actions)
* Tools that call emerge should override the EMERGE_DEFAULT_OPTS environment
variable or use the emerge --ignore-default-opts option.
@@ -249,6 +272,6 @@ portage-2.1
* autouse (use.defaults) has been deprecated by specifying USE_ORDER in make.defaults
Users may still turn this back on by specifying USE_ORDER="env:pkg:conf:auto:defaults"
in make.conf. Interested in figuring out what use flags were turned off? Check out
- /usr/portage/profiles/base/use.defaults and other use.defaults files that correspond
+ /usr/portage/profiles/base/use.defaults and other use.defaults files that correspond
to your profile.
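
[The sync-type and sync-uri attributes noted under portage-2.1.13 live in /etc/portage/repos.conf; a minimal sketch of such a section, using the conventional Gentoo repository name and rsync URI of that era for illustration:]

    [gentoo]
    location = /usr/portage
    sync-type = rsync
    sync-uri = rsync://rsync.gentoo.org/gentoo-portage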
diff --git a/bin/archive-conf b/bin/archive-conf
index 797866817..f73ca425e 100755
--- a/bin/archive-conf
+++ b/bin/archive-conf
@@ -1,5 +1,5 @@
-#!/usr/bin/python
-# Copyright 1999-2006 Gentoo Foundation
+#!/usr/bin/python -b
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
#
@@ -12,43 +12,21 @@
from __future__ import print_function
import sys
-try:
- import portage
-except ImportError:
- from os import path as osp
- sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
- import portage
+from os import path as osp
+pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
+sys.path.insert(0, pym_path)
+import portage
+portage._internal_caller = True
+
+import portage.dispatch_conf
from portage import os
-from portage import dispatch_conf
+from portage.checksum import perform_md5
FIND_EXTANT_CONTENTS = "find %s -name CONTENTS"
MANDATORY_OPTS = [ 'archive-dir' ]
-try:
- import fchksum
- def perform_checksum(filename): return fchksum.fmd5t(filename)
-except ImportError:
- import md5
- def md5_to_hex(md5sum):
- hexform = ""
- for ix in range(len(md5sum)):
- hexform = hexform + "%02x" % ord(md5sum[ix])
- return hexform.lower()
-
- def perform_checksum(filename):
- f = open(filename, 'rb')
- blocksize=32768
- data = f.read(blocksize)
- size = 0
- sum = md5.new()
- while data:
- sum.update(data)
- size = size + len(data)
- data = f.read(blocksize)
- return (md5_to_hex(sum.digest()),size)
-
def archive_conf():
args = []
content_files = []
@@ -63,19 +41,19 @@ def archive_conf():
md5_match_hash[conf] = ''
# Find all the CONTENT files in VDB_PATH.
- content_files += os.popen(FIND_EXTANT_CONTENTS %
- (os.path.join(portage.settings['EROOT'], portage.VDB_PATH))).readlines()
+ with os.popen(FIND_EXTANT_CONTENTS % (os.path.join(portage.settings['EROOT'], portage.VDB_PATH))) as f:
+ content_files += f.readlines()
# Search for the saved md5 checksum of all the specified config files
# and see if the current file is unmodified or not.
try:
todo_cnt = len(args)
- for file in content_files:
- file = file.rstrip()
+ for filename in content_files:
+ filename = filename.rstrip()
try:
- contents = open(file, "r")
+ contents = open(filename, "r")
except IOError as e:
- print('archive-conf: Unable to open %s: %s' % (file, e), file=sys.stderr)
+ print('archive-conf: Unable to open %s: %s' % (filename, e), file=sys.stderr)
sys.exit(1)
lines = contents.readlines()
for line in lines:
@@ -84,7 +62,7 @@ def archive_conf():
for conf in args:
if items[1] == conf:
stored = items[2].lower()
- real = perform_checksum(conf)[0].lower()
+ real = perform_md5(conf).lower()
if stored == real:
md5_match_hash[conf] = conf
todo_cnt -= 1
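
[For context, the perform_md5 call that replaces the removed fchksum/md5 fallback above yields an MD5 hex digest of the file contents. A rough stdlib stand-in, not portage's actual implementation; the function name is illustrative:]

    import hashlib

    def md5_of_file(filename, blocksize=32768):
        # Hash the file in fixed-size blocks so large configs need not
        # fit in memory, then return the hex digest string.
        md5 = hashlib.md5()
        with open(filename, 'rb') as f:
            for block in iter(lambda: f.read(blocksize), b''):
                md5.update(block)
        return md5.hexdigest()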
diff --git a/bin/banned-helper b/bin/banned-helper
deleted file mode 100755
index 17ea9915d..000000000
--- a/bin/banned-helper
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-# Copyright 2009 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-die "'${0##*/}' has been banned for EAPI '$EAPI'"
-exit 1
diff --git a/bin/bashrc-functions.sh b/bin/bashrc-functions.sh
index 4da558581..503b17224 100644
--- a/bin/bashrc-functions.sh
+++ b/bin/bashrc-functions.sh
@@ -1,9 +1,9 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
portageq() {
- PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
+ PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}}\
"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" "$@"
}
@@ -23,71 +23,16 @@ register_success_hook() {
done
}
-strip_duplicate_slashes() {
+__strip_duplicate_slashes() {
if [[ -n $1 ]] ; then
local removed=$1
while [[ ${removed} == *//* ]] ; do
removed=${removed//\/\///}
done
- echo ${removed}
+ echo "${removed}"
fi
}
-# this is a function for removing any directory matching a passed in pattern from
-# PATH
-remove_path_entry() {
- save_IFS
- IFS=":"
- stripped_path="${PATH}"
- while [ -n "$1" ]; do
- cur_path=""
- for p in ${stripped_path}; do
- if [ "${p/${1}}" == "${p}" ]; then
- cur_path="${cur_path}:${p}"
- fi
- done
- stripped_path="${cur_path#:*}"
- shift
- done
- restore_IFS
- PATH="${stripped_path}"
-}
-
-# Set given variables unless these variable have been already set (e.g. during emerge
-# invocation) to values different than values set in make.conf.
-set_unless_changed() {
- if [[ $# -lt 1 ]]; then
- die "${FUNCNAME}() requires at least 1 argument: VARIABLE=VALUE"
- fi
-
- local argument value variable
- for argument in "$@"; do
- if [[ ${argument} != *=* ]]; then
- die "${FUNCNAME}(): Argument '${argument}' has incorrect syntax"
- fi
- variable="${argument%%=*}"
- value="${argument#*=}"
- if eval "[[ \${${variable}} == \$(env -u ${variable} portageq envvar ${variable}) ]]"; then
- eval "${variable}=\"\${value}\""
- fi
- done
-}
-
-# Unset given variables unless these variable have been set (e.g. during emerge
-# invocation) to values different than values set in make.conf.
-unset_unless_changed() {
- if [[ $# -lt 1 ]]; then
- die "${FUNCNAME}() requires at least 1 argument: VARIABLE"
- fi
-
- local variable
- for variable in "$@"; do
- if eval "[[ \${${variable}} == \$(env -u ${variable} portageq envvar ${variable}) ]]"; then
- unset ${variable}
- fi
- done
-}
-
KV_major() {
[[ -z $1 ]] && return 1
diff --git a/bin/binhost-snapshot b/bin/binhost-snapshot
index 9d2697d03..c2204f03d 100755
--- a/bin/binhost-snapshot
+++ b/bin/binhost-snapshot
@@ -1,9 +1,8 @@
-#!/usr/bin/python
-# Copyright 2010-2011 Gentoo Foundation
+#!/usr/bin/python -b
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import io
-import optparse
import os
import sys
import textwrap
@@ -13,13 +12,12 @@ try:
except ImportError:
from urlparse import urlparse
-try:
- import portage
-except ImportError:
- from os import path as osp
- sys.path.insert(0, osp.join(osp.dirname(osp.dirname(
- osp.realpath(__file__))), "pym"))
- import portage
+from os import path as osp
+pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
+sys.path.insert(0, pym_path)
+import portage
+portage._internal_caller = True
+from portage.util._argparse import ArgumentParser
def parse_args(argv):
prog_name = os.path.basename(argv[0])
@@ -33,7 +31,7 @@ def parse_args(argv):
usage += "\n\n"
for line in textwrap.wrap(prog_desc, 70):
- usage += line + "\n"
+ usage += line + "\n"
usage += "\n"
usage += "Required Arguments:\n\n"
@@ -47,11 +45,12 @@ def parse_args(argv):
"write Packages index with\n" + \
" snapshot_uri"
- parser = optparse.OptionParser(usage=usage)
- parser.add_option('--hardlinks', help='create hardlinks (y or n, default is y)',
- choices=('y', 'n'))
- parser.set_defaults(hardlinks='y')
- options, args = parser.parse_args(argv[1:])
+ parser = ArgumentParser(usage=usage)
+ parser.add_argument('--hardlinks',
+ help='create hardlinks (y or n, default is y)',
+ choices=('y', 'n'),
+ default='y')
+ options, args = parser.parse_known_args(argv[1:])
if len(args) != 4:
parser.error("Required 4 arguments, got %d" % (len(args),))
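
[The optparse-to-ArgumentParser conversion above, reduced to its stdlib core (portage routes through portage.util._argparse so Python 2.6 can fall back to optparse); a sketch:]

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--hardlinks', choices=('y', 'n'), default='y',
                        help='create hardlinks (y or n, default is y)')
    # parse_known_args() returns (namespace, leftover args), mirroring the
    # (options, args) pair the old optparse code expected.
    options, args = parser.parse_known_args(['--hardlinks=n', 'a', 'b', 'c', 'd'])
    assert options.hardlinks == 'n' and len(args) == 4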
diff --git a/bin/check-implicit-pointer-usage.py b/bin/check-implicit-pointer-usage.py
index 8822c4504..242436c1d 100755
--- a/bin/check-implicit-pointer-usage.py
+++ b/bin/check-implicit-pointer-usage.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python -b
# Ripped from HP and updated from Debian
# Update by Gentoo to support unicode output
diff --git a/bin/chpathtool.py b/bin/chpathtool.py
index d0d49cb6d..64606623a 100755
--- a/bin/chpathtool.py
+++ b/bin/chpathtool.py
@@ -1,15 +1,26 @@
-#!/usr/bin/python
-# Copyright 2011 Gentoo Foundation
+#!/usr/bin/python -b
+# Copyright 2011-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+"""Helper tool for converting installed files to custom prefixes.
+
+In other words, eprefixy $D for Gentoo/Prefix."""
+
import io
-import optparse
import os
import stat
import sys
-CONTENT_ENCODING = "utf_8"
-FS_ENCODING = "utf_8"
+from portage.util._argparse import ArgumentParser
+
+# Argument parsing compatibility for Python 2.6 using optparse.
+if sys.hexversion < 0x2070000:
+ from optparse import OptionParser
+
+from optparse import OptionError
+
+CONTENT_ENCODING = 'utf_8'
+FS_ENCODING = 'utf_8'
try:
import magic
@@ -41,7 +52,9 @@ class IsTextFile(object):
def _is_text_magic(self, filename):
mime_type = self._m.file(filename)
- return mime_type.startswith("text/")
+ if isinstance(mime_type, bytes):
+ mime_type = mime_type.decode('ascii', 'replace')
+ return mime_type.startswith('text/')
def _is_text_encoding(self, filename):
try:
@@ -64,7 +77,7 @@ def chpath_inplace(filename, is_text_file, old, new):
try:
orig_mode = stat.S_IMODE(os.lstat(filename).st_mode)
except OSError as e:
- sys.stderr.write("%s: %s\n" % (e, filename))
+ sys.stderr.write('%s: %s\n' % (e, filename))
return
temp_mode = 0o200 | orig_mode
os.chmod(filename, temp_mode)
@@ -121,8 +134,12 @@ def chpath_inplace(filename, is_text_file, old, new):
f.close()
if modified:
- orig_mtime = orig_stat[stat.ST_MTIME]
- os.utime(filename, (orig_mtime, orig_mtime))
+ if sys.hexversion >= 0x3030000:
+ orig_mtime = orig_stat.st_mtime_ns
+ os.utime(filename, ns=(orig_mtime, orig_mtime))
+ else:
+ orig_mtime = orig_stat[stat.ST_MTIME]
+ os.utime(filename, (orig_mtime, orig_mtime))
return modified
def chpath_inplace_symlink(filename, st, old, new):
@@ -135,14 +152,37 @@ def chpath_inplace_symlink(filename, st, old, new):
def main(argv):
- usage = "%s [options] <location> <old> <new>" % (os.path.basename(argv[0],))
- parser = optparse.OptionParser(usage=usage)
- options, args = parser.parse_args(argv[1:])
-
- if len(args) != 3:
- parser.error("3 args required, got %s" % (len(args),))
-
- location, old, new = args
+ parser = ArgumentParser(description=__doc__)
+ try:
+ parser.add_argument('location', default=None,
+ help='root directory (e.g. $D)')
+ parser.add_argument('old', default=None,
+ help='original build prefix (e.g. /)')
+ parser.add_argument('new', default=None,
+ help='new install prefix (e.g. $EPREFIX)')
+ opts = parser.parse_args(argv)
+
+ location, old, new = opts.location, opts.old, opts.new
+ except OptionError:
+ # Argument parsing compatibility for Python 2.6 using optparse.
+ if sys.hexversion < 0x2070000:
+ parser = OptionParser(description=__doc__,
+ usage="usage: %prog [-h] location old new\n\n" + \
+ " location: root directory (e.g. $D)\n" + \
+ " old: original build prefix (e.g. /)\n" + \
+ " new: new install prefix (e.g. $EPREFIX)")
+
+ (opts, args) = parser.parse_args()
+
+ if len(args) != 3:
+ parser.print_usage()
+ print("%s: error: expected 3 arguments, got %i"
+ % (__file__, len(args)))
+ return
+
+ location, old, new = args[0:3]
+ else:
+ raise
is_text_file = IsTextFile()
@@ -178,5 +218,5 @@ def main(argv):
return os.EX_OK
-if __name__ == "__main__":
- sys.exit(main(sys.argv))
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/bin/clean_locks b/bin/clean_locks
index 8c4299c92..3e969f2c6 100755
--- a/bin/clean_locks
+++ b/bin/clean_locks
@@ -1,21 +1,17 @@
-#!/usr/bin/python -O
-# Copyright 1999-2006 Gentoo Foundation
+#!/usr/bin/python -bO
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
import sys, errno
-try:
- import portage
-except ImportError:
- from os import path as osp
- sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
- import portage
-
-from portage import os
+from os import path as osp
+pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
+sys.path.insert(0, pym_path)
+import portage
+portage._internal_caller = True
if not sys.argv[1:] or "--help" in sys.argv or "-h" in sys.argv:
- import portage
print()
print("You must specify directories with hardlink-locks to clean.")
print("You may optionally specify --force, which will remove all")
@@ -26,11 +22,11 @@ if not sys.argv[1:] or "--help" in sys.argv or "-h" in sys.argv:
print("%s --force %s/.locks" % (sys.argv[0], portage.settings["DISTDIR"]))
print()
sys.exit(1)
-
+
force = False
if "--force" in sys.argv[1:]:
force=True
-
+
for x in sys.argv[1:]:
if x == "--force":
continue
@@ -38,7 +34,7 @@ for x in sys.argv[1:]:
for y in portage.locks.hardlock_cleanup(x, remove_all_locks=force):
print(y)
print()
-
+
except OSError as e:
if e.errno in (errno.ENOENT, errno.ENOTDIR):
print("!!! %s is not a directory or does not exist" % x)
diff --git a/bin/dispatch-conf b/bin/dispatch-conf
index 139a001e8..4b0c0ac8f 100755
--- a/bin/dispatch-conf
+++ b/bin/dispatch-conf
@@ -1,5 +1,5 @@
-#!/usr/bin/python -O
-# Copyright 1999-2011 Gentoo Foundation
+#!/usr/bin/python -bO
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
#
@@ -16,19 +16,15 @@ from __future__ import print_function
from stat import ST_GID, ST_MODE, ST_UID
from random import random
import atexit, re, shutil, stat, sys
-
-try:
- import portage
-except ImportError:
- from os import path as osp
- sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
- import portage
-
+from os import path as osp
+pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
+sys.path.insert(0, pym_path)
+import portage
+portage._internal_caller = True
from portage import os
-from portage import dispatch_conf
from portage import _unicode_decode
from portage.dispatch_conf import diffstatusoutput
-from portage.process import find_binary
+from portage.process import find_binary, spawn
FIND_EXTANT_CONFIGS = "find '%s' %s -name '._cfg????_%s' ! -name '.*~' ! -iname '.*.bak' -print"
DIFF_CONTENTS = "diff -Nu '%s' '%s'"
@@ -83,7 +79,7 @@ class dispatch:
confs = []
count = 0
- config_root = portage.const.EPREFIX or os.sep
+ config_root = portage.settings["EPREFIX"] or os.sep
self.options = portage.dispatch_conf.read_config(MANDATORY_OPTS)
if "log-file" in self.options:
@@ -411,7 +407,8 @@ class dispatch:
def do_help (self):
- print(); print
+ print()
+ print()
print(' u -- update current config with new config and continue')
print(' z -- zap (delete) new config and continue')
@@ -431,7 +428,7 @@ class dispatch:
def getch ():
# from ASPN - Danny Yoo
#
- import sys, tty, termios
+ import tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
@@ -456,17 +453,18 @@ def clear_screen():
pass
os.system("clear 2>/dev/null")
-from portage.process import find_binary, spawn
shell = os.environ.get("SHELL")
if not shell or not os.access(shell, os.EX_OK):
shell = find_binary("sh")
def spawn_shell(cmd):
if shell:
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
spawn([shell, "-c", cmd], env=os.environ,
- fd_pipes = { 0 : sys.stdin.fileno(),
- 1 : sys.stdout.fileno(),
- 2 : sys.stderr.fileno()})
+ fd_pipes = { 0 : portage._get_stdin().fileno(),
+ 1 : sys.__stdout__.fileno(),
+ 2 : sys.__stderr__.fileno()})
else:
os.system(cmd)
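
[The flush calls added to spawn_shell() matter because Python buffers writes to sys.__stdout__/__stderr__ while the spawned child writes straight to the inherited descriptors; without flushing first, output can interleave out of order. The same pattern with the stdlib, a sketch rather than portage.process.spawn:]

    import subprocess, sys

    sys.__stdout__.write("buffered Python output...\n")
    sys.__stdout__.flush()   # drain our buffer before the child writes
    subprocess.call(['sh', '-c', 'echo child output'],
                    stdin=sys.__stdin__,
                    stdout=sys.__stdout__,
                    stderr=sys.__stderr__)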
diff --git a/bin/dohtml.py b/bin/dohtml.py
index 3e80ef5f6..5359f5e89 100755
--- a/bin/dohtml.py
+++ b/bin/dohtml.py
@@ -1,5 +1,5 @@
-#!/usr/bin/python
-# Copyright 1999-2012 Gentoo Foundation
+#!/usr/bin/python -b
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
#
@@ -31,13 +31,25 @@
from __future__ import print_function
import os
+import shutil
import sys
+from portage.util import normalize_path
+
+# Change back to original cwd _after_ all imports (bug #469338).
+os.chdir(os.environ["__PORTAGE_HELPER_CWD"])
+
def dodir(path):
- os.spawnlp(os.P_WAIT, "install", "install", "-d", path)
+ try:
+ os.makedirs(path, 0o755)
+ except OSError:
+ if not os.path.isdir(path):
+ raise
+ os.chmod(path, 0o755)
def dofile(src,dst):
- os.spawnlp(os.P_WAIT, "install", "install", "-m0644", src, dst)
+ shutil.copy(src, dst)
+ os.chmod(dst, 0o644)
def eqawarn(lines):
cmd = "source '%s/isolated-functions.sh' ; " % \
@@ -55,14 +67,18 @@ unwarned_skipped_files = os.environ.get("PORTAGE_DOHTML_UNWARNED_SKIPPED_FILES",
def install(basename, dirname, options, prefix=""):
fullpath = basename
if prefix:
- fullpath = prefix + "/" + fullpath
+ fullpath = os.path.join(prefix, fullpath)
if dirname:
- fullpath = dirname + "/" + fullpath
+ fullpath = os.path.join(dirname, fullpath)
if options.DOCDESTTREE:
- destdir = options.ED + "usr/share/doc/" + options.PF + "/" + options.DOCDESTTREE + "/" + options.doc_prefix + "/" + prefix
+ desttree = options.DOCDESTTREE
else:
- destdir = options.ED + "usr/share/doc/" + options.PF + "/html/" + options.doc_prefix + "/" + prefix
+ desttree = "html"
+
+ destdir = os.path.join(options.ED, "usr", "share", "doc",
+ options.PF.lstrip(os.sep), desttree.lstrip(os.sep),
+ options.doc_prefix.lstrip(os.sep), prefix).rstrip(os.sep)
if not os.path.exists(fullpath):
sys.stderr.write("!!! dohtml: %s does not exist\n" % fullpath)
@@ -71,14 +87,15 @@ def install(basename, dirname, options, prefix=""):
ext = os.path.splitext(basename)[1][1:]
if ext in options.allowed_exts or basename in options.allowed_files:
dodir(destdir)
- dofile(fullpath, destdir + "/" + basename)
+ dofile(fullpath, os.path.join(destdir, basename))
elif warn_on_skipped_files and ext not in unwarned_skipped_extensions and basename not in unwarned_skipped_files:
skipped_files.append(fullpath)
elif options.recurse and os.path.isdir(fullpath) and \
basename not in options.disallowed_dirs:
for i in os.listdir(fullpath):
pfx = basename
- if prefix: pfx = prefix + "/" + pfx
+ if prefix:
+ pfx = os.path.join(prefix, pfx)
install(i, dirname, options, pfx)
elif not options.recurse and os.path.isdir(fullpath):
global skipped_directories
@@ -97,16 +114,22 @@ class OptionsClass:
if "PF" in os.environ:
self.PF = os.environ["PF"]
+ if self.PF:
+ self.PF = normalize_path(self.PF)
if "force-prefix" not in os.environ.get("FEATURES", "").split() and \
os.environ.get("EAPI", "0") in ("0", "1", "2"):
self.ED = os.environ.get("D", "")
else:
self.ED = os.environ.get("ED", "")
+ if self.ED:
+ self.ED = normalize_path(self.ED)
if "_E_DOCDESTTREE_" in os.environ:
self.DOCDESTTREE = os.environ["_E_DOCDESTTREE_"]
+ if self.DOCDESTTREE:
+ self.DOCDESTTREE = normalize_path(self.DOCDESTTREE)
self.allowed_exts = ['css', 'gif', 'htm', 'html', 'jpeg', 'jpg', 'js', 'png']
- if os.environ.get("EAPI", "0") in ("4-python",):
+ if os.environ.get("EAPI", "0") in ("4-python", "5-progress"):
self.allowed_exts += ['ico', 'svg', 'xhtml', 'xml']
self.allowed_files = []
self.disallowed_dirs = ['CVS']
@@ -153,6 +176,8 @@ def parse_args():
sys.exit(0)
elif arg == "-p":
options.doc_prefix = sys.argv[x]
+ if options.doc_prefix:
+ options.doc_prefix = normalize_path(options.doc_prefix)
else:
values = sys.argv[x].split(",")
if arg == "-A":
@@ -179,8 +204,17 @@ def main():
print("Allowed files :", options.allowed_files)
success = False
+ endswith_slash = (os.sep, os.sep + ".")
for x in args:
+ trailing_slash = x.endswith(endswith_slash)
+ x = normalize_path(x)
+ if trailing_slash:
+ # Modify behavior of basename and dirname
+ # as noted in bug #425214, causing foo/ to
+ # behave similarly to the way that foo/*
+ # behaves.
+ x += os.sep
basename = os.path.basename(x)
dirname = os.path.dirname(x)
success |= install(basename, dirname, options)
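
[The trailing-slash handling added to main() above (bug #425214) leans on how basename and dirname split a path that ends in a separator; a quick demonstration:]

    import os.path

    # Without the trailing slash, the directory itself is installed:
    assert os.path.basename("doc/html") == "html"
    assert os.path.dirname("doc/html") == "doc"

    # With it, basename is empty and dirname keeps the full path, so
    # install() recurses over the contents, like "doc/html/*":
    assert os.path.basename("doc/html/") == ""
    assert os.path.dirname("doc/html/") == "doc/html"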
diff --git a/bin/eapi.sh b/bin/eapi.sh
new file mode 100644
index 000000000..623b89fee
--- /dev/null
+++ b/bin/eapi.sh
@@ -0,0 +1,145 @@
+#!/bin/bash
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# PHASES
+
+___eapi_has_pkg_pretend() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_src_prepare() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1)$ ]]
+}
+
+___eapi_has_src_configure() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1)$ ]]
+}
+
+___eapi_default_src_test_disables_parallel_jobs() {
+ [[ ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_has_S_WORKDIR_fallback() {
+ [[ ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+# VARIABLES
+
+___eapi_has_prefix_variables() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2)$ || " ${FEATURES} " == *" force-prefix "* ]]
+}
+
+___eapi_has_HDEPEND() {
+ [[ ${1-${EAPI}} =~ ^(5-hdepend)$ ]]
+}
+
+___eapi_has_RDEPEND_DEPEND_fallback() {
+ [[ ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+# HELPERS PRESENCE
+
+___eapi_has_dohard() {
+ [[ ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_dosed() {
+ [[ ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_docompress() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_nonfatal() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_doheader() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_has_usex() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_has_master_repositories() {
+ [[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_repository_path() {
+ [[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_available_eclasses() {
+ [[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_eclass_path() {
+ [[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_license_path() {
+ [[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_package_manager_build_user() {
+ [[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_package_manager_build_group() {
+ [[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+# HELPERS BEHAVIOR
+
+___eapi_best_version_and_has_version_support_--host-root() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_unpack_supports_xz() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2)$ ]]
+}
+
+___eapi_econf_passes_--disable-dependency-tracking() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_econf_passes_--disable-silent-rules() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_use_enable_and_use_with_support_empty_third_argument() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_dodoc_supports_-r() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_doins_and_newins_preserve_symlinks() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_newins_supports_reading_from_standard_input() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_helpers_can_die() {
+ [[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_disallows_helpers_in_global_scope() {
+ [[ ${1-${EAPI}} =~ ^(4-python|5-progress)$ ]]
+}
+
+___eapi_unpack_is_case_sensitive() {
+ [[ ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi|5|5-hdepend)$ ]]
+}
+
+# OTHERS
+
+___eapi_enables_globstar() {
+ [[ ${1-${EAPI}} =~ ^(4-python|5-progress)$ ]]
+}
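
Each predicate above tests an explicit first argument when given, falling back to ${EAPI}, against a regex of EAPI names: whitelists mark where a feature exists, negated blacklists mark everything after it appeared. A hedged usage sketch (the source path is an assumption):

    source /usr/lib/portage/bin/eapi.sh   # assumed install location
    EAPI=2
    ___eapi_has_src_configure && echo "src_configure is a real phase"  # true for EAPI 2
    ___eapi_has_src_configure 1 || echo "but not in EAPI 1"            # explicit arg overrides ${EAPI}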
diff --git a/bin/ebuild b/bin/ebuild
index 65e5bef63..8f4b103c6 100755
--- a/bin/ebuild
+++ b/bin/ebuild
@@ -1,5 +1,5 @@
-#!/usr/bin/python -O
-# Copyright 1999-2012 Gentoo Foundation
+#!/usr/bin/python -bO
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -10,7 +10,7 @@ import sys
# This block ensures that ^C interrupts are handled quietly.
try:
- def exithandler(signum,frame):
+ def exithandler(signum, _frame):
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
sys.exit(128 + signum)
@@ -24,7 +24,7 @@ try:
except KeyboardInterrupt:
sys.exit(128 + signal.SIGINT)
-def debug_signal(signum, frame):
+def debug_signal(_signum, _frame):
import pdb
pdb.set_trace()
@@ -35,51 +35,50 @@ else:
signal.signal(debug_signum, debug_signal)
-import imp
import io
-import optparse
import os
+from os import path as osp
+pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
+sys.path.insert(0, pym_path)
+import portage
+portage._internal_caller = True
+from portage import os
+from portage import _encodings
+from portage import _shell_quote
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.const import VDB_PATH
+from portage.util._argparse import ArgumentParser
+from _emerge.Package import Package
+from _emerge.RootConfig import RootConfig
description = "See the ebuild(1) man page for more info"
usage = "Usage: ebuild <ebuild file> <command> [command] ..."
-parser = optparse.OptionParser(description=description, usage=usage)
+parser = ArgumentParser(description=description, usage=usage)
force_help = "When used together with the digest or manifest " + \
"command, this option forces regeneration of digests for all " + \
"distfiles associated with the current ebuild. Any distfiles " + \
"that do not already exist in ${DISTDIR} will be automatically fetched."
-parser.add_option("--force", help=force_help, action="store_true", dest="force")
-parser.add_option("--color", help="enable or disable color output",
- type="choice", choices=("y", "n"))
-parser.add_option("--debug", help="show debug output",
- action="store_true", dest="debug")
-parser.add_option("--version", help="show version and exit",
- action="store_true", dest="version")
-parser.add_option("--ignore-default-opts",
+parser.add_argument("--force", help=force_help, action="store_true")
+parser.add_argument("--color", help="enable or disable color output",
+ choices=("y", "n"))
+parser.add_argument("--debug", help="show debug output",
+ action="store_true")
+parser.add_argument("--version", help="show version and exit",
+ action="store_true")
+parser.add_argument("--ignore-default-opts",
action="store_true",
help="do not use the EBUILD_DEFAULT_OPTS environment variable")
-parser.add_option("--skip-manifest", help="skip all manifest checks",
- action="store_true", dest="skip_manifest")
-
-opts, pargs = parser.parse_args(args=sys.argv[1:])
+parser.add_argument("--skip-manifest", help="skip all manifest checks",
+ action="store_true")
-try:
- import portage
-except ImportError:
- from os import path as osp
- sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
- import portage
+opts, pargs = parser.parse_known_args(args=sys.argv[1:])
-portage.dep._internal_warnings = True
-from portage import os
-from portage import _encodings
-from portage import _shell_quote
-from portage import _unicode_decode
-from portage import _unicode_encode
-from portage.const import VDB_PATH
-from _emerge.Package import Package
-from _emerge.RootConfig import RootConfig
+def err(txt):
+ portage.writemsg('ebuild: %s\n' % (txt,), noiselevel=-1)
+ sys.exit(1)
if opts.version:
print("Portage", portage.VERSION)
@@ -89,8 +88,9 @@ if len(pargs) < 2:
parser.error("missing required args")
if not opts.ignore_default_opts:
- default_opts = portage.settings.get("EBUILD_DEFAULT_OPTS", "").split()
- opts, pargs = parser.parse_args(default_opts + sys.argv[1:])
+ default_opts = portage.util.shlex_split(
+ portage.settings.get("EBUILD_DEFAULT_OPTS", ""))
+ opts, pargs = parser.parse_known_args(default_opts + sys.argv[1:])
debug = opts.debug
force = opts.force
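
Switching from str.split() to portage.util.shlex_split() means shell-style quoting in EBUILD_DEFAULT_OPTS is now honored, e.g. a hypothetical make.conf entry:

    # hypothetical make.conf entry; values are parsed shell-style:
    EBUILD_DEFAULT_OPTS="--color n --skip-manifest"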
@@ -119,9 +119,7 @@ if ebuild.endswith(".ebuild"):
pf = os.path.basename(ebuild)[:-7]
if pf is None:
- portage.writemsg("'%s' does not end with '.ebuild'.\n" % \
- (ebuild,), noiselevel=-1)
- sys.exit(1)
+ err("%s: does not end with '.ebuild'" % (ebuild,))
if not os.path.isabs(ebuild):
mycwd = os.getcwd()
@@ -160,15 +158,14 @@ if ebuild_portdir != vdb_path and \
encoding=_encodings['content'], errors='strict')
print("Appending %s to PORTDIR_OVERLAY..." % ebuild_portdir)
- imp.reload(portage)
+ portage._reset_legacy_globals()
myrepo = None
if ebuild_portdir != vdb_path:
myrepo = portage.portdb.getRepositoryName(ebuild_portdir)
if not os.path.exists(ebuild):
- print("'%s' does not exist." % ebuild)
- sys.exit(1)
+ err('%s: does not exist' % (ebuild,))
ebuild_split = ebuild.split("/")
cpv = "%s/%s" % (ebuild_split[-3], pf)
@@ -179,8 +176,7 @@ with io.open(_unicode_encode(ebuild, encoding=_encodings['fs'], errors='strict')
if eapi is None:
eapi = "0"
if not portage.catpkgsplit(cpv, eapi=eapi):
- print("!!! %s does not follow correct package syntax." % (cpv))
- sys.exit(1)
+ err('%s: %s: does not follow correct package syntax' % (ebuild, cpv))
if ebuild.startswith(vdb_path):
mytree = "vartree"
@@ -189,8 +185,7 @@ if ebuild.startswith(vdb_path):
portage_ebuild = portage.db[portage.root][mytree].dbapi.findname(cpv, myrepo=myrepo)
if os.path.realpath(portage_ebuild) != ebuild:
- print("!!! Portage seems to think that %s is at %s" % (cpv, portage_ebuild))
- sys.exit(1)
+ err('Portage seems to think that %s is at %s' % (cpv, portage_ebuild))
else:
mytree = "porttree"
@@ -199,12 +194,10 @@ else:
portage_ebuild = portage.portdb.findname(cpv, myrepo=myrepo)
if not portage_ebuild or portage_ebuild != ebuild:
- print("!!! %s does not seem to have a valid PORTDIR structure." % ebuild)
- sys.exit(1)
+ err('%s: does not seem to have a valid PORTDIR structure' % (ebuild,))
if len(pargs) > 1 and "config" in pargs:
- print("config must be called on it's own, not combined with any other phase")
- sys.exit(1)
+ err('"config" must not be called with any other phase')
def discard_digests(myebuild, mysettings, mydbapi):
"""Discard all distfiles digests for the given ebuild. This is useful when
@@ -313,14 +306,16 @@ def stale_env_warning():
if ebuild_changed:
open(os.path.join(tmpsettings['PORTAGE_BUILDDIR'],
- '.ebuild_changed'), 'w')
+ '.ebuild_changed'), 'w').close()
from portage.exception import PermissionDenied, \
PortagePackageException, UnsupportedAPIException
-if 'digest' in tmpsettings.features and \
- not set(["digest", "manifest"]).intersection(pargs):
- pargs = ['digest'] + pargs
+if 'digest' in tmpsettings.features:
+ if pargs and pargs[0] not in ("digest", "manifest"):
+ pargs = ['digest'] + pargs
+ # We only need to build digests on the first pass.
+ tmpsettings.features.discard('digest')
checked_for_stale_env = False
@@ -334,7 +329,7 @@ for arg in pargs:
if arg in ("digest", "manifest") and force:
discard_digests(ebuild, tmpsettings, portage.portdb)
- a = portage.doebuild(ebuild, arg, portage.root, tmpsettings,
+ a = portage.doebuild(ebuild, arg, settings=tmpsettings,
debug=debug, tree=mytree,
vartree=portage.db[portage.root]['vartree'])
except KeyboardInterrupt:
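
With the digest hunk above, FEATURES=digest prepends the digest phase only once and then discards the feature flag, so subsequent phases in the same invocation do not regenerate digests. E.g. (hypothetical ebuild):

    ebuild foo-1.0.ebuild clean compile   # effectively runs: digest clean compile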
diff --git a/bin/ebuild-helpers/4/dodoc b/bin/ebuild-helpers/4/dodoc
deleted file mode 120000
index 35080ada3..000000000
--- a/bin/ebuild-helpers/4/dodoc
+++ /dev/null
@@ -1 +0,0 @@
-../doins
\ No newline at end of file
diff --git a/bin/ebuild-helpers/4/dohard b/bin/ebuild-helpers/4/dohard
deleted file mode 120000
index 1a6b57a39..000000000
--- a/bin/ebuild-helpers/4/dohard
+++ /dev/null
@@ -1 +0,0 @@
-../../banned-helper
\ No newline at end of file
diff --git a/bin/ebuild-helpers/4/dosed b/bin/ebuild-helpers/4/dosed
deleted file mode 120000
index 1a6b57a39..000000000
--- a/bin/ebuild-helpers/4/dosed
+++ /dev/null
@@ -1 +0,0 @@
-../../banned-helper
\ No newline at end of file
diff --git a/bin/ebuild-helpers/4/prepalldocs b/bin/ebuild-helpers/4/prepalldocs
deleted file mode 120000
index 1a6b57a39..000000000
--- a/bin/ebuild-helpers/4/prepalldocs
+++ /dev/null
@@ -1 +0,0 @@
-../../banned-helper
\ No newline at end of file
diff --git a/bin/ebuild-helpers/sed b/bin/ebuild-helpers/bsd/sed
index b21e8569c..01b88471d 100755
--- a/bin/ebuild-helpers/sed
+++ b/bin/ebuild-helpers/bsd/sed
@@ -1,27 +1,27 @@
#!/bin/bash
-# Copyright 2007 Gentoo Foundation
+# Copyright 2007-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
scriptpath=${BASH_SOURCE[0]}
scriptname=${scriptpath##*/}
-if [[ sed == ${scriptname} ]] && [[ -n ${ESED} ]]; then
+if [[ sed == ${scriptname} && -n ${ESED} ]]; then
exec ${ESED} "$@"
elif type -P g${scriptname} > /dev/null ; then
exec g${scriptname} "$@"
else
old_IFS="${IFS}"
IFS=":"
-
+
for path in $PATH; do
- [[ ${path}/${scriptname} == ${scriptpath} ]] && continue
if [[ -x ${path}/${scriptname} ]]; then
- exec ${path}/${scriptname} "$@"
+ [[ ${path}/${scriptname} -ef ${scriptpath} ]] && continue
+ exec "${path}/${scriptname}" "$@"
exit 0
fi
done
-
+
IFS="${old_IFS}"
fi
-
+
exit 1
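
The switch from a string comparison to bash's -ef primary makes the self-test inode-based, so the wrapper still skips itself when reached through a symlink or an alternate spelling of the same path. Illustrative sketch (hypothetical link):

    ln -s /bin/bash /tmp/mysh
    [[ /tmp/mysh -ef /bin/bash ]] && echo "same file"   # -ef: same device and inode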
diff --git a/bin/ebuild-helpers/dobin b/bin/ebuild-helpers/dobin
index f90d8933c..0ba1eb0c4 100755
--- a/bin/ebuild-helpers/dobin
+++ b/bin/ebuild-helpers/dobin
@@ -1,19 +1,20 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
if [[ $# -lt 1 ]] ; then
- helpers_die "${0##*/}: at least one argument needed"
+ __helpers_die "${0##*/}: at least one argument needed"
exit 1
fi
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
if [[ ! -d ${ED}${DESTTREE}/bin ]] ; then
- install -d "${ED}${DESTTREE}/bin" || { helpers_die "${0##*/}: failed to install ${ED}${DESTTREE}/bin"; exit 2; }
+ install -d "${ED}${DESTTREE}/bin" || { __helpers_die "${0##*/}: failed to install ${ED}${DESTTREE}/bin"; exit 2; }
fi
ret=0
@@ -28,5 +29,5 @@ for x in "$@" ; do
((ret|=$?))
done
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit ${ret}
diff --git a/bin/ebuild-helpers/doconfd b/bin/ebuild-helpers/doconfd
index e14600022..a3c09a50e 100755
--- a/bin/ebuild-helpers/doconfd
+++ b/bin/ebuild-helpers/doconfd
@@ -4,7 +4,7 @@
if [[ $# -lt 1 ]] ; then
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
- helpers_die "${0##*/}: at least one argument needed"
+ __helpers_die "${0##*/}: at least one argument needed"
exit 1
fi
diff --git a/bin/ebuild-helpers/dodir b/bin/ebuild-helpers/dodir
index 90a3efed4..e03ba9a58 100755
--- a/bin/ebuild-helpers/dodir
+++ b/bin/ebuild-helpers/dodir
@@ -1,13 +1,14 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
install -d ${DIROPTIONS} "${@/#/${ED}/}"
ret=$?
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit $ret
diff --git a/bin/ebuild-helpers/dodoc b/bin/ebuild-helpers/dodoc
index 1f333a615..99122c443 100755
--- a/bin/ebuild-helpers/dodoc
+++ b/bin/ebuild-helpers/dodoc
@@ -1,16 +1,24 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+if ___eapi_dodoc_supports_-r; then
+ exec \
+ env \
+ __PORTAGE_HELPER="dodoc" \
+ doins "$@"
+fi
+
if [ $# -lt 1 ] ; then
- helpers_die "${0##*/}: at least one argument needed"
- exit 1
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
fi
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
dir="${ED}usr/share/doc/${PF}/${_E_DOCDESTTREE_}"
if [ ! -d "${dir}" ] ; then
@@ -30,5 +38,5 @@ for x in "$@" ; do
fi
done
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit ${ret}
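
Per the ___eapi_dodoc_supports_-r predicate, dodoc under EAPI 4 and later now delegates to doins, carrying its own name in __PORTAGE_HELPER so error messages still say dodoc. Hypothetical usage:

    dodoc -r docs/ README ChangeLog   # -r recursion handled by doins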
diff --git a/bin/ebuild-helpers/doenvd b/bin/ebuild-helpers/doenvd
index 28ab5d234..9287933a9 100755
--- a/bin/ebuild-helpers/doenvd
+++ b/bin/ebuild-helpers/doenvd
@@ -4,7 +4,7 @@
if [[ $# -lt 1 ]] ; then
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
- helpers_die "${0##*/}: at least one argument needed"
+ __helpers_die "${0##*/}: at least one argument needed"
exit 1
fi
diff --git a/bin/ebuild-helpers/doexe b/bin/ebuild-helpers/doexe
index fb228f905..c34fcae74 100755
--- a/bin/ebuild-helpers/doexe
+++ b/bin/ebuild-helpers/doexe
@@ -1,23 +1,23 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
if [[ $# -lt 1 ]] ; then
- helpers_die "${0##*/}: at least one argument needed"
+ __helpers_die "${0##*/}: at least one argument needed"
exit 1
fi
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
if [[ ! -d ${ED}${_E_EXEDESTTREE_} ]] ; then
install -d "${ED}${_E_EXEDESTTREE_}"
fi
-TMP=$T/.doexe_tmp
-mkdir "$TMP"
+TMP=$(mktemp -d "${T}/.doexe_tmp_XXXXXX")
ret=0
@@ -26,7 +26,7 @@ for x in "$@" ; do
cp "$x" "$TMP"
mysrc=$TMP/${x##*/}
elif [ -d "${x}" ] ; then
- vecho "doexe: warning, skipping directory ${x}"
+ __vecho "doexe: warning, skipping directory ${x}"
continue
else
mysrc="${x}"
@@ -42,5 +42,5 @@ done
rm -rf "$TMP"
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit $ret
diff --git a/bin/ebuild-helpers/dohard b/bin/ebuild-helpers/dohard
index b52fd7c00..e0a44faf1 100755
--- a/bin/ebuild-helpers/dohard
+++ b/bin/ebuild-helpers/dohard
@@ -1,14 +1,22 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_dohard; then
+ die "'${0##*/}' has been banned for EAPI '$EAPI'"
+ exit 1
+fi
+
if [[ $# -ne 2 ]] ; then
echo "$0: two arguments needed" 1>&2
exit 1
fi
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
destdir=${2%/*}
[[ ! -d ${ED}${destdir} ]] && dodir "${destdir}"
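
With the ban above, invoking dohard under a newer EAPI aborts the build outright, e.g. (hypothetical arguments):

    dohard /usr/bin/foo /usr/bin/bar
    # die: 'dohard' has been banned for EAPI '4'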
diff --git a/bin/ebuild-helpers/doheader b/bin/ebuild-helpers/doheader
new file mode 100755
index 000000000..37953658b
--- /dev/null
+++ b/bin/ebuild-helpers/doheader
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_doheader; then
+ die "${0##*/} is not supported in EAPI ${EAPI}"
+fi
+
+if [[ $# -lt 1 ]] || [[ $1 == -r && $# -lt 2 ]] ; then
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+exec \
+env \
+INSDESTTREE="/usr/include/" \
+doins "$@"
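
doheader is a thin shim: it re-execs doins with INSDESTTREE pinned to /usr/include/. Hypothetical usage in src_install:

    doheader mylib.h        # ends up at ${ED}usr/include/mylib.h
    doheader -r include/    # -r recurses; note the two-argument check above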
diff --git a/bin/ebuild-helpers/dohtml b/bin/ebuild-helpers/dohtml
index 630629a4b..75d3d00ab 100755
--- a/bin/ebuild-helpers/dohtml
+++ b/bin/ebuild-helpers/dohtml
@@ -1,14 +1,19 @@
#!/bin/bash
-# Copyright 2009-2010 Gentoo Foundation
+# Copyright 2009-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}
PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
-PYTHONPATH=$PORTAGE_PYM_PATH${PYTHONPATH:+:}$PYTHONPATH \
+# Use safe cwd, avoiding unsafe import for bug #469338.
+export __PORTAGE_HELPER_CWD=${PWD}
+cd "${PORTAGE_PYM_PATH}"
+PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
"${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH/dohtml.py" "$@"
ret=$?
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+# Restore cwd for display by __helpers_die
+cd "${__PORTAGE_HELPER_CWD}"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit $ret
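
The cd into ${PORTAGE_PYM_PATH} guards against Python resolving imports from an attacker-influenced working directory (bug #469338). A hedged illustration of the pattern:

    cd "${PORTAGE_PYM_PATH}" || exit 1
    PYTHONPATH=${PORTAGE_PYM_PATH} \
    "${PORTAGE_PYTHON:-/usr/bin/python}" -c 'import portage'   # resolved from a trusted cwd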
diff --git a/bin/ebuild-helpers/doinfo b/bin/ebuild-helpers/doinfo
index 8fd7d45f8..2edbdc592 100755
--- a/bin/ebuild-helpers/doinfo
+++ b/bin/ebuild-helpers/doinfo
@@ -1,19 +1,20 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
if [[ -z $1 ]] ; then
- helpers_die "${0##*/}: at least one argument needed"
- exit 1
+ __helpers_die "${0##*/}: at least one argument needed"
+ exit 1
fi
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
if [[ ! -d ${ED}usr/share/info ]] ; then
- install -d "${ED}usr/share/info" || { helpers_die "${0##*/}: failed to install ${ED}usr/share/info"; exit 1; }
+ install -d "${ED}usr/share/info" || { __helpers_die "${0##*/}: failed to install ${ED}usr/share/info"; exit 1; }
fi
install -m0644 "$@" "${ED}usr/share/info"
@@ -22,6 +23,6 @@ if [ $rval -ne 0 ] ; then
for x in "$@" ; do
[ -e "$x" ] || echo "!!! ${0##*/}: $x does not exist" 1>&2
done
- helpers_die "${0##*/} failed"
+ __helpers_die "${0##*/} failed"
fi
exit $rval
diff --git a/bin/ebuild-helpers/doinitd b/bin/ebuild-helpers/doinitd
index b711e190a..476b858f6 100755
--- a/bin/ebuild-helpers/doinitd
+++ b/bin/ebuild-helpers/doinitd
@@ -4,7 +4,7 @@
if [[ $# -lt 1 ]] ; then
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
- helpers_die "${0##*/}: at least one argument needed"
+ __helpers_die "${0##*/}: at least one argument needed"
exit 1
fi
diff --git a/bin/ebuild-helpers/doins b/bin/ebuild-helpers/doins
index 443bfdb21..c60e05789 100755
--- a/bin/ebuild-helpers/doins
+++ b/bin/ebuild-helpers/doins
@@ -1,14 +1,17 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-if [[ ${0##*/} == dodoc ]] ; then
+helper=${__PORTAGE_HELPER:-${0##*/}}
+
+if [[ ${helper} == dodoc ]] ; then
if [ $# -eq 0 ] ; then
# default_src_install may call dodoc with no arguments
# when DOC is defined but empty, so simply return
# successfully in this case.
+ eqawarn "QA Notice: dodoc called with no arguments"
exit 0
fi
export INSOPTIONS=-m0644
@@ -16,7 +19,7 @@ if [[ ${0##*/} == dodoc ]] ; then
fi
if [ $# -lt 1 ] ; then
- helpers_die "${0##*/}: at least one argument needed"
+ __helpers_die "${helper}: at least one argument needed"
exit 1
fi
@@ -27,28 +30,26 @@ else
DOINSRECUR=n
fi
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) export ED="${D}" ;; esac
+if ! ___eapi_has_prefix_variables; then
+ export ED="${D}"
+fi
if [[ ${INSDESTTREE#${ED}} != "${INSDESTTREE}" ]]; then
- vecho "-------------------------------------------------------" 1>&2
- vecho "You should not use \${D} or \${ED} with helpers." 1>&2
- vecho " --> ${INSDESTTREE}" 1>&2
- vecho "-------------------------------------------------------" 1>&2
- helpers_die "${0##*/} used with \${D} or \${ED}"
+ __vecho "-------------------------------------------------------" 1>&2
+ __vecho "You should not use \${D} or \${ED} with helpers." 1>&2
+ __vecho " --> ${INSDESTTREE}" 1>&2
+ __vecho "-------------------------------------------------------" 1>&2
+ __helpers_die "${helper} used with \${D} or \${ED}"
exit 1
fi
-case "$EAPI" in
- 0|1|2|3|3_pre2)
- PRESERVE_SYMLINKS=n
- ;;
- *)
- PRESERVE_SYMLINKS=y
- ;;
-esac
+if ___eapi_doins_and_newins_preserve_symlinks; then
+ PRESERVE_SYMLINKS=y
+else
+ PRESERVE_SYMLINKS=n
+fi
-export TMP=$T/.doins_tmp
+export TMP=$(mktemp -d "${T}/.doins_tmp_XXXXXX")
# Use separate directories to avoid potential name collisions.
mkdir -p "$TMP"/{1,2}
@@ -79,7 +80,7 @@ _doins() {
install ${INSOPTIONS} "${mysrc}" "${ED}${INSDESTTREE}/${mydir}"
rval=$?
[[ -n ${cleanup} ]] && rm -f "${cleanup}"
- [ $rval -ne 0 ] && echo "!!! ${0##*/}: $mysrc does not exist" 1>&2
+ [ $rval -ne 0 ] && echo "!!! ${helper}: $mysrc does not exist" 1>&2
return $rval
}
@@ -99,8 +100,8 @@ for x in "$@" ; do
if [[ $PRESERVE_SYMLINKS = n && -d $x ]] || \
[[ $PRESERVE_SYMLINKS = y && -d $x && ! -L $x ]] ; then
if [ "${DOINSRECUR}" == "n" ] ; then
- if [[ ${0##*/} == dodoc ]] ; then
- echo "!!! ${0##*/}: $x is a directory" 1>&2
+ if [[ ${helper} == dodoc ]] ; then
+ echo "!!! ${helper}: $x is a directory" 1>&2
((failed|=1))
fi
continue
@@ -155,4 +156,4 @@ for x in "$@" ; do
fi
done
rm -rf "$TMP"
-[[ $failed -ne 0 || $success -eq 0 ]] && { helpers_die "${0##*/} failed"; exit 1; } || exit 0
+[[ $failed -ne 0 || $success -eq 0 ]] && { __helpers_die "${helper} failed"; exit 1; } || exit 0
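
Since dodoc now execs this script, the caller's name travels in __PORTAGE_HELPER so every diagnostic above names dodoc rather than doins. What the dodoc wrapper effectively runs (hypothetical file):

    __PORTAGE_HELPER=dodoc doins README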
diff --git a/bin/ebuild-helpers/dolib b/bin/ebuild-helpers/dolib
index 9af541890..fd92d7f03 100755
--- a/bin/ebuild-helpers/dolib
+++ b/bin/ebuild-helpers/dolib
@@ -1,11 +1,12 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
# Setup ABI cruft
LIBDIR_VAR="LIBDIR_${ABI}"
@@ -19,11 +20,11 @@ libdir="${ED}${DESTTREE}/${CONF_LIBDIR}"
if [[ $# -lt 1 ]] ; then
- helpers_die "${0##*/}: at least one argument needed"
+ __helpers_die "${0##*/}: at least one argument needed"
exit 1
fi
if [[ ! -d ${libdir} ]] ; then
- install -d "${libdir}" || { helpers_die "${0##*/}: failed to install ${libdir}"; exit 1; }
+ install -d "${libdir}" || { __helpers_die "${0##*/}: failed to install ${libdir}"; exit 1; }
fi
ret=0
@@ -42,5 +43,5 @@ for x in "$@" ; do
((ret|=$?))
done
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit ${ret}
diff --git a/bin/ebuild-helpers/doman b/bin/ebuild-helpers/doman
index b4047ce40..d6808597a 100755
--- a/bin/ebuild-helpers/doman
+++ b/bin/ebuild-helpers/doman
@@ -1,16 +1,17 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
if [[ $# -lt 1 ]] ; then
- helpers_die "${0##*/}: at least one argument needed"
+ __helpers_die "${0##*/}: at least one argument needed"
exit 1
fi
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
i18n=""
@@ -58,10 +59,10 @@ for x in "$@" ; do
((ret|=1))
fi
else
- vecho "doman: '${x}' is probably not a man page; skipping" 1>&2
+ __vecho "doman: '${x}' is probably not a man page; skipping" 1>&2
((ret|=1))
fi
done
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit ${ret}
diff --git a/bin/ebuild-helpers/domo b/bin/ebuild-helpers/domo
index d994343a9..9a8dda38a 100755
--- a/bin/ebuild-helpers/domo
+++ b/bin/ebuild-helpers/domo
@@ -1,17 +1,18 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
mynum=${#}
if [ ${mynum} -lt 1 ] ; then
- helpers_die "${0}: at least one argument needed"
+ __helpers_die "${0}: at least one argument needed"
exit 1
fi
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
if [ ! -d "${ED}${DESTTREE}/share/locale" ] ; then
install -d "${ED}${DESTTREE}/share/locale/"
@@ -34,5 +35,5 @@ for x in "$@" ; do
((ret|=$?))
done
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit $ret
diff --git a/bin/ebuild-helpers/dosbin b/bin/ebuild-helpers/dosbin
index d101c8a6d..361ca83ca 100755
--- a/bin/ebuild-helpers/dosbin
+++ b/bin/ebuild-helpers/dosbin
@@ -1,19 +1,20 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
if [[ $# -lt 1 ]] ; then
- helpers_die "${0##*/}: at least one argument needed"
+ __helpers_die "${0##*/}: at least one argument needed"
exit 1
fi
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
if [[ ! -d ${ED}${DESTTREE}/sbin ]] ; then
- install -d "${ED}${DESTTREE}/sbin" || { helpers_die "${0##*/}: failed to install ${ED}${DESTTREE}/sbin"; exit 2; }
+ install -d "${ED}${DESTTREE}/sbin" || { __helpers_die "${0##*/}: failed to install ${ED}${DESTTREE}/sbin"; exit 2; }
fi
ret=0
@@ -28,5 +29,5 @@ for x in "$@" ; do
((ret|=$?))
done
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit ${ret}
diff --git a/bin/ebuild-helpers/dosed b/bin/ebuild-helpers/dosed
index f202df7a7..7db062963 100755
--- a/bin/ebuild-helpers/dosed
+++ b/bin/ebuild-helpers/dosed
@@ -1,14 +1,22 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_dosed; then
+ die "'${0##*/}' has been banned for EAPI '$EAPI'"
+ exit 1
+fi
+
if [[ $# -lt 1 ]] ; then
echo "!!! ${0##*/}: at least one argument needed" >&2
exit 1
fi
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
ret=0
file_found=0
diff --git a/bin/ebuild-helpers/dosym b/bin/ebuild-helpers/dosym
index 2489e22a2..649b100de 100755
--- a/bin/ebuild-helpers/dosym
+++ b/bin/ebuild-helpers/dosym
@@ -5,12 +5,13 @@
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
if [[ $# -ne 2 ]] ; then
- helpers_die "${0##*/}: two arguments needed"
+ __helpers_die "${0##*/}: two arguments needed"
exit 1
fi
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
if [[ ${2} == */ ]] || \
[[ -d ${ED}${2} && ! -L ${ED}${2} ]] ; then
@@ -26,5 +27,5 @@ target="${1}"
ln -snf "${target}" "${ED}${2}"
ret=$?
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit $ret
diff --git a/bin/ebuild-helpers/ecompress b/bin/ebuild-helpers/ecompress
index b61421b00..71287b4b0 100755
--- a/bin/ebuild-helpers/ecompress
+++ b/bin/ebuild-helpers/ecompress
@@ -5,7 +5,7 @@
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
if [[ -z $1 ]] ; then
- helpers_die "${0##*/}: at least one argument needed"
+ __helpers_die "${0##*/}: at least one argument needed"
exit 1
fi
@@ -68,7 +68,7 @@ decompress_args() {
case $1 in
--suffix)
- [[ -n $2 ]] && vecho "${0##*/}: --suffix takes no additional arguments" 1>&2
+ [[ -n $2 ]] && __vecho "${0##*/}: --suffix takes no additional arguments" 1>&2
if [[ ! -e ${T}/.ecompress.suffix ]] ; then
set -e
@@ -93,7 +93,7 @@ case $1 in
cat "${T}/.ecompress.suffix"
;;
--bin)
- [[ -n $2 ]] && vecho "${0##*/}: --bin takes no additional arguments" 1>&2
+ [[ -n $2 ]] && __vecho "${0##*/}: --bin takes no additional arguments" 1>&2
echo "${PORTAGE_COMPRESS} ${PORTAGE_COMPRESS_FLAGS}"
;;
@@ -104,18 +104,18 @@ case $1 in
>> "$x"
((ret|=$?))
done
- [[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+ [[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit $ret
;;
--dequeue)
- [[ -n $2 ]] && vecho "${0##*/}: --dequeue takes no additional arguments" 1>&2
+ [[ -n $2 ]] && __vecho "${0##*/}: --dequeue takes no additional arguments" 1>&2
find "${D}" -name '*.ecompress.file' -print0 \
| sed -e 's:\.ecompress\.file::g' \
| ${XARGS} -0 ecompress
find "${D}" -name '*.ecompress.file' -print0 | ${XARGS} -0 rm -f
;;
--*)
- helpers_die "${0##*/}: unknown arguments '$*'"
+ __helpers_die "${0##*/}: unknown arguments '$*'"
exit 1
;;
*)
@@ -155,7 +155,7 @@ case $1 in
# Finally, let's actually do some real work
"${PORTAGE_COMPRESS}" ${PORTAGE_COMPRESS_FLAGS} "$@"
ret=$?
- [[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+ [[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit $ret
;;
esac
diff --git a/bin/ebuild-helpers/ecompressdir b/bin/ebuild-helpers/ecompressdir
index 6801a07d4..eca588869 100755
--- a/bin/ebuild-helpers/ecompressdir
+++ b/bin/ebuild-helpers/ecompressdir
@@ -1,18 +1,21 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/helper-functions.sh
if [[ -z $1 ]] ; then
- helpers_die "${0##*/}: at least one argument needed"
+ __helpers_die "${0##*/}: at least one argument needed"
exit 1
fi
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} EPREFIX= ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D} EPREFIX=
+fi
-case $1 in
+SIZE_LIMIT=''
+while [[ $# -gt 0 ]] ; do
+ case $1 in
--ignore)
shift
for skip in "$@" ; do
@@ -27,45 +30,66 @@ case $1 in
set -- "${@/#/${ED}}"
ret=0
for x in "$@" ; do
- >> "$x"
+ # Stash the limit in the .dir file so we can reload it later.
+ printf "${SIZE_LIMIT}" > "${x}"
((ret|=$?))
done
- [[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+ [[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit $ret
;;
--dequeue)
- [[ -n $2 ]] && vecho "${0##*/}: --dequeue takes no additional arguments" 1>&2
+ [[ -n $2 ]] && __vecho "${0##*/}: --dequeue takes no additional arguments" 1>&2
find "${ED}" -name '*.ecompress.dir' -print0 \
| sed -e 's:\.ecompress\.dir::g' -e "s:${ED}:/:g" \
| ${XARGS} -0 ecompressdir
find "${ED}" -name '*.ecompress.skip' -print0 | ${XARGS} -0 rm -f
exit 0
;;
+ --limit)
+ SIZE_LIMIT=$2
+ shift
+ ;;
--*)
- helpers_die "${0##*/}: unknown arguments '$*'"
+ __helpers_die "${0##*/}: unknown arguments '$*'"
exit 1
;;
-esac
+ *)
+ break
+ ;;
+ esac
+ shift
+done
# figure out the new suffix
suffix=$(ecompress --suffix)
-# funk_up_dir(action, suffix, binary)
+# funk_up_dir(action, suffix, binary, [size_limit])
# - action: compress or decompress
# - suffix: the compression suffix to work with
# - binary: the program to execute that'll compress/decompress
+# - size_limit: if compressing, skip files smaller than this
# The directory we act on is implied in the ${dir} variable
funk_up_dir() {
- local act=$1 suffix=$2 binary=$3
+ local act=$1 suffix=$2 binary=$3 size_limit=$4
local negate=""
[[ ${act} == "compress" ]] && negate="!"
local ret=0
# first we act on all the files
- find "${dir}" -type f ${negate} -iname '*'${suffix} -print0 | ${XARGS} -0 ${binary}
+ local args=(
+ -type f
+ ${negate} -iname "*${suffix}"
+ )
+ [[ -n ${size_limit} ]] && args+=( -size "+${size_limit}c" )
+ find "${dir}" "${args[@]}" -print0 | ${XARGS} -0 ${binary}
((ret|=$?))
+ # Repeat until nothing changes, in order to handle multiple
+ # levels of indirection (see bug #470916).
+ local -i indirection=0
+ while true ; do
+ local something_changed=
while read -r -d $'\0' brokenlink ; do
[[ -e ${brokenlink} ]] && continue
olddest=$(readlink "${brokenlink}")
@@ -91,12 +115,22 @@ funk_up_dir() {
else
[[ -f "${dir}/${brokenlink%/*}/${newdest}" ]] || continue
fi
+ something_changed=${brokenlink}
rm -f "${brokenlink}"
[[ ${act} == "compress" ]] \
&& ln -snf "${newdest}" "${brokenlink}${suffix}" \
|| ln -snf "${newdest}" "${brokenlink%${suffix}}"
((ret|=$?))
done < <(find "${dir}" -type l -print0)
+ [[ -n ${something_changed} ]] || break
+ (( indirection++ ))
+ if (( indirection >= 100 )) ; then
+ # Protect against possibility of a bug triggering an endless loop.
+ eerror "ecompressdir: too many levels of indirection for" \
+ "'${actual_dir#${ED}}/${something_changed#./}'"
+ break
+ fi
+ done
return ${ret}
}
@@ -133,13 +167,13 @@ decompressors=(
".lzma" "unxz -f"
)
-multijob_init
+__multijob_init
for dir in "$@" ; do
dir=${dir#/}
dir="${ED}${dir}"
if [[ ! -d ${dir} ]] ; then
- vecho "${0##*/}: /${dir#${ED}} does not exist!"
+ __vecho "${0##*/}: /${dir#${ED}} does not exist!"
continue
fi
cd "${dir}"
@@ -151,24 +185,25 @@ for dir in "$@" ; do
# since we've been requested to compress the whole dir,
# delete any individual queued requests
+ size_limit=${SIZE_LIMIT:-$(<"${actual_dir}.ecompress.dir")}
rm -f "${actual_dir}.ecompress.dir"
find "${dir}" -type f -name '*.ecompress.file' -print0 | ${XARGS} -0 rm -f
# not uncommon for packages to compress doc files themselves
- for (( d = 0; d < ${#decompressors[@]}; d += 2 )) ; do
+ for (( i = 0; i < ${#decompressors[@]}; i += 2 )) ; do
# It's faster to parallelize at this stage than to try to
# parallelize the compressors. This is because the find|xargs
# ends up launching fewer compressors overall, so the overhead
# of forking children ends up dominating.
(
- multijob_child_init
+ __multijob_child_init
funk_up_dir "decompress" "${decompressors[i]}" "${decompressors[i+1]}"
) &
- multijob_post_fork
+ __multijob_post_fork
: $(( ret |= $? ))
done
- multijob_finish
+ __multijob_finish
: $(( ret |= $? ))
# forcibly break all hard links as some compressors whine about it
@@ -177,8 +212,8 @@ for dir in "$@" ; do
# now lets do our work
if [[ -n ${suffix} ]] ; then
- vecho "${0##*/}: $(ecompress --bin) /${actual_dir#${ED}}"
- funk_up_dir "compress" "${suffix}" "ecompress"
+ __vecho "${0##*/}: $(ecompress --bin) /${actual_dir#${ED}}"
+ funk_up_dir "compress" "${suffix}" "ecompress" "${size_limit}"
: $(( ret |= $? ))
fi
@@ -186,5 +221,5 @@ for dir in "$@" ; do
restore_skip_dirs
done
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit ${ret}
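
The new --limit flag stashes a byte threshold in the queued .ecompress.dir marker and later turns into find's -size +Nc, so files at or below the limit stay uncompressed. Usage as prepman does below:

    ecompressdir --limit 128 --queue /usr/share/man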
diff --git a/bin/ebuild-helpers/emake b/bin/ebuild-helpers/emake
index d842781a7..69d836f1a 100755
--- a/bin/ebuild-helpers/emake
+++ b/bin/ebuild-helpers/emake
@@ -24,5 +24,5 @@ fi
${MAKE:-make} ${MAKEOPTS} ${EXTRA_EMAKE} "$@"
ret=$?
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit $ret
diff --git a/bin/ebuild-helpers/fowners b/bin/ebuild-helpers/fowners
index a213c9eb1..cee4108ce 100755
--- a/bin/ebuild-helpers/fowners
+++ b/bin/ebuild-helpers/fowners
@@ -4,8 +4,9 @@
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) EPREFIX= ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ EPREFIX= ED=${D}
+fi
# we can't prefix all arguments because
# chown takes random options
@@ -13,10 +14,5 @@ slash="/"
chown "${@/#${slash}/${ED}${slash}}"
ret=$?
-if [[ ${ret} != 0 && -n ${EPREFIX} && ${EUID} != 0 ]] ; then
- ewarn "fowners failure ignored in Prefix with non-privileged user"
- exit 0
-fi
-
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit $ret
diff --git a/bin/ebuild-helpers/fperms b/bin/ebuild-helpers/fperms
index a2f77ea00..d854ebbdf 100755
--- a/bin/ebuild-helpers/fperms
+++ b/bin/ebuild-helpers/fperms
@@ -1,16 +1,17 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
# we can't prefix all arguments because
# chmod takes random options
slash="/"
chmod "${@/#${slash}/${ED}${slash}}"
ret=$?
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
exit $ret
diff --git a/bin/ebuild-helpers/keepdir b/bin/ebuild-helpers/keepdir
new file mode 100755
index 000000000..bec2feb77
--- /dev/null
+++ b/bin/ebuild-helpers/keepdir
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
+
+dodir "$@"
+ret=$?
+
+for x in "$@"; do
+ >> "${ED}${x}/.keep_${CATEGORY}_${PN}-${SLOT%/*}" || \
+ { echo "!!! ${0##*/}: cannot write .keep in ${ED}${x}" 1>&2; ret=1; }
+done
+
+[[ ${ret} -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit ${ret}
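
Typical src_install usage of the new standalone keepdir (hypothetical directory):

    keepdir /var/lib/myapp
    # creates the directory plus an empty .keep_${CATEGORY}_${PN}-${SLOT%/*} file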
diff --git a/bin/ebuild-helpers/newbin b/bin/ebuild-helpers/newbin
index bf9874472..59a0db27b 100755..120000
--- a/bin/ebuild-helpers/newbin
+++ b/bin/ebuild-helpers/newbin
@@ -1,22 +1 @@
-#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-
-if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
- helpers_die "${0##*/}: Need two arguments, old file and new file"
- exit 1
-fi
-
-if [ ! -e "$1" ] ; then
- helpers_die "!!! ${0##*/}: $1 does not exist"
- exit 1
-fi
-
-(($#>2)) && \
- eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
-
-rm -rf "${T}/${2}" && \
-cp -f "${1}" "${T}/${2}" && \
-exec dobin "${T}/${2}"
+newins
\ No newline at end of file
diff --git a/bin/ebuild-helpers/newconfd b/bin/ebuild-helpers/newconfd
index fa3710d8f..59a0db27b 100755..120000
--- a/bin/ebuild-helpers/newconfd
+++ b/bin/ebuild-helpers/newconfd
@@ -1,22 +1 @@
-#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-
-if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
- helpers_die "${0##*/}: Need two arguments, old file and new file"
- exit 1
-fi
-
-if [ ! -e "$1" ] ; then
- helpers_die "!!! ${0##*/}: $1 does not exist"
- exit 1
-fi
-
-(($#>2)) && \
- eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
-
-rm -rf "${T}/${2}" && \
-cp -f "${1}" "${T}/${2}" && \
-exec doconfd "${T}/${2}"
+newins
\ No newline at end of file
diff --git a/bin/ebuild-helpers/newdoc b/bin/ebuild-helpers/newdoc
index df6fb1d58..59a0db27b 100755..120000
--- a/bin/ebuild-helpers/newdoc
+++ b/bin/ebuild-helpers/newdoc
@@ -1,22 +1 @@
-#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-
-if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
- helpers_die "${0##*/}: Need two arguments, old file and new file"
- exit 1
-fi
-
-if [ ! -e "$1" ] ; then
- helpers_die "!!! ${0##*/}: $1 does not exist"
- exit 1
-fi
-
-(($#>2)) && \
- eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
-
-rm -rf "${T}/${2}" && \
-cp -f "${1}" "${T}/${2}" && \
-exec dodoc "${T}/${2}"
+newins
\ No newline at end of file
diff --git a/bin/ebuild-helpers/newenvd b/bin/ebuild-helpers/newenvd
index c54af0520..59a0db27b 100755..120000
--- a/bin/ebuild-helpers/newenvd
+++ b/bin/ebuild-helpers/newenvd
@@ -1,22 +1 @@
-#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-
-if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
- helpers_die "${0##*/}: Need two arguments, old file and new file"
- exit 1
-fi
-
-if [ ! -e "$1" ] ; then
- helpers_die "!!! ${0##*/}: $1 does not exist"
- exit 1
-fi
-
-(($#>2)) && \
- eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
-
-rm -rf "${T}/${2}" && \
-cp -f "${1}" "${T}/${2}" && \
-exec doenvd "${T}/${2}"
+newins
\ No newline at end of file
diff --git a/bin/ebuild-helpers/newexe b/bin/ebuild-helpers/newexe
index 9bcf64b31..59a0db27b 100755..120000
--- a/bin/ebuild-helpers/newexe
+++ b/bin/ebuild-helpers/newexe
@@ -1,22 +1 @@
-#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-
-if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
- helpers_die "${0##*/}: Need two arguments, old file and new file"
- exit 1
-fi
-
-if [ ! -e "$1" ] ; then
- helpers_die "!!! ${0##*/}: $1 does not exist"
- exit 1
-fi
-
-(($#>2)) && \
- eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
-
-rm -rf "${T}/${2}" && \
-cp -f "${1}" "${T}/${2}" && \
-exec doexe "${T}/${2}"
+newins
\ No newline at end of file
diff --git a/bin/ebuild-helpers/newheader b/bin/ebuild-helpers/newheader
new file mode 120000
index 000000000..59a0db27b
--- /dev/null
+++ b/bin/ebuild-helpers/newheader
@@ -0,0 +1 @@
+newins
\ No newline at end of file
diff --git a/bin/ebuild-helpers/newinitd b/bin/ebuild-helpers/newinitd
index 03bbe68a7..59a0db27b 100755..120000
--- a/bin/ebuild-helpers/newinitd
+++ b/bin/ebuild-helpers/newinitd
@@ -1,22 +1 @@
-#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-
-if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
- helpers_die "${0##*/}: Need two arguments, old file and new file"
- exit 1
-fi
-
-if [ ! -e "$1" ] ; then
- helpers_die "!!! ${0##*/}: $1 does not exist"
- exit 1
-fi
-
-(($#>2)) && \
- eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
-
-rm -rf "${T}/${2}" && \
-cp -f "${1}" "${T}/${2}" && \
-exec doinitd "${T}/${2}"
+newins
\ No newline at end of file
diff --git a/bin/ebuild-helpers/newins b/bin/ebuild-helpers/newins
index adf2d809e..03359851a 100755
--- a/bin/ebuild-helpers/newins
+++ b/bin/ebuild-helpers/newins
@@ -1,38 +1,57 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+helper=${0##*/}
+
if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
- helpers_die "${0##*/}: Need two arguments, old file and new file"
+ __helpers_die "${helper}: Need two arguments, old file and new file"
exit 1
fi
-if [ ! -e "$1" ] ; then
- helpers_die "!!! ${0##*/}: $1 does not exist"
- exit 1
+(($#>2)) && \
+ eqawarn "QA Notice: ${helper} called with more than 2 arguments: ${@:3}"
+
+stdin=
+if ___eapi_newins_supports_reading_from_standard_input && [[ $1 == "-" ]]; then
+ stdin=yes
fi
-(($#>2)) && \
- eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
-
-rm -rf "${T}/${2}" || exit $?
-case "$EAPI" in
- 0|1|2|3|3_pre2)
- cp "$1" "$T/$2" || exit $?
- ;;
- *)
- cp -P "$1" "$T/$2"
- ret=$?
- if [[ $ret -ne 0 ]] ; then
- helpers_die "${0##*/} failed"
- exit $ret
+TMP=$(mktemp -d "${T}/.newins_tmp_XXXXXX")
+trap 'rm -rf "${TMP}"' EXIT
+
+if [[ ${stdin} ]] ; then
+ if [[ -t 0 ]] ; then
+ __helpers_die "!!! ${helper}: Input is from a terminal"
+ exit 1
+ fi
+ cat > "${TMP}/$2"
+ ret=$?
+else
+ if [[ ! -e $1 ]] ; then
+ __helpers_die "!!! ${helper}: $1 does not exist"
+ exit 1
+ fi
+
+ cp_args="-f"
+ if [[ ${helper} == newins ]] ; then
+ if ___eapi_doins_and_newins_preserve_symlinks; then
+ cp_args+=" -P"
fi
- ;;
-esac
-doins "${T}/${2}"
+ fi
+
+ cp ${cp_args} "$1" "${TMP}/$2"
+ ret=$?
+fi
+
+if [[ ${ret} -ne 0 ]] ; then
+ __helpers_die "${0##*/} failed"
+ exit ${ret}
+fi
+
+do${helper#new} "${TMP}/$2"
ret=$?
-rm -rf "${T}/${2}"
-[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+[[ $ret -ne 0 ]] && __helpers_die "${helper} failed"
exit $ret
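
newins is now the single implementation behind all the new* helpers: each symlinked name lands here and do${helper#new} dispatches to the matching installer (newman -> doman, and so on), while reading from standard input is gated to EAPI 5 and later by the predicate. Hypothetical sketches:

    newman man/foo.1 foo.1                    # dispatches to doman
    echo "key=value" | newins - myapp.conf    # '-' reads the payload from stdin (EAPI 5+)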
diff --git a/bin/ebuild-helpers/newlib.a b/bin/ebuild-helpers/newlib.a
index 7ff819547..59a0db27b 100755..120000
--- a/bin/ebuild-helpers/newlib.a
+++ b/bin/ebuild-helpers/newlib.a
@@ -1,22 +1 @@
-#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-
-if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
- helpers_die "${0##*/}: Need two arguments, old file and new file"
- exit 1
-fi
-
-if [ ! -e "$1" ] ; then
- helpers_die "!!! ${0##*/}: $1 does not exist"
- exit 1
-fi
-
-(($#>2)) && \
- eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
-
-rm -rf "${T}/${2}" && \
-cp -f "${1}" "${T}/${2}" && \
-exec dolib.a "${T}/${2}"
+newins
\ No newline at end of file
diff --git a/bin/ebuild-helpers/newlib.so b/bin/ebuild-helpers/newlib.so
index fd4c097d7..59a0db27b 100755..120000
--- a/bin/ebuild-helpers/newlib.so
+++ b/bin/ebuild-helpers/newlib.so
@@ -1,22 +1 @@
-#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-
-if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
- helpers_die "${0##*/}: Need two arguments, old file and new file"
- exit 1
-fi
-
-if [ ! -e "$1" ] ; then
- helpers_die "!!! ${0##*/}: $1 does not exist"
- exit 1
-fi
-
-(($#>2)) && \
- eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
-
-rm -rf "${T}/${2}" && \
-cp -f "${1}" "${T}/${2}" && \
-exec dolib.so "${T}/${2}"
+newins
\ No newline at end of file
diff --git a/bin/ebuild-helpers/newman b/bin/ebuild-helpers/newman
index 889e0f985..59a0db27b 100755..120000
--- a/bin/ebuild-helpers/newman
+++ b/bin/ebuild-helpers/newman
@@ -1,22 +1 @@
-#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-
-if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
- helpers_die "${0##*/}: Need two arguments, old file and new file"
- exit 1
-fi
-
-if [ ! -e "$1" ] ; then
- helpers_die "!!! ${0##*/}: $1 does not exist"
- exit 1
-fi
-
-(($#>2)) && \
- eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
-
-rm -rf "${T}/${2}" && \
-cp -f "${1}" "${T}/${2}" && \
-exec doman "${T}/${2}"
+newins
\ No newline at end of file
diff --git a/bin/ebuild-helpers/newsbin b/bin/ebuild-helpers/newsbin
index 9df0af275..59a0db27b 100755..120000
--- a/bin/ebuild-helpers/newsbin
+++ b/bin/ebuild-helpers/newsbin
@@ -1,22 +1 @@
-#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-
-if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
- helpers_die "${0##*/}: Need two arguments, old file and new file"
- exit 1
-fi
-
-if [ ! -e "$1" ] ; then
- helpers_die "!!! ${0##*/}: $1 does not exist"
- exit 1
-fi
-
-(($#>2)) && \
- eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
-
-rm -rf "${T}/${2}" && \
-cp -f "${1}" "${T}/${2}" && \
-exec dosbin "${T}/${2}"
+newins
\ No newline at end of file
diff --git a/bin/ebuild-helpers/portageq b/bin/ebuild-helpers/portageq
index ec30b66cb..b67b03f33 100755
--- a/bin/ebuild-helpers/portageq
+++ b/bin/ebuild-helpers/portageq
@@ -1,8 +1,10 @@
#!/bin/bash
-# Copyright 2009-2010 Gentoo Foundation
+# Copyright 2009-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}
PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
-PYTHONPATH=$PORTAGE_PYM_PATH${PYTHONPATH:+:}$PYTHONPATH \
+# Use safe cwd, avoiding unsafe import for bug #469338.
+cd "${PORTAGE_PYM_PATH}"
+PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
exec "${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH/portageq" "$@"
diff --git a/bin/ebuild-helpers/prepall b/bin/ebuild-helpers/prepall
index 49e646cd2..fb5c2db55 100755
--- a/bin/ebuild-helpers/prepall
+++ b/bin/ebuild-helpers/prepall
@@ -1,11 +1,12 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
if has chflags $FEATURES ; then
# Save all the file flags for restoration at the end of prepall.
diff --git a/bin/ebuild-helpers/prepalldocs b/bin/ebuild-helpers/prepalldocs
index 560a02bcb..3094661f5 100755
--- a/bin/ebuild-helpers/prepalldocs
+++ b/bin/ebuild-helpers/prepalldocs
@@ -1,15 +1,21 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+if ___eapi_has_docompress; then
+ die "'${0##*/}' has been banned for EAPI '$EAPI'"
+ exit 1
+fi
+
if [[ -n $1 ]] ; then
- vecho "${0##*/}: invalid usage; takes no arguments" 1>&2
+ __vecho "${0##*/}: invalid usage; takes no arguments" 1>&2
fi
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
[[ -d ${ED}usr/share/doc ]] || exit 0
diff --git a/bin/ebuild-helpers/prepallinfo b/bin/ebuild-helpers/prepallinfo
index db9bbfacb..1a2027580 100755
--- a/bin/ebuild-helpers/prepallinfo
+++ b/bin/ebuild-helpers/prepallinfo
@@ -1,11 +1,12 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
[[ -d ${ED}usr/share/info ]] || exit 0
diff --git a/bin/ebuild-helpers/prepallman b/bin/ebuild-helpers/prepallman
index dee1c7236..5331eaf01 100755
--- a/bin/ebuild-helpers/prepallman
+++ b/bin/ebuild-helpers/prepallman
@@ -1,22 +1,22 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
# replaced by controllable compression in EAPI 4
-has "${EAPI}" 0 1 2 3 || exit 0
+___eapi_has_docompress && exit 0
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
ret=0
-find "${ED}" -type d -name man > "${T}"/prepallman.filelist
-while read -r mandir ; do
+while IFS= read -r -d '' mandir ; do
mandir=${mandir#${ED}}
prepman "${mandir%/man}"
((ret|=$?))
-done < "${T}"/prepallman.filelist
+done < <(find "${ED}" -type d -name man -print0)
exit ${ret}
diff --git a/bin/ebuild-helpers/prepallstrip b/bin/ebuild-helpers/prepallstrip
index 28320d975..1aa6686cd 100755
--- a/bin/ebuild-helpers/prepallstrip
+++ b/bin/ebuild-helpers/prepallstrip
@@ -1,8 +1,11 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
exec prepstrip "${ED}"
diff --git a/bin/ebuild-helpers/prepinfo b/bin/ebuild-helpers/prepinfo
index ffe2ecec3..5afc18a71 100755
--- a/bin/ebuild-helpers/prepinfo
+++ b/bin/ebuild-helpers/prepinfo
@@ -1,11 +1,12 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
if [[ -z $1 ]] ; then
infodir="/usr/share/info"
@@ -19,7 +20,7 @@ fi
if [[ ! -d ${ED}${infodir} ]] ; then
if [[ -n $1 ]] ; then
- vecho "${0##*/}: '${infodir}' does not exist!"
+ __vecho "${0##*/}: '${infodir}' does not exist!"
exit 1
else
exit 0
@@ -33,5 +34,5 @@ find "${ED}${infodir}" -type d -print0 | while read -r -d $'\0' x ; do
rm -f "${x}"/dir{,.info}{,.gz,.bz2}
done
-has "${EAPI}" 0 1 2 3 || exit 0
+___eapi_has_docompress && exit 0
exec ecompressdir --queue "${infodir}"
diff --git a/bin/ebuild-helpers/preplib b/bin/ebuild-helpers/preplib
deleted file mode 100755
index 6e91cf33d..000000000
--- a/bin/ebuild-helpers/preplib
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-
-eqawarn "QA Notice: Deprecated call to 'preplib'"
-
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
-
-LIBDIR_VAR="LIBDIR_${ABI}"
-if [ -n "${ABI}" -a -n "${!LIBDIR_VAR}" ]; then
- CONF_LIBDIR="${!LIBDIR_VAR}"
-fi
-unset LIBDIR_VAR
-
-if [ -z "${CONF_LIBDIR}" ]; then
- # we need this to default to lib so that things dont break
- CONF_LIBDIR="lib"
-fi
-
-if [ -z "$1" ] ; then
- z="${ED}usr/${CONF_LIBDIR}"
-else
- z="${ED}$1/${CONF_LIBDIR}"
-fi
-
-if [ -d "${z}" ] ; then
- ldconfig -n -N "${z}"
-fi
diff --git a/bin/ebuild-helpers/prepman b/bin/ebuild-helpers/prepman
index f96b64147..fb5dcb4a5 100755
--- a/bin/ebuild-helpers/prepman
+++ b/bin/ebuild-helpers/prepman
@@ -1,13 +1,17 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+# Do not compress man pages which are smaller than this (in bytes). #169260
+SIZE_LIMIT='128'
+
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ ED=${D}
+fi
-if [[ -z $1 ]] ; then
+if [[ -z $1 ]] ; then
mandir="${ED}usr/share/man"
else
mandir="${ED}$1/man"
@@ -19,7 +23,7 @@ if [[ ! -d ${mandir} ]] ; then
fi
# replaced by controllable compression in EAPI 4
-has "${EAPI}" 0 1 2 3 || exit 0
+___eapi_has_docompress && exit 0
shopt -s nullglob
@@ -30,6 +34,6 @@ for subdir in "${mandir}"/man* "${mandir}"/*/man* ; do
[[ -d ${subdir} ]] && really_is_mandir=1 && break
done
-[[ ${really_is_mandir} == 1 ]] && exec ecompressdir --queue "${mandir#${ED}}"
+[[ ${really_is_mandir} == 1 ]] && exec ecompressdir --limit ${SIZE_LIMIT} --queue "${mandir#${ED}}"
exit 0
diff --git a/bin/ebuild-helpers/prepstrip b/bin/ebuild-helpers/prepstrip
index fe5c1bc32..2ef8a1ace 100755
--- a/bin/ebuild-helpers/prepstrip
+++ b/bin/ebuild-helpers/prepstrip
@@ -1,7 +1,8 @@
#!/bin/bash
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/helper-functions.sh
# avoid multiple calls to `has`. this creates things like:
@@ -15,11 +16,12 @@ exp_tf() {
eval ${var}_${flag}=$(tf has ${flag} ${!var})
done
}
-exp_tf FEATURES compressdebug installsources nostrip splitdebug
-exp_tf RESTRICT binchecks installsources strip
+exp_tf FEATURES compressdebug installsources nostrip splitdebug xattr
+exp_tf RESTRICT binchecks installsources splitdebug strip
-[[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "${EAPI}" in 0|1|2) EPREFIX= ED=${D} ;; esac
+if ! ___eapi_has_prefix_variables; then
+ EPREFIX= ED=${D}
+fi
banner=false
SKIP_STRIP=false
@@ -29,6 +31,30 @@ if ${RESTRICT_strip} || ${FEATURES_nostrip} ; then
${FEATURES_installsources} || exit 0
fi
+PRESERVE_XATTR=false
+if [[ ${KERNEL} == linux ]] && ${FEATURES_xattr} ; then
+ PRESERVE_XATTR=true
+ if type -P getfattr >/dev/null && type -P setfattr >/dev/null ; then
+ dump_xattrs() {
+ getfattr -d --absolute-names "$1"
+ }
+ restore_xattrs() {
+ setfattr --restore=-
+ }
+ else
+ dump_xattrs() {
+ PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
+ "${PORTAGE_PYTHON:-/usr/bin/python}" \
+ "${PORTAGE_BIN_PATH}/xattr-helper.py" --dump < <(echo -n "$1")
+ }
+ restore_xattrs() {
+ PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
+ "${PORTAGE_PYTHON:-/usr/bin/python}" \
+ "${PORTAGE_BIN_PATH}/xattr-helper.py" --restore
+ }
+ fi
+fi
+
# look up the tools we might be using
for t in STRIP:strip OBJCOPY:objcopy READELF:readelf ; do
v=${t%:*} # STRIP
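Both xattr code paths added above speak the same dump/restore stream format: getfattr/setfattr when the attr tools are installed, the bundled xattr-helper.py otherwise. A standalone round trip on Linux with the attr tools, file path hypothetical:

# Save a file's extended attributes, strip it, then put the xattrs back
# (strip rewrites the file, which can discard xattrs on some filesystems).
xt_data=$(getfattr -d --absolute-names /path/to/binary)
strip --strip-unneeded /path/to/binary
setfattr --restore=- <<< "${xt_data}"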
@@ -51,7 +77,7 @@ case $(${STRIP} --version 2>/dev/null) in
# We'll leave out -R .note for now until we can check out the relevance
# of the section when it has the ALLOC flag set on it ...
SAFE_STRIP_FLAGS="--strip-unneeded"
- DEF_STRIP_FLAGS="-R .comment -R .GCC.command.line"
+ DEF_STRIP_FLAGS="-R .comment -R .GCC.command.line -R .note.gnu.gold-version"
SPLIT_STRIP_FLAGS=
;;
esac
@@ -62,23 +88,13 @@ prepstrip_sources_dir=${EPREFIX}/usr/src/debug/${CATEGORY}/${PF}
type -P debugedit >/dev/null && debugedit_found=true || debugedit_found=false
debugedit_warned=false
-multijob_init
+__multijob_init
# Setup $T filesystem layout that we care about.
tmpdir="${T}/prepstrip"
rm -rf "${tmpdir}"
mkdir -p "${tmpdir}"/{inodes,splitdebug,sources}
-# Usage: inode_var_name: <file>
-inode_file_link() {
- echo -n "${tmpdir}/inodes/"
- if [[ ${USERLAND} == "BSD" ]] ; then
- stat -f '%i' "$1"
- else
- stat -c '%i' "$1"
- fi
-}
-
# Usage: save_elf_sources <elf>
save_elf_sources() {
${FEATURES_installsources} || return 0
@@ -93,7 +109,6 @@ save_elf_sources() {
fi
local x=$1
- [[ -f $(inode_file_link "${x}") ]] && return 0
# since we're editing the ELF here, we should recompute the build-id
# (the -i flag below). save that output so we don't need to recompute
@@ -101,20 +116,22 @@ save_elf_sources() {
buildid=$(debugedit -i \
-b "${WORKDIR}" \
-d "${prepstrip_sources_dir}" \
- -l "${tmpdir}/sources/${x##*/}.${BASHPID}" \
+ -l "${tmpdir}/sources/${x##*/}.${BASHPID:-$(__bashpid)}" \
"${x}")
}
# Usage: save_elf_debug <elf> [splitdebug file]
save_elf_debug() {
${FEATURES_splitdebug} || return 0
+ ${RESTRICT_splitdebug} && return 0
# NOTE: Debug files must be installed in
# ${EPREFIX}/usr/lib/debug/${EPREFIX} (note that ${EPREFIX} occurs
# twice in this path) in order for gdb's debug-file-directory
# lookup to work correctly.
local x=$1
- local splitdebug=$2
+ local inode_debug=$2
+ local splitdebug=$3
local y=${ED}usr/lib/debug/${x:${#D}}.debug
	# don't save debug info twice
@@ -122,9 +139,8 @@ save_elf_debug() {
mkdir -p "${y%/*}"
- local inode=$(inode_file_link "${x}")
- if [[ -f ${inode} ]] ; then
- ln "${inode}" "${y}"
+ if [ -f "${inode_debug}" ] ; then
+ ln "${inode_debug}" "${y}" || die "ln failed unexpectedly"
else
if [[ -n ${splitdebug} ]] ; then
mv "${splitdebug}" "${y}"
@@ -134,64 +150,89 @@ save_elf_debug() {
${OBJCOPY} ${objcopy_flags} "${x}" "${y}"
${OBJCOPY} --add-gnu-debuglink="${y}" "${x}"
fi
- local args="a-x,o-w"
- [[ -g ${x} || -u ${x} ]] && args+=",go-r"
- chmod ${args} "${y}"
- ln "${y}" "${inode}"
+ # Only do the following if the debug file was
+ # successfully created (see bug #446774).
+ if [ $? -eq 0 ] ; then
+ local args="a-x,o-w"
+ [[ -g ${x} || -u ${x} ]] && args+=",go-r"
+ chmod ${args} "${y}"
+ ln "${y}" "${inode_debug}" || die "ln failed unexpectedly"
+ fi
fi
# if we don't already have build-id from debugedit, look it up
if [[ -z ${buildid} ]] ; then
# convert the readelf output to something useful
- buildid=$(${READELF} -x .note.gnu.build-id "${x}" 2>/dev/null \
- | awk '$NF ~ /GNU/ { getline; printf $2$3$4$5; getline; print $2 }')
+ buildid=$(${READELF} -n "${x}" 2>/dev/null | awk '/Build ID:/{ print $NF; exit }')
fi
if [[ -n ${buildid} ]] ; then
local buildid_dir="${ED}usr/lib/debug/.build-id/${buildid:0:2}"
local buildid_file="${buildid_dir}/${buildid:2}"
mkdir -p "${buildid_dir}"
- ln -s "../../${x:${#D}}.debug" "${buildid_file}.debug"
- ln -s "/${x:${#D}}" "${buildid_file}"
+ [ -L "${buildid_file}".debug ] || ln -s "../../${x:${#D}}.debug" "${buildid_file}.debug"
+ [ -L "${buildid_file}" ] || ln -s "/${x:${#D}}" "${buildid_file}"
fi
}
# Usage: process_elf <elf>
process_elf() {
- local x=$1 strip_flags=${*:2}
-
- vecho " ${x:${#ED}}"
- save_elf_sources "${x}"
+ local x=$1 inode_link=$2 strip_flags=${*:3}
+ local already_stripped lockfile xt_data
+
+ __vecho " ${x:${#ED}}"
+
+ # If two processes try to debugedit or strip the same hardlink at the
+ # same time, it may corrupt files or cause loss of splitdebug info.
+ # So, use a lockfile to prevent interference (easily observed with
+ # dev-vcs/git which creates ~111 hardlinks to one file in
+ # /usr/libexec/git-core).
+ lockfile=${inode_link}_lockfile
+ if ! ln "${inode_link}" "${lockfile}" 2>/dev/null ; then
+ while [[ -f ${lockfile} ]] ; do
+ sleep 1
+ done
+ unset lockfile
+ fi
- if ${strip_this} ; then
+ [ -f "${inode_link}_stripped" ] && already_stripped=true || already_stripped=false
- # If two processes try to strip the same hardlink at the same
- # time, it will cause one of them to lose the splitdebug info.
- # So, use a lockfile to prevent interference (easily observed
- # with dev-vcs/git which creates ~109 hardlinks to one file in
- # /usr/libexec/git-core).
- local lockfile=$(inode_file_link "${x}")_lockfile
- if ! ln "${x}" "${lockfile}" ; then
- while [[ -f ${lockfile} ]] ; do
- sleep 1
- done
- unset lockfile
+ if ! ${already_stripped} ; then
+ if ${PRESERVE_XATTR} ; then
+ xt_data=$(dump_xattrs "${x}")
fi
+ save_elf_sources "${x}"
+ fi
+
+ if ${strip_this} ; then
# see if we can split & strip at the same time
if [[ -n ${SPLIT_STRIP_FLAGS} ]] ; then
local shortname="${x##*/}.debug"
- local splitdebug="${tmpdir}/splitdebug/${shortname}.${BASHPID}"
+ local splitdebug="${tmpdir}/splitdebug/${shortname}.${BASHPID:-$(__bashpid)}"
+ ${already_stripped} || \
${STRIP} ${strip_flags} \
-f "${splitdebug}" \
-F "${shortname}" \
"${x}"
- save_elf_debug "${x}" "${splitdebug}"
+ save_elf_debug "${x}" "${inode_link}_debug" "${splitdebug}"
else
- save_elf_debug "${x}"
+ save_elf_debug "${x}" "${inode_link}_debug"
+ ${already_stripped} || \
${STRIP} ${strip_flags} "${x}"
fi
- [[ -n ${lockfile} ]] && rm -f "${lockfile}"
fi
+
+ if ${already_stripped} ; then
+ rm -f "${x}" || die "rm failed unexpectedly"
+ ln "${inode_link}_stripped" "${x}" || die "ln failed unexpectedly"
+ else
+ ln "${x}" "${inode_link}_stripped" || die "ln failed unexpectedly"
+ if [[ ${xt_data} ]] ; then
+ restore_xattrs <<< "${xt_data}"
+ fi
+ fi
+
+ [[ -n ${lockfile} ]] && rm -f "${lockfile}"
}
# The existence of a .symtab section tells us that a binary has not been stripped.
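The lockfile scheme in process_elf above exploits the atomicity of ln(1): exactly one process succeeds in creating the extra hardlink, and the rest poll until it disappears. The same pattern in isolation:

# Sketch: hardlink-based mutual exclusion; the first ln(1) wins atomically.
marker=$(mktemp)
lockfile=${marker}.lock
if ln "${marker}" "${lockfile}" 2>/dev/null ; then
	echo "lock acquired, doing exclusive work"
	rm -f "${lockfile}"
else
	while [[ -f ${lockfile} ]] ; do sleep 1 ; done
fi
rm -f "${marker}"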
@@ -204,7 +245,7 @@ if ! ${RESTRICT_binchecks} && ! ${RESTRICT_strip} ; then
log=${tmpdir}/scanelf-already-stripped.log
scanelf -yqRBF '#k%F' -k '!.symtab' "$@" | sed -e "s#^${ED}##" > "${log}"
(
- multijob_child_init
+ __multijob_child_init
qa_var="QA_PRESTRIPPED_${ARCH/-/_}"
[[ -n ${!qa_var} ]] && QA_PRESTRIPPED="${!qa_var}"
if [[ -n ${QA_PRESTRIPPED} && -s ${log} && \
@@ -219,28 +260,49 @@ if ! ${RESTRICT_binchecks} && ! ${RESTRICT_strip} ; then
fi
sed -e "/^\$/d" -e "s#^#/#" -i "${log}"
if [[ -s ${log} ]] ; then
- vecho -e "\n"
+ __vecho -e "\n"
eqawarn "QA Notice: Pre-stripped files found:"
eqawarn "$(<"${log}")"
else
rm -f "${log}"
fi
) &
- multijob_post_fork
+ __multijob_post_fork
+fi
+
+# Since strip creates a new inode, we need to know the initial set of
+# inodes in advance, so that we can avoid interference due to trying
+# to strip the same (hardlinked) file multiple times in parallel.
+# See bug #421099.
+if [[ ${USERLAND} == BSD ]] ; then
+ get_inode_number() { stat -f '%i' "$1"; }
+else
+ get_inode_number() { stat -c '%i' "$1"; }
fi
+cd "${tmpdir}/inodes" || die "cd failed unexpectedly"
+while read -r x ; do
+ inode_link=$(get_inode_number "${x}") || die "stat failed unexpectedly"
+ echo "${x}" >> "${inode_link}" || die "echo failed unexpectedly"
+done < <(
+ # Use sort -u to eliminate duplicates for bug #445336.
+ (
+ scanelf -yqRBF '#k%F' -k '.symtab' "$@"
+ find "$@" -type f ! -type l -name '*.a'
+ ) | LC_ALL=C sort -u
+)
# Now we look for unstripped binaries.
-for x in \
- $(scanelf -yqRBF '#k%F' -k '.symtab' "$@") \
- $(find "$@" -type f -name '*.a')
+for inode_link in $(shopt -s nullglob; echo *) ; do
+while read -r x
do
+
if ! ${banner} ; then
- vecho "strip: ${STRIP} ${PORTAGE_STRIP_FLAGS}"
+ __vecho "strip: ${STRIP} ${PORTAGE_STRIP_FLAGS}"
banner=true
fi
(
- multijob_child_init
+ __multijob_child_init
f=$(file "${x}") || exit 0
[[ -z ${f} ]] && exit 0
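Pre-grouping paths by inode means each hardlink set is debugedited/stripped exactly once, and every sibling path is re-linked to the result afterwards (bug #421099). A minimal sketch of the grouping step, assuming GNU stat:

# Sketch: build one file per inode under ./inodes, each listing its paths.
image_dir=${1:-.}
mkdir -p inodes && cd inodes || exit 1
while read -r path ; do
	inode=$(stat -c '%i' "${path}") || continue
	echo "${path}" >> "${inode}"
done < <(find "${image_dir}" -type f | LC_ALL=C sort -u)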
@@ -275,27 +337,34 @@ do
buildid=
if [[ ${f} == *"current ar archive"* ]] ; then
- vecho " ${x:${#ED}}"
+ __vecho " ${x:${#ED}}"
if ${strip_this} ; then
- # hmm, can we split debug/sources for .a ?
- ${STRIP} -g "${x}"
+ # If we have split debug enabled, then do not strip this.
+ # There is no concept of splitdebug for objects not yet
+ # linked in (only for finally linked ELFs), so we have to
+ # retain the debug info in the archive itself.
+ if ! ${FEATURES_splitdebug} || ${RESTRICT_splitdebug} ; then
+ ${STRIP} -g "${x}"
+ fi
fi
elif [[ ${f} == *"SB executable"* || ${f} == *"SB shared object"* ]] ; then
- process_elf "${x}" ${PORTAGE_STRIP_FLAGS}
+ process_elf "${x}" "${inode_link}" ${PORTAGE_STRIP_FLAGS}
elif [[ ${f} == *"SB relocatable"* ]] ; then
- process_elf "${x}" ${SAFE_STRIP_FLAGS}
+ process_elf "${x}" "${inode_link}" ${SAFE_STRIP_FLAGS}
fi
if ${was_not_writable} ; then
chmod u-w "${x}"
fi
) &
- multijob_post_fork
+ __multijob_post_fork
+
+done < "${inode_link}"
done
# With a bit more work, we could run the rsync processes below in
# parallel, but not sure that'd be an overall improvement.
-multijob_finish
+__multijob_finish
cd "${tmpdir}"/sources/ && cat * > "${tmpdir}/debug.sources" 2>/dev/null
if [[ -s ${tmpdir}/debug.sources ]] && \
@@ -303,11 +372,11 @@ if [[ -s ${tmpdir}/debug.sources ]] && \
! ${RESTRICT_installsources} && \
${debugedit_found}
then
- vecho "installsources: rsyncing source files"
+ __vecho "installsources: rsyncing source files"
[[ -d ${D}${prepstrip_sources_dir} ]] || mkdir -p "${D}${prepstrip_sources_dir}"
grep -zv '/<[^/>]*>$' "${tmpdir}"/debug.sources | \
(cd "${WORKDIR}"; LANG=C sort -z -u | \
- rsync -tL0 --files-from=- "${WORKDIR}/" "${D}${prepstrip_sources_dir}/" )
+ rsync -tL0 --chmod=ugo-st,a+r,go-w,Da+x,Fa-x --files-from=- "${WORKDIR}/" "${D}${prepstrip_sources_dir}/" )
# Preserve directory structure.
# Needed after running save_elf_sources.
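The --chmod expression added to the rsync call normalizes the installed sources: ugo-st clears setuid/setgid/sticky bits, a+r,go-w makes everything world-readable and owner-writable only, and the D/F prefixes keep directories traversable while removing execute bits from files. The same normalization with plain chmod/find, destination hypothetical:

# Equivalent post-copy permission normalization.
chmod -R ugo-st,a+r,go-w "${dest}"
find "${dest}" -type d -exec chmod a+x {} +
find "${dest}" -type f -exec chmod a-x {} +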
diff --git a/bin/ebuild-helpers/unprivileged/chgrp b/bin/ebuild-helpers/unprivileged/chgrp
new file mode 120000
index 000000000..6fb0fcd80
--- /dev/null
+++ b/bin/ebuild-helpers/unprivileged/chgrp
@@ -0,0 +1 @@
+chown
\ No newline at end of file
diff --git a/bin/ebuild-helpers/unprivileged/chown b/bin/ebuild-helpers/unprivileged/chown
new file mode 100755
index 000000000..08fa650c5
--- /dev/null
+++ b/bin/ebuild-helpers/unprivileged/chown
@@ -0,0 +1,41 @@
+#!/bin/bash
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+scriptpath=${BASH_SOURCE[0]}
+scriptname=${scriptpath##*/}
+
+IFS=':'
+
+for path in ${PATH}; do
+ [[ -x ${path}/${scriptname} ]] || continue
+ [[ ${path}/${scriptname} -ef ${scriptpath} ]] && continue
+ IFS=$' \t\n'
+ output=$("${path}/${scriptname}" "$@" 2>&1)
+ if [[ $? -ne 0 ]] ; then
+
+ # Avoid an extreme performance problem when the
+ # output is very long (bug #470992).
+ if [[ $(wc -l <<< "${output}") -gt 100 ]]; then
+ output=$(head -n100 <<< "${output}")
+ output="${output}\n ... (further messages truncated)"
+ fi
+
+ source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+ if ! ___eapi_has_prefix_variables; then
+ EPREFIX=
+ fi
+ msg="${scriptname} failure ignored with unprivileged user:\n ${scriptname} $*\n ${output}"
+ # Reverse expansion of ${D} and ${EPREFIX}, for readability.
+ msg=${msg//${D}/'${D}'}
+ if [[ -n ${EPREFIX} ]] ; then
+ msg=${msg//${EPREFIX}/'${EPREFIX}'}
+ msg=${msg//${EPREFIX#/}/'${EPREFIX}'}
+ fi
+ ewarn "${msg}"
+ fi
+ exit 0
+done
+
+exit 1
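The wrapper above works by walking PATH for the next executable with the same name, where the -ef test (same device and inode) keeps it from re-invoking itself. That lookup, reduced to a reusable sketch:

# Sketch: exec the next same-named executable further down PATH.
scriptpath=${BASH_SOURCE[0]}
scriptname=${scriptpath##*/}
IFS=':'
for dir in ${PATH}; do
	[[ -x ${dir}/${scriptname} ]] || continue
	[[ ${dir}/${scriptname} -ef ${scriptpath} ]] && continue
	IFS=$' \t\n'
	exec "${dir}/${scriptname}" "$@"
done
IFS=$' \t\n'
echo "${scriptname}: no other candidate in PATH" >&2
exit 1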
diff --git a/bin/ebuild-helpers/xattr/install b/bin/ebuild-helpers/xattr/install
new file mode 100755
index 000000000..f51f621bc
--- /dev/null
+++ b/bin/ebuild-helpers/xattr/install
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}
+PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
+# Use safe cwd, avoiding unsafe import for bug #469338.
+export __PORTAGE_HELPER_CWD=${PWD}
+cd "${PORTAGE_PYM_PATH}"
+export __PORTAGE_HELPER_PATH=${BASH_SOURCE[0]}
+PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
+ exec "${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/install.py" "$@"
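The cd into PORTAGE_PYM_PATH before exec is the point of this wrapper: Python puts the script directory (or cwd) at the front of sys.path, so launching from an attacker-writable build directory could shadow portage or stdlib modules (bug #469338). The hardening pattern in generic form, helper name hypothetical:

# Sketch: always launch Python helpers from a trusted working directory.
SAFE_DIR=${SAFE_DIR:-/usr/lib/portage/pym}
export ORIG_CWD=${PWD}    # the helper can chdir back explicitly if needed
cd "${SAFE_DIR}" || exit 1
exec python /usr/lib/portage/bin/some-helper.py "$@"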
diff --git a/bin/ebuild-ipc b/bin/ebuild-ipc
index 43e4a02ae..820005fbb 100755
--- a/bin/ebuild-ipc
+++ b/bin/ebuild-ipc
@@ -1,8 +1,10 @@
#!/bin/bash
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}
PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
-PYTHONPATH=$PORTAGE_PYM_PATH${PYTHONPATH:+:}$PYTHONPATH \
+# Use safe cwd, avoiding unsafe import for bug #469338.
+cd "${PORTAGE_PYM_PATH}"
+PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
exec "${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH/ebuild-ipc.py" "$@"
diff --git a/bin/ebuild-ipc.py b/bin/ebuild-ipc.py
index 3caf2d185..00337ee22 100755
--- a/bin/ebuild-ipc.py
+++ b/bin/ebuild-ipc.py
@@ -1,20 +1,17 @@
-#!/usr/bin/python
-# Copyright 2010-2012 Gentoo Foundation
+#!/usr/bin/python -b
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
#
# This is a helper which ebuild processes can use
# to communicate with portage's main python process.
-import errno
import logging
import os
import pickle
import platform
-import select
import signal
import sys
import time
-import traceback
def debug_signal(signum, frame):
import pdb
@@ -38,14 +35,28 @@ if os.environ.get("SANDBOX_ON") == "1":
":".join(filter(None, sandbox_write))
import portage
+portage._internal_caller = True
portage._disable_legacy_globals()
+from portage.util._async.ForkProcess import ForkProcess
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.PipeReader import PipeReader
+
+class FifoWriter(ForkProcess):
+
+ __slots__ = ('buf', 'fifo',)
+
+ def _run(self):
+ # Atomically write the whole buffer into the fifo.
+ with open(self.fifo, 'wb', 0) as f:
+ f.write(self.buf)
+ return os.EX_OK
+
class EbuildIpc(object):
# Timeout for each individual communication attempt (we retry
# as long as the daemon process appears to be alive).
- _COMMUNICATE_RETRY_TIMEOUT_SECONDS = 15
- _BUFSIZE = 4096
+ _COMMUNICATE_RETRY_TIMEOUT_MS = 15000
def __init__(self):
self.fifo_dir = os.environ['PORTAGE_BUILDDIR']
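FifoWriter relies on the POSIX guarantee that a single write of at most PIPE_BUF bytes to a fifo is atomic, hence the unbuffered open and a single write of the whole pickle. The rendezvous behaviour is easy to observe from the shell, assuming the message fits within PIPE_BUF:

# Sketch: one writer, one reader, one atomic message over a named pipe.
fifo=$(mktemp -u)
mkfifo "${fifo}"
( printf 'hello from writer' > "${fifo}" ) &    # open blocks until a reader arrives
cat "${fifo}"
wait
rm -f "${fifo}"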
@@ -89,7 +100,7 @@ class EbuildIpc(object):
'ebuild-ipc: daemon process not detected\n'),
level=logging.ERROR, noiselevel=-1)
- def _wait(self, pid, pr, msg):
+ def _run_writer(self, fifo_writer, msg):
"""
Wait on pid and return an appropriate exit code. This
may return unsuccessfully due to timeout if the daemon
@@ -98,88 +109,48 @@ class EbuildIpc(object):
start_time = time.time()
- while True:
- try:
- events = select.select([pr], [], [],
- self._COMMUNICATE_RETRY_TIMEOUT_SECONDS)
- except select.error as e:
- portage.util.writemsg_level(
- "ebuild-ipc: %s: %s\n" % \
- (portage.localization._('during select'), e),
- level=logging.ERROR, noiselevel=-1)
- continue
+ fifo_writer.start()
+ eof = fifo_writer.poll() is not None
- if events[0]:
- break
+ while not eof:
+ fifo_writer._wait_loop(timeout=self._COMMUNICATE_RETRY_TIMEOUT_MS)
- if self._daemon_is_alive():
+ eof = fifo_writer.poll() is not None
+ if eof:
+ break
+ elif self._daemon_is_alive():
self._timeout_retry_msg(start_time, msg)
else:
+ fifo_writer.cancel()
self._no_daemon_msg()
- try:
- os.kill(pid, signal.SIGKILL)
- os.waitpid(pid, 0)
- except OSError as e:
- portage.util.writemsg_level(
- "ebuild-ipc: %s\n" % (e,),
- level=logging.ERROR, noiselevel=-1)
+ fifo_writer.wait()
return 2
- try:
- wait_retval = os.waitpid(pid, 0)
- except OSError as e:
- portage.util.writemsg_level(
- "ebuild-ipc: %s: %s\n" % (msg, e),
- level=logging.ERROR, noiselevel=-1)
- return 2
+ return fifo_writer.wait()
- if not os.WIFEXITED(wait_retval[1]):
- portage.util.writemsg_level(
- "ebuild-ipc: %s: %s\n" % (msg,
- portage.localization._('subprocess failure: %s') % \
- wait_retval[1]),
- level=logging.ERROR, noiselevel=-1)
- return 2
+ def _receive_reply(self, input_fd):
- return os.WEXITSTATUS(wait_retval[1])
+ start_time = time.time()
- def _receive_reply(self, input_fd):
+ pipe_reader = PipeReader(input_files={"input_fd":input_fd},
+ scheduler=global_event_loop())
+ pipe_reader.start()
- # Timeouts are handled by the parent process, so just
- # block until input is available. For maximum portability,
- # use a single atomic read.
- buf = None
- while True:
- try:
- events = select.select([input_fd], [], [])
- except select.error as e:
- portage.util.writemsg_level(
- "ebuild-ipc: %s: %s\n" % \
- (portage.localization._('during select for read'), e),
- level=logging.ERROR, noiselevel=-1)
- continue
-
- if events[0]:
- # For maximum portability, use os.read() here since
- # array.fromfile() and file.read() are both known to
- # erroneously return an empty string from this
- # non-blocking fifo stream on FreeBSD (bug #337465).
- try:
- buf = os.read(input_fd, self._BUFSIZE)
- except OSError as e:
- if e.errno != errno.EAGAIN:
- portage.util.writemsg_level(
- "ebuild-ipc: %s: %s\n" % \
- (portage.localization._('read error'), e),
- level=logging.ERROR, noiselevel=-1)
- break
- # Assume that another event will be generated
- # if there's any relevant data.
- continue
-
- # Only one (atomic) read should be necessary.
- if buf:
- break
+ eof = pipe_reader.poll() is not None
+
+ while not eof:
+ pipe_reader._wait_loop(timeout=self._COMMUNICATE_RETRY_TIMEOUT_MS)
+ eof = pipe_reader.poll() is not None
+ if not eof:
+ if self._daemon_is_alive():
+ self._timeout_retry_msg(start_time,
+ portage.localization._('during read'))
+ else:
+ pipe_reader.cancel()
+ self._no_daemon_msg()
+ return 2
+
+ buf = pipe_reader.getvalue()
retval = 2
@@ -232,32 +203,9 @@ class EbuildIpc(object):
# un-interrupted, while the parent handles all timeout
# considerations. This helps to avoid possible race conditions
# from interference between timeouts and blocking IO operations.
- pr, pw = os.pipe()
- pid = os.fork()
-
- if pid == 0:
- retval = 2
- try:
- os.close(pr)
-
- # File streams are in unbuffered mode since we do atomic
- # read and write of whole pickles.
- output_file = open(self.ipc_in_fifo, 'wb', 0)
- output_file.write(pickle.dumps(args))
- output_file.close()
- retval = os.EX_OK
- except SystemExit:
- raise
- except:
- traceback.print_exc()
- finally:
- os._exit(retval)
-
- os.close(pw)
-
msg = portage.localization._('during write')
- retval = self._wait(pid, pr, msg)
- os.close(pr)
+ retval = self._run_writer(FifoWriter(buf=pickle.dumps(args),
+ fifo=self.ipc_in_fifo, scheduler=global_event_loop()), msg)
if retval != os.EX_OK:
portage.util.writemsg_level(
@@ -270,26 +218,7 @@ class EbuildIpc(object):
self._no_daemon_msg()
return 2
- pr, pw = os.pipe()
- pid = os.fork()
-
- if pid == 0:
- retval = 2
- try:
- os.close(pr)
- retval = self._receive_reply(input_fd)
- except SystemExit:
- raise
- except:
- traceback.print_exc()
- finally:
- os._exit(retval)
-
- os.close(pw)
- retval = self._wait(pid, pr, portage.localization._('during read'))
- os.close(pr)
- os.close(input_fd)
- return retval
+ return self._receive_reply(input_fd)
def ebuild_ipc_main(args):
ebuild_ipc = EbuildIpc()
diff --git a/bin/ebuild.sh b/bin/ebuild.sh
index 9829f68b3..be044e08d 100755
--- a/bin/ebuild.sh
+++ b/bin/ebuild.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
PORTAGE_BIN_PATH="${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"
@@ -21,21 +21,23 @@ else
# in global scope, even though they are completely useless during
# the "depend" phase.
for x in diropts docompress exeopts get_KV insopts \
- keepdir KV_major KV_micro KV_minor KV_to_int \
+ KV_major KV_micro KV_minor KV_to_int \
libopts register_die_hook register_success_hook \
- remove_path_entry set_unless_changed strip_duplicate_slashes \
- unset_unless_changed use_with use_enable ; do
+ __strip_duplicate_slashes \
+ use_with use_enable ; do
eval "${x}() {
- if has \"\${EAPI:-0}\" 4-python; then
+ if ___eapi_disallows_helpers_in_global_scope; then
die \"\${FUNCNAME}() calls are not allowed in global scope\"
fi
}"
done
- # These dummy functions return false in older EAPIs, in order to ensure that
+ # These dummy functions return false in non-strict EAPIs, in order to ensure that
# `use multislot` is false for the "depend" phase.
- for x in use useq usev ; do
+ funcs="use useq usev"
+ ___eapi_has_usex && funcs+=" usex"
+ for x in ${funcs} ; do
eval "${x}() {
- if has \"\${EAPI:-0}\" 4-python; then
+ if ___eapi_disallows_helpers_in_global_scope; then
die \"\${FUNCNAME}() calls are not allowed in global scope\"
else
return 1
@@ -44,10 +46,16 @@ else
done
# These functions die because calls to them during the "depend" phase
# are considered to be severe QA violations.
- for x in best_version has_version portageq ; do
+ funcs="best_version has_version portageq"
+ ___eapi_has_master_repositories && funcs+=" master_repositories"
+ ___eapi_has_repository_path && funcs+=" repository_path"
+ ___eapi_has_available_eclasses && funcs+=" available_eclasses"
+ ___eapi_has_eclass_path && funcs+=" eclass_path"
+ ___eapi_has_license_path && funcs+=" license_path"
+ for x in ${funcs} ; do
eval "${x}() { die \"\${FUNCNAME}() calls are not allowed in global scope\"; }"
done
- unset x
+ unset funcs x
fi
# Don't use sandbox's BASH_ENV for new shells because it does
@@ -66,7 +74,7 @@ export PORTAGE_BZIP2_COMMAND=${PORTAGE_BZIP2_COMMAND:-bzip2}
# with shell opts (shopts). Ebuilds/eclasses changing shopts should reset them
# when they are done.
-qa_source() {
+__qa_source() {
local shopts=$(shopt) OLDIFS="$IFS"
local retval
source "$@"
@@ -79,7 +87,7 @@ qa_source() {
return $retval
}
-qa_call() {
+__qa_call() {
local shopts=$(shopt) OLDIFS="$IFS"
local retval
"$@"
@@ -102,20 +110,19 @@ unset GZIP BZIP BZIP2 CDPATH GREP_OPTIONS GREP_COLOR GLOBIGNORE
[[ $PORTAGE_QUIET != "" ]] && export PORTAGE_QUIET
# sandbox support functions; defined prior to profile.bashrc sourcing, since the profile might need to add a default exception (/usr/lib64/conftest, for example)
-_sb_append_var() {
+__sb_append_var() {
local _v=$1 ; shift
local var="SANDBOX_${_v}"
- [[ -z $1 || -n $2 ]] && die "Usage: add$(echo ${_v} | \
- LC_ALL=C tr [:upper:] [:lower:]) <colon-delimited list of paths>"
+ [[ -z $1 || -n $2 ]] && die "Usage: add$(LC_ALL=C tr "[:upper:]" "[:lower:]" <<< "${_v}") <colon-delimited list of paths>"
export ${var}="${!var:+${!var}:}$1"
}
# bash-4 version:
# local var="SANDBOX_${1^^}"
-# addread() { _sb_append_var ${0#add} "$@" ; }
-addread() { _sb_append_var READ "$@" ; }
-addwrite() { _sb_append_var WRITE "$@" ; }
-adddeny() { _sb_append_var DENY "$@" ; }
-addpredict() { _sb_append_var PREDICT "$@" ; }
+# addread() { __sb_append_var ${0#add} "$@" ; }
+addread() { __sb_append_var READ "$@" ; }
+addwrite() { __sb_append_var WRITE "$@" ; }
+adddeny() { __sb_append_var DENY "$@" ; }
+addpredict() { __sb_append_var PREDICT "$@" ; }
addwrite "${PORTAGE_TMPDIR}"
addread "/:${PORTAGE_TMPDIR}"
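__sb_append_var is a single primitive that colon-appends onto the exported SANDBOX_* lists, so each add* helper is a one-line specialization of it. Typical ebuild-side usage, paths hypothetical:

# Grant the sandbox write access to a scratch dir and whitelist a probe path.
addwrite "${T}/scratch"        # appends to SANDBOX_WRITE
addpredict /proc/self/testfile # appends to SANDBOX_PREDICT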
@@ -136,19 +143,11 @@ fi
# the sandbox is disabled by default except when overridden in the relevant stages
export SANDBOX_ON=0
-esyslog() {
- # Custom version of esyslog() to take care of the "Red Star" bug.
- # MUST follow functions.sh to override the "" parameter problem.
- return 0
-}
-
# Ensure that $PWD is sane whenever possible, to protect against
# exploitation of insecure search path for python -c in ebuilds.
-# See bug #239560.
-if ! has "$EBUILD_PHASE" clean cleanrm depend help ; then
- cd "$PORTAGE_BUILDDIR" || \
- die "PORTAGE_BUILDDIR does not exist: '$PORTAGE_BUILDDIR'"
-fi
+# See bug #239560 and bug #469338.
+cd "${PORTAGE_PYM_PATH}" || \
+ die "PORTAGE_PYM_PATH does not exist: '${PORTAGE_PYM_PATH}'"
#if no perms are specified, dirs/files will have decent defaults
#(not secretive, but not stupid)
@@ -178,8 +177,8 @@ debug-print() {
# default target
printf '%s\n' "${@}" >> "${T}/eclass-debug.log"
# let the portage user own/write to this file
- chgrp portage "${T}/eclass-debug.log" &>/dev/null
- chmod g+w "${T}/eclass-debug.log" &>/dev/null
+ chgrp "${PORTAGE_GRPNAME:-portage}" "${T}/eclass-debug.log"
+ chmod g+w "${T}/eclass-debug.log"
fi
}
@@ -208,8 +207,9 @@ inherit() {
| fmt -w 75 | while read -r ; do eqawarn "$REPLY" ; done
fi
+ local repo_location
local location
- local olocation
+ local potential_location
local x
# These variables must be restored before returning.
@@ -221,9 +221,10 @@ inherit() {
local B_DEPEND
local B_RDEPEND
local B_PDEPEND
+ local B_HDEPEND
while [ "$1" ]; do
- location="${ECLASSDIR}/${1}.eclass"
- olocation=""
+ location=""
+ potential_location=""
export ECLASS="$1"
__export_funcs_var=__export_functions_$ECLASS_DEPTH
@@ -244,43 +245,36 @@ inherit() {
fi
fi
- # any future resolution code goes here
- if [ -n "$PORTDIR_OVERLAY" ]; then
- local overlay
- for overlay in ${PORTDIR_OVERLAY}; do
- olocation="${overlay}/eclass/${1}.eclass"
- if [ -e "$olocation" ]; then
- location="${olocation}"
- debug-print " eclass exists: ${location}"
- fi
- done
- fi
+ for repo_location in "${PORTAGE_ECLASS_LOCATIONS[@]}"; do
+ potential_location="${repo_location}/eclass/${1}.eclass"
+ if [[ -f ${potential_location} ]]; then
+ location="${potential_location}"
+ debug-print " eclass exists: ${location}"
+ break
+ fi
+ done
debug-print "inherit: $1 -> $location"
- [ ! -e "$location" ] && die "${1}.eclass could not be found by inherit()"
-
- if [ "${location}" == "${olocation}" ] && \
- ! has "${location}" ${EBUILD_OVERLAY_ECLASSES} ; then
- EBUILD_OVERLAY_ECLASSES="${EBUILD_OVERLAY_ECLASSES} ${location}"
- fi
+ [[ -z ${location} ]] && die "${1}.eclass could not be found by inherit()"
- #We need to back up the value of DEPEND and RDEPEND to B_DEPEND and B_RDEPEND
+ #We need to back up the values of *DEPEND to B_*DEPEND
#(if set).. and then restore them after the inherit call.
#turn off glob expansion
set -f
# Retain the old data and restore it later.
- unset B_IUSE B_REQUIRED_USE B_DEPEND B_RDEPEND B_PDEPEND
+ unset B_IUSE B_REQUIRED_USE B_DEPEND B_RDEPEND B_PDEPEND B_HDEPEND
[ "${IUSE+set}" = set ] && B_IUSE="${IUSE}"
[ "${REQUIRED_USE+set}" = set ] && B_REQUIRED_USE="${REQUIRED_USE}"
[ "${DEPEND+set}" = set ] && B_DEPEND="${DEPEND}"
[ "${RDEPEND+set}" = set ] && B_RDEPEND="${RDEPEND}"
[ "${PDEPEND+set}" = set ] && B_PDEPEND="${PDEPEND}"
- unset IUSE REQUIRED_USE DEPEND RDEPEND PDEPEND
+ [ "${HDEPEND+set}" = set ] && B_HDEPEND="${HDEPEND}"
+ unset IUSE REQUIRED_USE DEPEND RDEPEND PDEPEND HDEPEND
#turn on glob expansion
set +f
- qa_source "$location" || die "died sourcing $location in inherit()"
+ __qa_source "$location" || die "died sourcing $location in inherit()"
#turn off glob expansion
set -f
@@ -292,6 +286,7 @@ inherit() {
[ "${DEPEND+set}" = set ] && E_DEPEND+="${E_DEPEND:+ }${DEPEND}"
[ "${RDEPEND+set}" = set ] && E_RDEPEND+="${E_RDEPEND:+ }${RDEPEND}"
[ "${PDEPEND+set}" = set ] && E_PDEPEND+="${E_PDEPEND:+ }${PDEPEND}"
+ [ "${HDEPEND+set}" = set ] && E_HDEPEND+="${E_HDEPEND:+ }${HDEPEND}"
[ "${B_IUSE+set}" = set ] && IUSE="${B_IUSE}"
[ "${B_IUSE+set}" = set ] || unset IUSE
@@ -308,6 +303,9 @@ inherit() {
[ "${B_PDEPEND+set}" = set ] && PDEPEND="${B_PDEPEND}"
[ "${B_PDEPEND+set}" = set ] || unset PDEPEND
+ [ "${B_HDEPEND+set}" = set ] && HDEPEND="${B_HDEPEND}"
+ [ "${B_HDEPEND+set}" = set ] || unset HDEPEND
+
#turn on glob expansion
set +f
@@ -348,7 +346,7 @@ EXPORT_FUNCTIONS() {
PORTAGE_BASHRCS_SOURCED=0
-# @FUNCTION: source_all_bashrcs
+# @FUNCTION: __source_all_bashrcs
# @DESCRIPTION:
# Source a relevant bashrc files and perform other miscellaneous
# environment initialization when appropriate.
@@ -359,7 +357,7 @@ PORTAGE_BASHRCS_SOURCED=0
# * A "default" function which is an alias for the default phase
# function for the current phase.
#
-source_all_bashrcs() {
+__source_all_bashrcs() {
[[ $PORTAGE_BASHRCS_SOURCED = 1 ]] && return 0
PORTAGE_BASHRCS_SOURCED=1
local x
@@ -373,7 +371,7 @@ source_all_bashrcs() {
local path_array=($PROFILE_PATHS)
restore_IFS
for x in "${path_array[@]}" ; do
- [ -f "$x/profile.bashrc" ] && qa_source "$x/profile.bashrc"
+ [ -f "$x/profile.bashrc" ] && __qa_source "$x/profile.bashrc"
done
fi
@@ -390,7 +388,7 @@ source_all_bashrcs() {
if [[ $EBUILD_PHASE != depend ]] ; then
# The user's bashrc is the ONLY non-portage bit of code that can
# change shopts without a QA violation.
- for x in "${PM_EBUILD_HOOK_DIR}"/${CATEGORY}/{${PN},${PN}:${SLOT},${P},${PF}}; do
+ for x in "${PM_EBUILD_HOOK_DIR}"/${CATEGORY}/{${PN},${PN}:${SLOT%/*},${P},${PF}}; do
if [ -r "${x}" ]; then
# If $- contains x, then tracing has already been enabled
# elsewhere for some reason. We preserve its state so as
@@ -470,7 +468,7 @@ if [[ -n ${QA_INTERCEPTORS} ]] ; then
fi
# Subshell/helper die support (must export for the die helper).
-export EBUILD_MASTER_PID=$BASHPID
+export EBUILD_MASTER_PID=${BASHPID:-$(__bashpid)}
trap 'exit 1' SIGTERM
if ! has "$EBUILD_PHASE" clean cleanrm depend && \
@@ -479,7 +477,7 @@ if ! has "$EBUILD_PHASE" clean cleanrm depend && \
# may have come from another version of ebuild.sh or something.
# In any case, preprocess it to prevent any potential interference.
# NOTE: export ${FOO}=... requires quoting, unlike normal exports
- preprocess_ebuild_env || \
+ __preprocess_ebuild_env || \
die "error processing environment"
# Colon separated SANDBOX_* variables need to be cumulative.
for x in SANDBOX_DENY SANDBOX_READ SANDBOX_PREDICT SANDBOX_WRITE ; do
@@ -512,17 +510,22 @@ if ! has "$EBUILD_PHASE" clean cleanrm depend && \
[[ -n $EAPI ]] || EAPI=0
fi
-if has "${EAPI:-0}" 4-python; then
+if ___eapi_enables_globstar; then
shopt -s globstar
fi
+# Convert quoted paths to array.
+eval "PORTAGE_ECLASS_LOCATIONS=(${PORTAGE_ECLASS_LOCATIONS})"
+
+# Source the ebuild every time for FEATURES=noauto, so that ebuild
+# modifications take effect immediately.
if ! has "$EBUILD_PHASE" clean cleanrm ; then
if [[ $EBUILD_PHASE = depend || ! -f $T/environment || \
- -f $PORTAGE_BUILDDIR/.ebuild_changed ]] || \
- has noauto $FEATURES ; then
+ -f $PORTAGE_BUILDDIR/.ebuild_changed || \
+ " ${FEATURES} " == *" noauto "* ]] ; then
# The bashrcs get an opportunity here to set aliases that will be expanded
# during sourcing of ebuilds and eclasses.
- source_all_bashrcs
+ __source_all_bashrcs
# When EBUILD_PHASE != depend, INHERITED comes pre-initialized
# from cache. In order to make INHERITED content independent of
@@ -534,8 +537,9 @@ if ! has "$EBUILD_PHASE" clean cleanrm ; then
# In order to ensure correct interaction between ebuilds and
# eclasses, they need to be unset before this process of
# interaction begins.
- unset EAPI DEPEND RDEPEND PDEPEND INHERITED IUSE REQUIRED_USE \
- ECLASS E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND
+ unset EAPI DEPEND RDEPEND PDEPEND HDEPEND INHERITED IUSE REQUIRED_USE \
+ ECLASS E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND \
+ E_HDEPEND
if [[ $PORTAGE_DEBUG != 1 || ${-/x/} != $- ]] ; then
source "$EBUILD" || die "error sourcing ebuild"
@@ -556,7 +560,7 @@ if ! has "$EBUILD_PHASE" clean cleanrm ; then
# export EAPI for helpers (especially since we unset it above)
export EAPI
- if has "$EAPI" 0 1 2 3 3_pre2 ; then
+ if ___eapi_has_RDEPEND_DEPEND_fallback; then
export RDEPEND=${RDEPEND-${DEPEND}}
debug-print "RDEPEND: not set... Setting to: ${DEPEND}"
fi
@@ -566,19 +570,20 @@ if ! has "$EBUILD_PHASE" clean cleanrm ; then
DEPEND+="${DEPEND:+ }${E_DEPEND}"
RDEPEND+="${RDEPEND:+ }${E_RDEPEND}"
PDEPEND+="${PDEPEND:+ }${E_PDEPEND}"
+ HDEPEND+="${HDEPEND:+ }${E_HDEPEND}"
REQUIRED_USE+="${REQUIRED_USE:+ }${E_REQUIRED_USE}"
- unset ECLASS E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND \
+ unset ECLASS E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND E_HDEPEND \
__INHERITED_QA_CACHE
# alphabetically ordered by $EBUILD_PHASE value
- case "$EAPI" in
+ case ${EAPI} in
0|1)
_valid_phases="src_compile pkg_config pkg_info src_install
pkg_nofetch pkg_postinst pkg_postrm pkg_preinst pkg_prerm
pkg_setup src_test src_unpack"
;;
- 2|3|3_pre2)
+ 2|3)
_valid_phases="src_compile pkg_config src_configure pkg_info
src_install pkg_nofetch pkg_postinst pkg_postrm pkg_preinst
src_prepare pkg_prerm pkg_setup src_test src_unpack"
@@ -670,9 +675,13 @@ if [[ $EBUILD_PHASE = depend ]] ; then
auxdbkeys="DEPEND RDEPEND SLOT SRC_URI RESTRICT HOMEPAGE LICENSE
DESCRIPTION KEYWORDS INHERITED IUSE REQUIRED_USE PDEPEND PROVIDE EAPI
- PROPERTIES DEFINED_PHASES UNUSED_05 UNUSED_04
+ PROPERTIES DEFINED_PHASES HDEPEND UNUSED_04
UNUSED_03 UNUSED_02 UNUSED_01"
+ if ! ___eapi_has_HDEPEND; then
+ unset HDEPEND
+ fi
+
# The extra $(echo) commands remove newlines.
if [ -n "${dbkey}" ] ; then
> "${dbkey}"
@@ -681,31 +690,28 @@ if [[ $EBUILD_PHASE = depend ]] ; then
done
else
for f in ${auxdbkeys} ; do
- echo $(echo ${!f}) 1>&9 || exit $?
+ eval "echo \$(echo \${!f}) 1>&${PORTAGE_PIPE_FD}" || exit $?
done
- exec 9>&-
+ eval "exec ${PORTAGE_PIPE_FD}>&-"
fi
set +f
else
- # Note: readonly variables interfere with preprocess_ebuild_env(), so
+ # Note: readonly variables interfere with __preprocess_ebuild_env(), so
# declare them only after it has already run.
declare -r $PORTAGE_READONLY_METADATA $PORTAGE_READONLY_VARS
- case "$EAPI" in
- 0|1|2)
- [[ " ${FEATURES} " == *" force-prefix "* ]] && \
- declare -r ED EPREFIX EROOT
- ;;
- *)
- declare -r ED EPREFIX EROOT
- ;;
- esac
+ if ___eapi_has_prefix_variables; then
+ declare -r ED EPREFIX EROOT
+ fi
if [[ -n $EBUILD_SH_ARGS ]] ; then
(
# Don't allow subprocesses to inherit the pipe which
# emerge uses to monitor ebuild.sh.
- exec 9>&-
- ebuild_main ${EBUILD_SH_ARGS}
+ if [[ -n ${PORTAGE_PIPE_FD} ]] ; then
+ eval "exec ${PORTAGE_PIPE_FD}>&-"
+ unset PORTAGE_PIPE_FD
+ fi
+ __ebuild_main ${EBUILD_SH_ARGS}
exit 0
)
exit $?
diff --git a/bin/egencache b/bin/egencache
index a75a34172..c14be936b 100755
--- a/bin/egencache
+++ b/bin/egencache
@@ -1,15 +1,17 @@
-#!/usr/bin/python
-# Copyright 2009-2012 Gentoo Foundation
+#!/usr/bin/python -b
+# Copyright 2009-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+# unicode_literals for compat with TextIOWrapper in Python 2
+from __future__ import print_function, unicode_literals
+import platform
import signal
import sys
# This block ensures that ^C interrupts are handled quietly.
try:
- def exithandler(signum,frame):
+ def exithandler(signum, _frame):
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
sys.exit(128 + signum)
@@ -20,26 +22,39 @@ try:
except KeyboardInterrupt:
sys.exit(128 + signal.SIGINT)
+def debug_signal(_signum, _frame):
+ import pdb
+ pdb.set_trace()
+
+if platform.python_implementation() == 'Jython':
+ debug_signum = signal.SIGUSR2 # bug #424259
+else:
+ debug_signum = signal.SIGUSR1
+
+signal.signal(debug_signum, debug_signal)
+
import io
import logging
-import optparse
import subprocess
import time
import textwrap
import re
-try:
- import portage
-except ImportError:
- from os import path as osp
- sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
- import portage
-
+from os import path as osp
+pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
+sys.path.insert(0, pym_path)
+import portage
+portage._internal_caller = True
from portage import os, _encodings, _unicode_encode, _unicode_decode
from _emerge.MetadataRegen import MetadataRegen
from portage.cache.cache_errors import CacheError, StatCollision
+from portage.const import TIMESTAMP_FORMAT
from portage.manifest import guessManifestFileType
+from portage.package.ebuild._parallel_manifest.ManifestScheduler import ManifestScheduler
from portage.util import cmp_sort_key, writemsg_level
+from portage.util._argparse import ArgumentParser
+from portage.util._async.run_main_scheduler import run_main_scheduler
+from portage.util._eventloop.global_event_loop import global_event_loop
from portage import cpv_getkey
from portage.dep import Atom, isjustname
from portage.versions import pkgsplit, vercmp
@@ -59,72 +74,98 @@ else:
from repoman.utilities import FindVCS
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
def parse_args(args):
usage = "egencache [options] <action> ... [atom] ..."
- parser = optparse.OptionParser(usage=usage)
+ parser = ArgumentParser(usage=usage)
- actions = optparse.OptionGroup(parser, 'Actions')
- actions.add_option("--update",
+ actions = parser.add_argument_group('Actions')
+ actions.add_argument("--update",
action="store_true",
- help="update metadata/cache/ (generate as necessary)")
- actions.add_option("--update-use-local-desc",
+ help="update metadata/md5-cache/ (generate as necessary)")
+ actions.add_argument("--update-use-local-desc",
action="store_true",
help="update the use.local.desc file from metadata.xml")
- actions.add_option("--update-changelogs",
+ actions.add_argument("--update-changelogs",
action="store_true",
help="update the ChangeLog files from SCM logs")
- parser.add_option_group(actions)
+ actions.add_argument("--update-manifests",
+ action="store_true",
+ help="update manifests")
- common = optparse.OptionGroup(parser, 'Common options')
- common.add_option("--repo",
+ common = parser.add_argument_group('Common options')
+ common.add_argument("--repo",
action="store",
- help="name of repo to operate on (default repo is located at $PORTDIR)")
- common.add_option("--config-root",
+ help="name of repo to operate on")
+ common.add_argument("--config-root",
help="location of portage config files",
dest="portage_configroot")
- common.add_option("--portdir",
- help="override the portage tree location",
+ common.add_argument("--gpg-dir",
+ help="override the PORTAGE_GPG_DIR variable",
+ dest="gpg_dir")
+ common.add_argument("--gpg-key",
+ help="override the PORTAGE_GPG_KEY variable",
+ dest="gpg_key")
+ common.add_argument("--portdir",
+ help="override the PORTDIR variable (deprecated in favor of --repositories-configuration)",
dest="portdir")
- common.add_option("--portdir-overlay",
- help="override the PORTDIR_OVERLAY variable (requires that --repo is also specified)",
+ common.add_argument("--portdir-overlay",
+ help="override the PORTDIR_OVERLAY variable (deprecated in favor of --repositories-configuration)",
dest="portdir_overlay")
- common.add_option("--tolerant",
+ common.add_argument("--repositories-configuration",
+ help="override configuration of repositories (in format of repos.conf)",
+ dest="repositories_configuration")
+ common.add_argument("--sign-manifests",
+ choices=('y', 'n'),
+ metavar="<y|n>",
+ help="manually override layout.conf sign-manifests setting")
+ common.add_argument("--strict-manifests",
+ choices=('y', 'n'),
+ metavar="<y|n>",
+ help="manually override \"strict\" FEATURES setting")
+ common.add_argument("--thin-manifests",
+ choices=('y', 'n'),
+ metavar="<y|n>",
+ help="manually override layout.conf thin-manifests setting")
+ common.add_argument("--tolerant",
action="store_true",
help="exit successfully if only minor errors occurred")
- common.add_option("--ignore-default-opts",
+ common.add_argument("--ignore-default-opts",
action="store_true",
help="do not use the EGENCACHE_DEFAULT_OPTS environment variable")
- parser.add_option_group(common)
+ common.add_argument("--write-timestamp",
+ action="store_true",
+ help="write metadata/timestamp.chk as required for rsync repositories")
- update = optparse.OptionGroup(parser, '--update options')
- update.add_option("--cache-dir",
+ update = parser.add_argument_group('--update options')
+ update.add_argument("--cache-dir",
help="location of the metadata cache",
dest="cache_dir")
- update.add_option("--jobs",
+ update.add_argument("-j", "--jobs",
+ type=int,
action="store",
help="max ebuild processes to spawn")
- update.add_option("--load-average",
+ update.add_argument("--load-average",
+ type=float,
action="store",
help="max load allowed when spawning multiple jobs",
dest="load_average")
- update.add_option("--rsync",
+ update.add_argument("--rsync",
action="store_true",
help="enable rsync stat collision workaround " + \
"for bug 139134 (use with --update)")
- parser.add_option_group(update)
- uld = optparse.OptionGroup(parser, '--update-use-local-desc options')
- uld.add_option("--preserve-comments",
+ uld = parser.add_argument_group('--update-use-local-desc options')
+ uld.add_argument("--preserve-comments",
action="store_true",
help="preserve the comments from the existing use.local.desc file")
- uld.add_option("--use-local-desc-output",
+ uld.add_argument("--use-local-desc-output",
help="output file for use.local.desc data (or '-' for stdout)",
dest="uld_output")
- parser.add_option_group(uld)
- options, args = parser.parse_args(args)
+ options, args = parser.parse_known_args(args)
if options.jobs:
jobs = None
@@ -171,9 +212,12 @@ def parse_args(args):
parser.error("Write access denied: --cache-dir='%s'" % \
(options.cache_dir,))
- if options.portdir_overlay is not None and \
- options.repo is None:
- parser.error("--portdir-overlay option requires --repo option")
+ if options.portdir is not None:
+ writemsg_level("egencache: warning: --portdir option is deprecated in favor of --repositories-configuration option\n",
+ level=logging.WARNING, noiselevel=-1)
+ if options.portdir_overlay is not None:
+ writemsg_level("egencache: warning: --portdir-overlay option is deprecated in favor of --repositories-configuration option\n",
+ level=logging.WARNING, noiselevel=-1)
for atom in args:
try:
@@ -215,9 +259,11 @@ class GenCache(object):
else:
self._cp_set = None
self._cp_missing = set()
+ write_auxdb = "metadata-transfer" in portdb.settings.features
self._regen = MetadataRegen(portdb, cp_iter=cp_iter,
consumer=self._metadata_callback,
- max_jobs=max_jobs, max_load=max_load)
+ max_jobs=max_jobs, max_load=max_load,
+ write_auxdb=write_auxdb, main=True)
self.returncode = os.EX_OK
conf = portdb.repositories.get_repo_for_location(tree)
self._trg_caches = tuple(conf.iter_pregenerated_caches(
@@ -255,98 +301,74 @@ class GenCache(object):
def _write_cache(self, trg_cache, cpv, repo_path, metadata, ebuild_hash):
- if not hasattr(trg_cache, 'raise_stat_collision'):
- # This cache does not avoid redundant writes automatically,
- # so check for an identical existing entry before writing.
- # This prevents unnecessary disk writes and can also prevent
- # unnecessary rsync transfers.
- try:
- dest = trg_cache[cpv]
- except (KeyError, CacheError):
- pass
- else:
- if trg_cache.validate_entry(dest,
- ebuild_hash, self._eclass_db):
- identical = True
- for k in self._auxdbkeys:
- if dest.get(k, '') != metadata.get(k, ''):
- identical = False
- break
- if identical:
- return
+ if not hasattr(trg_cache, 'raise_stat_collision'):
+ # This cache does not avoid redundant writes automatically,
+ # so check for an identical existing entry before writing.
+ # This prevents unnecessary disk writes and can also prevent
+ # unnecessary rsync transfers.
+ try:
+ dest = trg_cache[cpv]
+ except (KeyError, CacheError):
+ pass
+ else:
+ if trg_cache.validate_entry(dest,
+ ebuild_hash, self._eclass_db):
+ identical = True
+ for k in self._auxdbkeys:
+ if dest.get(k, '') != metadata.get(k, ''):
+ identical = False
+ break
+ if identical:
+ return
+ try:
+ chf = trg_cache.validation_chf
+ metadata['_%s_' % chf] = getattr(ebuild_hash, chf)
try:
- chf = trg_cache.validation_chf
- metadata['_%s_' % chf] = getattr(ebuild_hash, chf)
+ trg_cache[cpv] = metadata
+ except StatCollision as sc:
+ # If the content of a cache entry changes and neither the
+ # file mtime nor size changes, it will prevent rsync from
+ # detecting changes. Cache backends may raise this
+ # exception from _setitem() if they detect this type of stat
+ # collision. These exceptions are handled by bumping the
+ # mtime on the ebuild (and the corresponding cache entry).
+ # See bug #139134. It is convenient to include checks for
+ # redundant writes along with the internal StatCollision
+ # detection code, so for caches with the
+ # raise_stat_collision attribute, we do not need to
+ # explicitly check for redundant writes like we do for the
+ # other cache types above.
+ max_mtime = sc.mtime
+ for _ec, ec_hash in metadata['_eclasses_'].items():
+ if max_mtime < ec_hash.mtime:
+ max_mtime = ec_hash.mtime
+ if max_mtime == sc.mtime:
+ max_mtime += 1
+ max_mtime = long(max_mtime)
try:
+ os.utime(ebuild_hash.location, (max_mtime, max_mtime))
+ except OSError as e:
+ self.returncode |= 1
+ writemsg_level(
+ "%s writing target: %s\n" % (cpv, e),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ ebuild_hash.mtime = max_mtime
+ metadata['_mtime_'] = max_mtime
trg_cache[cpv] = metadata
- except StatCollision as sc:
- # If the content of a cache entry changes and neither the
- # file mtime nor size changes, it will prevent rsync from
- # detecting changes. Cache backends may raise this
- # exception from _setitem() if they detect this type of stat
- # collision. These exceptions are handled by bumping the
- # mtime on the ebuild (and the corresponding cache entry).
- # See bug #139134. It is convenient to include checks for
- # redundant writes along with the internal StatCollision
- # detection code, so for caches with the
- # raise_stat_collision attribute, we do not need to
- # explicitly check for redundant writes like we do for the
- # other cache types above.
- max_mtime = sc.mtime
- for ec, ec_hash in metadata['_eclasses_'].items():
- if max_mtime < ec_hash.mtime:
- max_mtime = ec_hash.mtime
- if max_mtime == sc.mtime:
- max_mtime += 1
- max_mtime = long(max_mtime)
- try:
- os.utime(ebuild_hash.location, (max_mtime, max_mtime))
- except OSError as e:
- self.returncode |= 1
- writemsg_level(
- "%s writing target: %s\n" % (cpv, e),
- level=logging.ERROR, noiselevel=-1)
- else:
- ebuild_hash.mtime = max_mtime
- metadata['_mtime_'] = max_mtime
- trg_cache[cpv] = metadata
- self._portdb.auxdb[repo_path][cpv] = metadata
+ self._portdb.auxdb[repo_path][cpv] = metadata
- except CacheError as ce:
- self.returncode |= 1
- writemsg_level(
- "%s writing target: %s\n" % (cpv, ce),
- level=logging.ERROR, noiselevel=-1)
+ except CacheError as ce:
+ self.returncode |= 1
+ writemsg_level(
+ "%s writing target: %s\n" % (cpv, ce),
+ level=logging.ERROR, noiselevel=-1)
def run(self):
-
- received_signal = []
-
- def sighandler(signum, frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- self._regen.terminate()
- received_signal.append(128 + signum)
-
- earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
- earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
-
- try:
- self._regen.run()
- finally:
- # Restore previous handlers
- if earlier_sigint_handler is not None:
- signal.signal(signal.SIGINT, earlier_sigint_handler)
- else:
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- if earlier_sigterm_handler is not None:
- signal.signal(signal.SIGTERM, earlier_sigterm_handler)
- else:
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
-
- if received_signal:
- sys.exit(received_signal[0])
+ signum = run_main_scheduler(self._regen)
+ if signum is not None:
+ sys.exit(128 + signum)
self.returncode |= self._regen.returncode
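The StatCollision handling above exists because rsync's default change detection is size plus mtime: if a cache entry's content changes without either, mirrors would never pick it up, so the ebuild's mtime is bumped past every eclass mtime and the entry rewritten. The bump itself, as a shell sketch with GNU stat/touch and a hypothetical path:

# Sketch: push a file's mtime one second past its colliding timestamp.
old_mtime=$(stat -c '%Y' pkg.ebuild)
touch -d "@$((old_mtime + 1))" pkg.ebuild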
@@ -371,8 +393,8 @@ class GenCache(object):
self.returncode |= 1
writemsg_level(
"Error listing cache entries for " + \
- "'%s/metadata/cache': %s, continuing...\n" % \
- (self._portdb.porttree_root, ce),
+ "'%s': %s, continuing...\n" % \
+ (trg_cache.location, ce),
level=logging.ERROR, noiselevel=-1)
else:
@@ -393,8 +415,8 @@ class GenCache(object):
self.returncode |= 1
writemsg_level(
"Error listing cache entries for " + \
- "'%s/metadata/cache': %s, continuing...\n" % \
- (self._portdb.porttree_root, ce),
+ "'%s': %s, continuing...\n" % \
+ (trg_cache.location, ce),
level=logging.ERROR, noiselevel=-1)
if cp_missing:
@@ -436,7 +458,7 @@ class GenUseLocalDesc(object):
self._portdb = portdb
self._output = output
self._preserve_comments = preserve_comments
-
+
def run(self):
repo_path = self._portdb.porttrees[0]
ops = {'<':0, '<=':1, '=':2, '>=':3, '>':4}
@@ -509,14 +531,14 @@ class GenUseLocalDesc(object):
encoding=_encodings['fs'], errors='strict'),
mode='a', encoding=_encodings['repo.content'],
errors='backslashreplace')
- output.write(_unicode_decode('\n'))
+ output.write('\n')
else:
- output.write(textwrap.dedent(_unicode_decode('''\
+ output.write(textwrap.dedent('''\
# This file is deprecated as per GLEP 56 in favor of metadata.xml. Please add
# your descriptions to your package's metadata.xml ONLY.
# * generated automatically using egencache *
- ''')))
+ '''))
# The cmp function no longer exists in python3, so we'll
# implement our own here under a slightly different name
@@ -544,7 +566,8 @@ class GenUseLocalDesc(object):
for cp in self._portdb.cp_all():
metadata_path = os.path.join(repo_path, cp, 'metadata.xml')
try:
- metadata = ElementTree.parse(metadata_path,
+ metadata = ElementTree.parse(_unicode_encode(metadata_path,
+ encoding=_encodings['fs'], errors='strict'),
parser=ElementTree.XMLParser(
target=_MetadataTreeBuilder()))
except IOError:
@@ -600,8 +623,7 @@ class GenUseLocalDesc(object):
resatoms = sorted(reskeys, key=cmp_sort_key(atomcmp))
resdesc = resdict[reskeys[resatoms[-1]]]
- output.write(_unicode_decode(
- '%s:%s - %s\n' % (cp, flag, resdesc)))
+ output.write('%s:%s - %s\n' % (cp, flag, resdesc))
output.close()
@@ -623,7 +645,8 @@ class _special_filename(_filename_base):
self.file_name = file_name
self.file_type = guessManifestFileType(file_name)
- def file_type_lt(self, a, b):
+ @staticmethod
+ def file_type_lt(a, b):
"""
Defines an ordering between file types.
"""
@@ -698,12 +721,12 @@ class GenChangeLogs(object):
self.returncode |= 2
return
- output.write(textwrap.dedent(_unicode_decode('''\
+ output.write(textwrap.dedent('''\
# ChangeLog for %s
# Copyright 1999-%s Gentoo Foundation; Distributed under the GPL v2
# $Header: $
- ''' % (cp, time.strftime('%Y')))))
+ ''' % (cp, time.strftime('%Y'))))
# now grab all the commits
commits = self.grab(['git', 'rev-list', 'HEAD', '--', '.']).split()
@@ -767,11 +790,10 @@ class GenChangeLogs(object):
# Reverse the sort order for headers.
for c in reversed(changed):
if c.startswith('+') and c.endswith('.ebuild'):
- output.write(_unicode_decode(
- '*%s (%s)\n' % (c[1:-7], date)))
+ output.write('*%s (%s)\n' % (c[1:-7], date))
wroteheader = True
if wroteheader:
- output.write(_unicode_decode('\n'))
+ output.write('\n')
# strip '<cp>: ', '[<cp>] ', and similar
body[0] = re.sub(r'^\W*' + re.escape(cp) + r'\W+', '', body[0])
@@ -791,13 +813,12 @@ class GenChangeLogs(object):
# don't break filenames on hyphens
self._wrapper.break_on_hyphens = False
- output.write(_unicode_decode(
- self._wrapper.fill(
- '%s; %s %s:' % (date, author, ', '.join(changed)))))
+ output.write(self._wrapper.fill(
+ '%s; %s %s:' % (date, author, ', '.join(changed))))
# but feel free to break commit messages there
self._wrapper.break_on_hyphens = True
- output.write(_unicode_decode(
- '\n%s\n\n' % '\n'.join(self._wrapper.fill(x) for x in body)))
+ output.write(
+ '\n%s\n\n' % '\n'.join(self._wrapper.fill(x) for x in body))
output.close()
@@ -830,17 +851,22 @@ class GenChangeLogs(object):
self.generate_changelog(cp)
def egencache_main(args):
- parser, options, atoms = parse_args(args)
-
- config_root = options.config_root
# The calling environment is ignored, so the program is
# completely controlled by commandline arguments.
env = {}
- if options.repo is None:
- env['PORTDIR_OVERLAY'] = ''
- elif options.portdir_overlay:
+ if not sys.stdout.isatty():
+ portage.output.nocolor()
+ env['NOCOLOR'] = 'true'
+
+ parser, options, atoms = parse_args(args)
+
+ config_root = options.config_root
+
+ if options.repositories_configuration is not None:
+ env['PORTAGE_REPOSITORIES'] = options.repositories_configuration
+ elif options.portdir_overlay is not None:
env['PORTDIR_OVERLAY'] = options.portdir_overlay
if options.cache_dir is not None:
@@ -854,7 +880,8 @@ def egencache_main(args):
default_opts = None
if not options.ignore_default_opts:
- default_opts = settings.get('EGENCACHE_DEFAULT_OPTS', '').split()
+ default_opts = portage.util.shlex_split(
+ settings.get('EGENCACHE_DEFAULT_OPTS', ''))
if default_opts:
parser, options, args = parse_args(default_opts + args)
@@ -865,18 +892,50 @@ def egencache_main(args):
settings = portage.config(config_root=config_root,
local_config=False, env=env)
- if not options.update and not options.update_use_local_desc \
- and not options.update_changelogs:
+ if not (options.update or options.update_use_local_desc or
+ options.update_changelogs or options.update_manifests):
parser.error('No action specified')
return 1
+ if options.repo is None:
+ if len(settings.repositories.prepos) == 2:
+ for repo in settings.repositories:
+ if repo.name != "DEFAULT":
+ options.repo = repo.name
+ break
+
+ if options.repo is None:
+ parser.error("--repo option is required")
+
+ repo_path = settings.repositories.treemap.get(options.repo)
+ if repo_path is None:
+ parser.error("Unable to locate repository named '%s'" % (options.repo,))
+ return 1
+
+ repo_config = settings.repositories.get_repo_for_location(repo_path)
+
+ if options.strict_manifests is not None:
+ if options.strict_manifests == "y":
+ settings.features.add("strict")
+ else:
+ settings.features.discard("strict")
+
if options.update and 'metadata-transfer' not in settings.features:
- settings.features.add('metadata-transfer')
+ # Forcibly enable metadata-transfer if portdbapi has a pregenerated
+ # cache that does not support eclass validation.
+ cache = repo_config.get_pregenerated_cache(
+ portage.dbapi.dbapi._known_keys, readonly=True)
+ if cache is not None and not cache.complete_eclass_entries:
+ settings.features.add('metadata-transfer')
+ cache = None
settings.lock()
portdb = portage.portdbapi(mysettings=settings)
+ # Limit ebuilds to the specified repo.
+ portdb.porttrees = [repo_path]
+
if options.update:
if options.cache_dir is not None:
# already validated earlier
@@ -892,17 +951,71 @@ def egencache_main(args):
level=logging.ERROR, noiselevel=-1)
return 1
- if options.repo is not None:
- repo_path = portdb.getRepositoryPath(options.repo)
- if repo_path is None:
- parser.error("Unable to locate repository named '%s'" % \
- (options.repo,))
- return 1
+ if options.sign_manifests is not None:
+ repo_config.sign_manifest = options.sign_manifests == 'y'
- # Limit ebuilds to the specified repo.
- portdb.porttrees = [repo_path]
- else:
- portdb.porttrees = [portdb.porttree_root]
+ if options.thin_manifests is not None:
+ repo_config.thin_manifest = options.thin_manifests == 'y'
+
+ gpg_cmd = None
+ gpg_vars = None
+ force_sign_key = None
+
+ if options.update_manifests:
+ if repo_config.sign_manifest:
+
+ sign_problem = False
+ gpg_dir = None
+ gpg_cmd = settings.get("PORTAGE_GPG_SIGNING_COMMAND")
+ if gpg_cmd is None:
+ writemsg_level("egencache: error: "
+ "PORTAGE_GPG_SIGNING_COMMAND is unset! "
+ "Is make.globals missing?\n",
+ level=logging.ERROR, noiselevel=-1)
+ sign_problem = True
+ elif "${PORTAGE_GPG_KEY}" in gpg_cmd and \
+ options.gpg_key is None and \
+ "PORTAGE_GPG_KEY" not in settings:
+ writemsg_level("egencache: error: "
+ "PORTAGE_GPG_KEY is unset!\n",
+ level=logging.ERROR, noiselevel=-1)
+ sign_problem = True
+ elif "${PORTAGE_GPG_DIR}" in gpg_cmd:
+ if options.gpg_dir is not None:
+ gpg_dir = options.gpg_dir
+ elif "PORTAGE_GPG_DIR" not in settings:
+ gpg_dir = os.path.expanduser("~/.gnupg")
+ else:
+ gpg_dir = os.path.expanduser(settings["PORTAGE_GPG_DIR"])
+ if not os.access(gpg_dir, os.X_OK):
+ writemsg_level(("egencache: error: "
+ "Unable to access directory: "
+ "PORTAGE_GPG_DIR='%s'\n") % gpg_dir,
+ level=logging.ERROR, noiselevel=-1)
+ sign_problem = True
+
+ if sign_problem:
+ writemsg_level("egencache: You may disable manifest "
+ "signatures with --sign-manifests=n or by setting "
+ "\"sign-manifests = false\" in metadata/layout.conf\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ gpg_vars = {}
+ if gpg_dir is not None:
+ gpg_vars["PORTAGE_GPG_DIR"] = gpg_dir
+ gpg_var_names = []
+ if options.gpg_key is None:
+ gpg_var_names.append("PORTAGE_GPG_KEY")
+ else:
+ gpg_vars["PORTAGE_GPG_KEY"] = options.gpg_key
+
+ for k in gpg_var_names:
+ v = settings.get(k)
+ if v is not None:
+ gpg_vars[k] = v
+
+ force_sign_key = gpg_vars.get("PORTAGE_GPG_KEY")
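+	# gpg_vars now supplies values for the ${PORTAGE_GPG_DIR} and
+	# ${PORTAGE_GPG_KEY} placeholders that may appear in gpg_cmd (a
+	# summary note; the expansion itself happens later in the scheduler).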
ret = [os.EX_OK]
@@ -921,6 +1034,29 @@ def egencache_main(args):
else:
ret.append(gen_cache.returncode)
+ if options.update_manifests:
+
+ cp_iter = None
+ if atoms:
+ cp_iter = iter(atoms)
+
+ event_loop = global_event_loop()
+ scheduler = ManifestScheduler(portdb, cp_iter=cp_iter,
+ gpg_cmd=gpg_cmd, gpg_vars=gpg_vars,
+ force_sign_key=force_sign_key,
+ max_jobs=options.jobs,
+ max_load=options.load_average,
+ event_loop=event_loop)
+
+ signum = run_main_scheduler(scheduler)
+ if signum is not None:
+ sys.exit(128 + signum)
+
+ if options.tolerant:
+ ret.append(os.EX_OK)
+ else:
+ ret.append(scheduler.returncode)
+
if options.update_use_local_desc:
gen_desc = GenUseLocalDesc(portdb,
output=options.uld_output,
@@ -933,6 +1069,16 @@ def egencache_main(args):
gen_clogs.run()
ret.append(gen_clogs.returncode)
+ if options.write_timestamp:
+ timestamp_path = os.path.join(repo_path, 'metadata', 'timestamp.chk')
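+			# TIMESTAMP_FORMAT is defined elsewhere (presumably imported from
+			# portage.const); the '%s\n' wrapper below just appends a newline.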
+ try:
+ with open(timestamp_path, 'w') as f:
+ f.write(time.strftime('%s\n' % TIMESTAMP_FORMAT, time.gmtime()))
+ except IOError:
+ ret.append(os.EX_IOERR)
+ else:
+ ret.append(os.EX_OK)
+
return max(ret)
if __name__ == "__main__":
diff --git a/bin/emaint b/bin/emaint
index bee46c40d..aeeb18328 100755
--- a/bin/emaint
+++ b/bin/emaint
@@ -1,9 +1,8 @@
-#!/usr/bin/python -O
-# Copyright 2005-2012 Gentoo Foundation
+#!/usr/bin/python -bO
+# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'The emaint program provides an interface to system health
- checks and maintenance.
+"""System health checks and maintenance utilities.
"""
from __future__ import print_function
@@ -14,10 +13,10 @@ import errno
try:
import signal
- def exithandler(signum,frame):
+ def exithandler(signum, _frame):
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
- sys.exit(1)
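+		# 128 + signum follows the shell convention for processes
+		# killed by a signal.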
+ sys.exit(128 + signum)
signal.signal(signal.SIGINT, exithandler)
signal.signal(signal.SIGTERM, exithandler)
@@ -26,13 +25,11 @@ try:
except KeyboardInterrupt:
sys.exit(1)
-try:
- import portage
-except ImportError:
- from os import path as osp
- sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
- import portage
-
+from os import path as osp
+pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
+sys.path.insert(0, pym_path)
+import portage
+portage._internal_caller = True
from portage.emaint.main import emaint_main
try:
diff --git a/bin/emerge b/bin/emerge
index a9a56432c..bb93d83a6 100755
--- a/bin/emerge
+++ b/bin/emerge
@@ -1,5 +1,5 @@
-#!/usr/bin/python
-# Copyright 2006-2012 Gentoo Foundation
+#!/usr/bin/python -b
+# Copyright 2006-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -7,67 +7,73 @@ from __future__ import print_function
import platform
import signal
import sys
-# This block ensures that ^C interrupts are handled quietly.
+
+# This block ensures that ^C interrupts are handled quietly. We handle
+# KeyboardInterrupt instead of installing a SIGINT handler, since
+# exiting from signal handlers intermittently causes python to ignore
+# the SystemExit exception with a message like this:
+# Exception SystemExit: 130 in <function remove at 0x7fd2146c1320> ignored
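+# In outline, the pattern below is (a sketch; "main_loop" stands in for the
+# real program body, which is inlined in this script):
+#   try:
+#       main_loop()
+#   except KeyboardInterrupt:
+#       sys.exit(128 + signal.SIGINT)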
try:
- def exithandler(signum,frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
+ def exithandler(signum, _frame):
signal.signal(signal.SIGTERM, signal.SIG_IGN)
sys.exit(128 + signum)
- signal.signal(signal.SIGINT, exithandler)
signal.signal(signal.SIGTERM, exithandler)
# Prevent "[Errno 32] Broken pipe" exceptions when
# writing to a pipe.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-except KeyboardInterrupt:
- sys.exit(128 + signal.SIGINT)
+ def debug_signal(_signum, _frame):
+ import pdb
+ pdb.set_trace()
-def debug_signal(signum, frame):
- import pdb
- pdb.set_trace()
+ if platform.python_implementation() == 'Jython':
+ debug_signum = signal.SIGUSR2 # bug #424259
+ else:
+ debug_signum = signal.SIGUSR1
-if platform.python_implementation() == 'Jython':
- debug_signum = signal.SIGUSR2 # bug #424259
-else:
- debug_signum = signal.SIGUSR1
+ signal.signal(debug_signum, debug_signal)
-signal.signal(debug_signum, debug_signal)
-
-try:
- from _emerge.main import emerge_main
-except ImportError:
from os import path as osp
- import sys
- sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ pym_path = osp.join(osp.dirname(osp.dirname(
+ osp.realpath(__file__))), "pym")
+ sys.path.insert(0, pym_path)
+ import portage
+ portage._internal_caller = True
+ portage._disable_legacy_globals()
from _emerge.main import emerge_main
-if __name__ == "__main__":
- import sys
- from portage.exception import ParseError, PermissionDenied
- try:
- retval = emerge_main()
- except PermissionDenied as e:
- sys.stderr.write("Permission denied: '%s'\n" % str(e))
- sys.exit(e.errno)
- except ParseError as e:
- sys.stderr.write("%s\n" % str(e))
- sys.exit(1)
- except SystemExit:
- raise
- except Exception:
- # If an unexpected exception occurs then we don't want the mod_echo
- # output to obscure the traceback, so dump the mod_echo output before
- # showing the traceback.
- import traceback
- tb_str = traceback.format_exc()
+ if __name__ == "__main__":
+ from portage.exception import ParseError, PermissionDenied
try:
- from portage.elog import mod_echo
- except ImportError:
- pass
- else:
- mod_echo.finalize()
- sys.stderr.write(tb_str)
- sys.exit(1)
- sys.exit(retval)
+ retval = emerge_main()
+ except PermissionDenied as e:
+ sys.stderr.write("Permission denied: '%s'\n" % str(e))
+ sys.exit(e.errno)
+ except ParseError as e:
+ sys.stderr.write("%s\n" % str(e))
+ sys.exit(1)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except Exception:
+ # If an unexpected exception occurs then we don't want the
+ # mod_echo output to obscure the traceback, so dump the
+ # mod_echo output before showing the traceback.
+ import traceback
+ tb_str = traceback.format_exc()
+ try:
+ from portage.elog import mod_echo
+ except ImportError:
+ pass
+ else:
+ mod_echo.finalize()
+ sys.stderr.write(tb_str)
+ sys.exit(1)
+ sys.exit(retval)
+
+except KeyboardInterrupt:
+ sys.stderr.write("\n\nExiting on signal %(signal)s\n" %
+ {"signal": signal.SIGINT})
+ sys.stderr.flush()
+ sys.exit(128 + signal.SIGINT)
diff --git a/bin/emerge-webrsync b/bin/emerge-webrsync
index bfd9aa2fc..2f0689c15 100755
--- a/bin/emerge-webrsync
+++ b/bin/emerge-webrsync
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Author: Karl Trygve Kalleberg <karltk@gentoo.org>
# Rewritten from the old, Perl-based emerge-webrsync script
@@ -22,9 +22,9 @@ vvecho() { [[ ${do_verbose} -eq 1 ]] && echo "$@" ; }
# Only echo if not in verbose mode
nvecho() { [[ ${do_verbose} -eq 0 ]] && echo "$@" ; }
# warning echoes
-wecho() { echo "${argv0}: warning: $*" 1>&2 ; }
+wecho() { echo "${argv0##*/}: warning: $*" 1>&2 ; }
# error echoes
-eecho() { echo "${argv0}: error: $*" 1>&2 ; }
+eecho() { echo "${argv0##*/}: error: $*" 1>&2 ; }
argv0=$0
@@ -39,23 +39,33 @@ else
eecho "could not find 'portageq'; aborting"
exit 1
fi
-eval $("${portageq}" envvar -v FEATURES FETCHCOMMAND GENTOO_MIRRORS \
- PORTAGE_BIN_PATH PORTAGE_GPG_DIR \
- PORTAGE_NICENESS PORTAGE_RSYNC_EXTRA_OPTS PORTAGE_TMPDIR PORTDIR \
- SYNC http_proxy ftp_proxy)
-DISTDIR="${PORTAGE_TMPDIR}/emerge-webrsync"
+eval "$("${portageq}" envvar -v DISTDIR EPREFIX FEATURES \
+ FETCHCOMMAND GENTOO_MIRRORS \
+ PORTAGE_BIN_PATH PORTAGE_CONFIGROOT PORTAGE_GPG_DIR \
+ PORTAGE_NICENESS PORTAGE_REPOSITORIES PORTAGE_RSYNC_EXTRA_OPTS \
+ PORTAGE_RSYNC_OPTS PORTAGE_TMPDIR \
+ USERLAND http_proxy ftp_proxy)"
export http_proxy ftp_proxy
+source "${PORTAGE_BIN_PATH}"/isolated-functions.sh || exit 1
+
+repo_name=gentoo
+repo_location=$(__repo_attr "${repo_name}" location)
+if [[ -z ${repo_location} ]]; then
+ eecho "Repository '${repo_name}' not found"
+ exit 1
+fi
+repo_sync_type=$(__repo_attr "${repo_name}" sync-type)
+
# If PORTAGE_NICENESS is overridden via the env then it will
# still pass through the portageq call and override properly.
if [ -n "${PORTAGE_NICENESS}" ]; then
renice $PORTAGE_NICENESS $$ > /dev/null
fi
-source "${PORTAGE_BIN_PATH}"/isolated-functions.sh || exit 1
-
do_verbose=0
do_debug=0
+keep=false
if has webrsync-gpg ${FEATURES} ; then
WEBSYNC_VERIFY_SIGNATURE=1
@@ -99,7 +109,9 @@ get_date_part() {
get_utc_second_from_string() {
local s="$1"
if [[ ${USERLAND} == BSD ]] ; then
- date -juf "%Y%m%d" "$s" +"%s"
+ # Specify zeros for the least significant digits, or else those
+ # digits are inherited from the current system clock time.
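+	# For example, s=20140326 is parsed as "201403260000.00",
+	# i.e. 2014-03-26 00:00:00 UTC.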
+ date -juf "%Y%m%d%H%M.%S" "${s}0000.00" +"%s"
else
date -d "${s:0:4}-${s:4:2}-${s:6:2}" -u +"%s"
fi
@@ -108,8 +120,8 @@ get_utc_second_from_string() {
get_portage_timestamp() {
local portage_current_timestamp=0
- if [ -f "${PORTDIR}/metadata/timestamp.x" ]; then
- portage_current_timestamp=$(cut -f 1 -d " " "${PORTDIR}/metadata/timestamp.x" )
+ if [ -f "${repo_location}/metadata/timestamp.x" ]; then
+ portage_current_timestamp=$(cut -f 1 -d " " "${repo_location}/metadata/timestamp.x" )
fi
echo "${portage_current_timestamp}"
@@ -125,13 +137,18 @@ fetch_file() {
elif [ "${FETCHCOMMAND/curl/}" != "${FETCHCOMMAND}" ]; then
opts="--continue-at - $(nvecho -s -f)"
else
- rm -f "${FILE}"
+ rm -f "${DISTDIR}/${FILE}"
fi
- vecho "Fetching file ${FILE} ..."
+ __vecho "Fetching file ${FILE} ..."
# already set DISTDIR=
- eval "${FETCHCOMMAND}" ${opts}
- [ -s "${FILE}" ]
+ eval "${FETCHCOMMAND} ${opts}"
+ if [[ $? -eq 0 && -s ${DISTDIR}/${FILE} ]] ; then
+ return 0
+ else
+ rm -f "${DISTDIR}/${FILE}"
+ return 1
+ fi
}
check_file_digest() {
@@ -139,10 +156,12 @@ check_file_digest() {
local file="$2"
local r=1
- vecho "Checking digest ..."
+ __vecho "Checking digest ..."
if type -P md5sum > /dev/null; then
- md5sum -c $digest && r=0
+ local md5sum_output=$(md5sum "${file}")
+ local digest_content=$(< "${digest}")
+ [ "${md5sum_output%%[[:space:]]*}" = "${digest_content%%[[:space:]]*}" ] && r=0
elif type -P md5 > /dev/null; then
[ "$(md5 -q "${file}")" == "$(cut -d ' ' -f 1 "${digest}")" ] && r=0
else
@@ -159,7 +178,7 @@ check_file_signature() {
if [ ${WEBSYNC_VERIFY_SIGNATURE} != 0 ]; then
- vecho "Checking signature ..."
+ __vecho "Checking signature ..."
if type -P gpg > /dev/null; then
gpg --homedir "${PORTAGE_GPG_DIR}" --verify "$signature" "$file" && r=0
@@ -183,13 +202,25 @@ get_snapshot_timestamp() {
sync_local() {
local file="$1"
- vecho "Syncing local tree ..."
+ __vecho "Syncing local tree ..."
+
+ local ownership="portage:portage"
+ if has usersync ${FEATURES} ; then
+ case "${USERLAND}" in
+ BSD)
+ ownership=$(stat -f '%Su:%Sg' "${repo_location}")
+ ;;
+ *)
+ ownership=$(stat -c '%U:%G' "${repo_location}")
+ ;;
+ esac
+ fi
if type -P tarsync > /dev/null ; then
- local chown_opts="-o portage -g portage"
- chown portage:portage portage > /dev/null 2>&1 || chown_opts=""
+ local chown_opts="-o ${ownership%:*} -g ${ownership#*:}"
+ chown ${ownership} "${repo_location}" > /dev/null 2>&1 || chown_opts=""
if ! tarsync $(vvecho -v) -s 1 ${chown_opts} \
- -e /distfiles -e /packages -e /local "${file}" "${PORTDIR}"; then
+ -e /distfiles -e /packages -e /local "${file}" "${repo_location}"; then
eecho "tarsync failed; tarball is corrupt? (${file})"
return 1
fi
@@ -201,27 +232,29 @@ sync_local() {
fi
# Free disk space
- rm -f "${file}"
+ ${keep} || rm -f "${file}"
- chown portage:portage portage > /dev/null 2>&1 && \
- chown -R portage:portage portage
+ local rsync_opts="${PORTAGE_RSYNC_OPTS} ${PORTAGE_RSYNC_EXTRA_OPTS}"
+ if chown ${ownership} portage > /dev/null 2>&1; then
+ chown -R ${ownership} portage
+ rsync_opts+=" --owner --group"
+ fi
cd portage
- rsync -av --progress --stats --delete --delete-after \
- --exclude='/distfiles' --exclude='/packages' \
- --exclude='/local' ${PORTAGE_RSYNC_EXTRA_OPTS} . "${PORTDIR%%/}"
+ rsync ${rsync_opts} . "${repo_location%%/}"
cd ..
- vecho "Cleaning up ..."
+ __vecho "Cleaning up ..."
rm -fr portage
fi
if has metadata-transfer ${FEATURES} ; then
- vecho "Updating cache ..."
- emerge --metadata
+ __vecho "Updating cache ..."
+ "${PORTAGE_BIN_PATH}/emerge" --metadata
fi
- [ -x /etc/portage/bin/post_sync ] && /etc/portage/bin/post_sync
+ local post_sync=${PORTAGE_CONFIGROOT}etc/portage/bin/post_sync
+ [ -x "${post_sync}" ] && "${post_sync}"
# --quiet suppresses output if there are no relevant news items
- has news ${FEATURES} && emerge --check-news --quiet
+ has news ${FEATURES} && "${PORTAGE_BIN_PATH}/emerge" --check-news --quiet
return 0
}
@@ -251,14 +284,15 @@ do_snapshot() {
for mirror in ${GENTOO_MIRRORS} ; do
- vecho "Trying to retrieve ${date} snapshot from ${mirror} ..."
+ mirror=${mirror%/}
+ __vecho "Trying to retrieve ${date} snapshot from ${mirror} ..."
for compression in ${compressions} ; do
local file="portage-${date}.tar.${compression}"
local digest="${file}.md5sum"
local signature="${file}.gpgsig"
- if [ -s "${file}" -a -s "${digest}" -a -s "${signature}" ] ; then
+ if [ -s "${DISTDIR}/${file}" -a -s "${DISTDIR}/${digest}" -a -s "${DISTDIR}/${signature}" ] ; then
check_file_digest "${DISTDIR}/${digest}" "${DISTDIR}/${file}" && \
check_file_signature "${DISTDIR}/${signature}" "${DISTDIR}/${file}" && \
have_files=1
@@ -280,8 +314,8 @@ do_snapshot() {
#
if [ ${have_files} -eq 1 ]; then
- vecho "Getting snapshot timestamp ..."
- local snapshot_timestamp=$(get_snapshot_timestamp "${file}")
+ __vecho "Getting snapshot timestamp ..."
+ local snapshot_timestamp=$(get_snapshot_timestamp "${DISTDIR}/${file}")
if [ ${ignore_timestamp} == 0 ]; then
if [ ${snapshot_timestamp} -lt $(get_portage_timestamp) ]; then
@@ -310,7 +344,7 @@ do_snapshot() {
#
# Remove files and use a different mirror
#
- rm -f "${file}" "${digest}" "${signature}"
+ rm -f "${DISTDIR}/${file}" "${DISTDIR}/${digest}" "${DISTDIR}/${signature}"
fi
done
@@ -318,12 +352,12 @@ do_snapshot() {
done
if [ ${have_files} -eq 1 ]; then
- sync_local "${file}" && r=0
+ sync_local "${DISTDIR}/${file}" && r=0
else
- vecho "${date} snapshot was not found"
+ __vecho "${date} snapshot was not found"
fi
-
- rm -f "${file}" "${digest}" "${signature}"
+
+ ${keep} || rm -f "${DISTDIR}/${file}" "${DISTDIR}/${digest}" "${DISTDIR}/${signature}"
return "${r}"
}
@@ -331,9 +365,9 @@ do_latest_snapshot() {
local attempts=0
local r=1
- vecho "Fetching most recent snapshot ..."
+ __vecho "Fetching most recent snapshot ..."
- # The snapshot for a given day is generated at 01:45 UTC on the following
+ # The snapshot for a given day is generated at 00:45 UTC on the following
# day, so the current day's snapshot (going by UTC time) hasn't been
# generated yet. Therefore, always start by looking for the previous day's
# snapshot (for attempts=1, subtract 1 day from the current UTC time).
@@ -349,10 +383,10 @@ do_latest_snapshot() {
local start_time=$(get_utc_date_in_seconds)
local start_hour=$(get_date_part ${start_time} "%H")
- # Daily snapshots are created at 1:45 AM and are not
- # available until after 2 AM. Don't waste time trying
+ # Daily snapshots are created at 00:45 and are not
+ # available until after 01:00. Don't waste time trying
# to fetch a snapshot before it's been created.
- if [ ${start_hour} -lt 2 ] ; then
+ if [ ${start_hour} -lt 1 ] ; then
(( start_time -= 86400 ))
fi
local snapshot_date=$(get_date_part ${start_time} "%Y%m%d")
@@ -361,8 +395,8 @@ do_latest_snapshot() {
while (( ${attempts} < 40 )) ; do
(( attempts++ ))
(( snapshot_date_seconds -= 86400 ))
- # snapshots are created at 1:45 AM
- (( approx_snapshot_time = snapshot_date_seconds + 86400 + 6300 ))
+ # snapshots are created at 00:45
+ (( approx_snapshot_time = snapshot_date_seconds + 86400 + 2700 ))
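+		# (86400 seconds = 1 day; 2700 seconds = 45 * 60, i.e. 00:45
+		# on the following day.)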
(( timestamp_difference = existing_timestamp - approx_snapshot_time ))
[ ${timestamp_difference} -lt 0 ] && (( timestamp_difference = -1 * timestamp_difference ))
snapshot_date=$(get_date_part ${snapshot_date_seconds} "%Y%m%d")
@@ -388,7 +422,7 @@ do_latest_snapshot() {
"snapshot. In order to force sync," \
"use the --revert option or remove" \
"the timestamp file located at" \
- "'${PORTDIR}/metadata/timestamp.x'." | fmt -w 70 | \
+ "'${repo_location}/metadata/timestamp.x'." | fmt -w 70 | \
while read -r line ; do
ewarn "${line}"
done
@@ -408,9 +442,10 @@ do_latest_snapshot() {
usage() {
cat <<-EOF
Usage: $0 [options]
-
+
Options:
--revert=yyyymmdd Revert to snapshot
+ -k, --keep Keep snapshots in DISTDIR (don't delete)
-q, --quiet Only output errors
-v, --verbose Enable verbose output
-x, --debug Enable debug output
@@ -427,14 +462,12 @@ usage() {
main() {
local arg
local revert_date
-
- [ ! -d "${DISTDIR}" ] && mkdir -p "${DISTDIR}"
- cd "${DISTDIR}"
for arg in "$@" ; do
local v=${arg#*=}
case ${arg} in
-h|--help) usage ;;
+ -k|--keep) keep=true ;;
-q|--quiet) PORTAGE_QUIET=1 ;;
-v|--verbose) do_verbose=1 ;;
-x|--debug) do_debug=1 ;;
@@ -443,16 +476,39 @@ main() {
esac
done
+ [[ -d ${repo_location} ]] || mkdir -p "${repo_location}"
+ if [[ ! -w ${repo_location} ]] ; then
+ eecho "Repository '${repo_name}' is not writable: ${repo_location}"
+ exit 1
+ fi
+
+ [[ -d ${PORTAGE_TMPDIR}/portage ]] || mkdir -p "${PORTAGE_TMPDIR}/portage"
+ TMPDIR=$(mktemp -d "${PORTAGE_TMPDIR}/portage/webrsync-XXXXXX")
+ if [[ ! -w ${TMPDIR} ]] ; then
+ eecho "TMPDIR is not writable: ${TMPDIR}"
+ exit 1
+ fi
+ trap 'cd / ; rm -rf "${TMPDIR}"' EXIT
+ cd "${TMPDIR}" || exit 1
+
+ ${keep} || DISTDIR=${TMPDIR}
+ [ ! -d "${DISTDIR}" ] && mkdir -p "${DISTDIR}"
+
+ if ${keep} && [[ ! -w ${DISTDIR} ]] ; then
+ eecho "DISTDIR is not writable: ${DISTDIR}"
+ exit 1
+ fi
+
# This is a sanity check to help prevent people like funtoo users
# from accidentally wiping out their git tree.
- if [[ -n $SYNC && ${SYNC#rsync:} = $SYNC ]] ; then
- echo "The current SYNC variable setting does not refer to an rsync URI:" >&2
+ if [[ -n ${repo_sync_type} && ${repo_sync_type} != rsync ]] ; then
+ echo "The current sync-type attribute of repository 'gentoo' is not set to 'rsync':" >&2
echo >&2
- echo " SYNC=$SYNC" >&2
+ echo " sync-type=${repo_sync_type}" >&2
echo >&2
echo "If you intend to use emerge-webrsync then please" >&2
- echo "adjust SYNC to refer to an rsync URI." >&2
- echo "emerge-webrsync exiting due to abnormal SYNC setting." >&2
+ echo "adjust sync-type and sync-uri attributes to refer to rsync." >&2
+ echo "emerge-webrsync exiting due to abnormal sync-type setting." >&2
exit 1
fi
diff --git a/bin/emirrordist b/bin/emirrordist
new file mode 100755
index 000000000..0368eee2a
--- /dev/null
+++ b/bin/emirrordist
@@ -0,0 +1,13 @@
+#!/usr/bin/python -b
+# Copyright 2013-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+import portage
+portage._internal_caller = True
+portage._disable_legacy_globals()
+from portage._emirrordist.main import emirrordist_main
+
+if __name__ == "__main__":
+ sys.exit(emirrordist_main(sys.argv[1:]))
diff --git a/bin/env-update b/bin/env-update
index 8a69f2bb2..7651ef9c1 100755
--- a/bin/env-update
+++ b/bin/env-update
@@ -1,5 +1,5 @@
-#!/usr/bin/python -O
-# Copyright 1999-2006 Gentoo Foundation
+#!/usr/bin/python -bO
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -25,12 +25,12 @@ if len(sys.argv) > 1:
print("!!! Invalid command line options!\n")
usage(1)
-try:
- import portage
-except ImportError:
- from os import path as osp
- sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
- import portage
+from os import path as osp
+pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
+sys.path.insert(0, pym_path)
+import portage
+portage._internal_caller = True
+
try:
portage.env_update(makelinks)
except IOError as e:
diff --git a/bin/etc-update b/bin/etc-update
index d763c1f73..1a99231b7 100755
--- a/bin/etc-update
+++ b/bin/etc-update
@@ -62,7 +62,7 @@ do_mv_ln() {
}
scan() {
- echo "Scanning Configuration files..."
+ ${QUIET} || echo "Scanning Configuration files..."
rm -rf "${TMP}"/files > /dev/null 2>&1
mkdir "${TMP}"/files || die "Failed mkdir command!"
count=0
@@ -107,13 +107,13 @@ scan() {
for mpath in ${CONFIG_PROTECT_MASK}; do
mpath="${EROOT%/}${mpath}"
if [[ "${rpath}" == "${mpath}"* ]] ; then
- echo "Updating masked file: ${live_file}"
+ ${QUIET} || echo "Updating masked file: ${live_file}"
mv "${cfg_file}" "${live_file}"
continue 2
fi
done
if [[ ! -f ${file} ]] ; then
- echo "Skipping non-file ${file} ..."
+ ${QUIET} || echo "Skipping non-file ${file} ..."
continue
fi
@@ -140,7 +140,7 @@ scan() {
fi
if [[ ${MATCHES} == 1 ]] ; then
- echo "Automerging trivial changes in: ${live_file}"
+ ${QUIET} || echo "Automerging trivial changes in: ${live_file}"
do_mv_ln "${cfg_file}" "${live_file}"
continue
else
@@ -548,9 +548,9 @@ die() {
local msg=$1 exitcode=${2:-1}
if [ ${exitcode} -eq 0 ] ; then
- printf 'Exiting: %b\n' "${msg}"
+ ${QUIET} || printf 'Exiting: %b\n' "${msg}"
scan > /dev/null
- [ ${count} -gt 0 ] && echo "NOTE: ${count} updates remaining"
+ ! ${QUIET} && [ ${count} -gt 0 ] && echo "NOTE: ${count} updates remaining"
else
error "${msg}"
fi
@@ -575,6 +575,7 @@ usage() {
-d, --debug Enable shell debugging
-h, --help Show help and run away
-p, --preen Automerge trivial changes only and quit
+ -q, --quiet Show only essential output
-v, --verbose Show settings and such along the way
-V, --version Show version and trundle away
@@ -600,6 +601,7 @@ declare title="Gentoo's etc-update tool!"
PREEN=false
SET_X=false
+QUIET=false
VERBOSE=false
NONINTERACTIVE_MV=false
while [[ -n $1 ]] ; do
@@ -607,6 +609,7 @@ while [[ -n $1 ]] ; do
-d|--debug) SET_X=true;;
-h|--help) usage;;
-p|--preen) PREEN=true;;
+ -q|--quiet) QUIET=true;;
-v|--verbose) VERBOSE=true;;
-V|--version) emerge --version; exit 0;;
--automode) parse_automode_flag $2 && shift || usage 1 "Invalid mode '$2'";;
@@ -617,7 +620,7 @@ while [[ -n $1 ]] ; do
done
${SET_X} && set -x
-type portageq >/dev/null || die "missing portageq"
+type -P portageq >/dev/null || die "missing portageq"
portage_vars=(
CONFIG_PROTECT{,_MASK}
PORTAGE_CONFIGROOT
@@ -627,7 +630,7 @@ portage_vars=(
USERLAND
NOCOLOR
)
-eval $(portageq envvar -v ${portage_vars[@]})
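+# Invoke portageq explicitly via ${PORTAGE_PYTHON} when it is set,
+# instead of relying on the script's shebang alone.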
+eval $(${PORTAGE_PYTHON:+"${PORTAGE_PYTHON}"} "$(type -P portageq)" envvar -v ${portage_vars[@]})
export PORTAGE_TMPDIR
SCAN_PATHS=${*:-${CONFIG_PROTECT}}
diff --git a/bin/filter-bash-environment.py b/bin/filter-bash-environment.py
index b9aec96d0..a4cdc5429 100755
--- a/bin/filter-bash-environment.py
+++ b/bin/filter-bash-environment.py
@@ -1,10 +1,9 @@
-#!/usr/bin/python
-# Copyright 1999-2011 Gentoo Foundation
+#!/usr/bin/python -b
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import codecs
import io
-import optparse
import os
import re
import sys
@@ -126,10 +125,19 @@ if __name__ == "__main__":
"intact. The PATTERN is a space separated list of variable names" + \
" and it supports python regular expression syntax."
usage = "usage: %s PATTERN" % os.path.basename(sys.argv[0])
- parser = optparse.OptionParser(description=description, usage=usage)
- options, args = parser.parse_args(sys.argv[1:])
+ args = sys.argv[1:]
+
+ if '-h' in args or '--help' in args:
+ sys.stdout.write(usage + "\n")
+ sys.stdout.flush()
+ sys.exit(os.EX_OK)
+
if len(args) != 1:
- parser.error("Missing required PATTERN argument.")
+ sys.stderr.write(usage + "\n")
+ sys.stderr.write("Exactly one PATTERN argument required.\n")
+ sys.stderr.flush()
+ sys.exit(2)
+
file_in = sys.stdin
file_out = sys.stdout
if sys.hexversion >= 0x3000000:
diff --git a/bin/fixpackages b/bin/fixpackages
index dc43ed2b3..cec0030f2 100755
--- a/bin/fixpackages
+++ b/bin/fixpackages
@@ -1,5 +1,5 @@
-#!/usr/bin/python
-# Copyright 1999-2011 Gentoo Foundation
+#!/usr/bin/python -b
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -7,21 +7,27 @@ from __future__ import print_function
import os
import sys
-try:
- import portage
-except ImportError:
- from os import path as osp
- sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
- import portage
-
+from os import path as osp
+pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
+sys.path.insert(0, pym_path)
+import portage
+portage._internal_caller = True
from portage import os
from portage.output import EOutput
+from portage.util._argparse import ArgumentParser
from textwrap import wrap
from portage._global_updates import _global_updates
mysettings = portage.settings
mytrees = portage.db
mtimedb = portage.mtimedb
+description = """The fixpackages program performs package move updates on
+ configuration files, installed packages, and binary packages."""
+description = " ".join(description.split())
+
+parser = ArgumentParser(description=description)
+parser.parse_args()
+
if mysettings['ROOT'] != "/":
out = EOutput()
msg = "The fixpackages program is not intended for use with " + \
diff --git a/bin/glsa-check b/bin/glsa-check
index a840c3206..972679a80 100755
--- a/bin/glsa-check
+++ b/bin/glsa-check
@@ -1,81 +1,79 @@
-#!/usr/bin/python
-# Copyright 2008-2011 Gentoo Foundation
+#!/usr/bin/python -b
+# Copyright 2008-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
import sys
+import codecs
-try:
- import portage
-except ImportError:
- from os import path as osp
- sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
- import portage
-
+from os import path as osp
+pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
+sys.path.insert(0, pym_path)
+import portage
+portage._internal_caller = True
from portage import os
-from portage.output import *
-
-from optparse import OptionGroup, OptionParser
+from portage.output import green, red, nocolor, white
+from portage.util._argparse import ArgumentParser
__program__ = "glsa-check"
__author__ = "Marius Mauch <genone@gentoo.org>"
__version__ = "1.0"
-def cb_version(*args, **kwargs):
- """Callback for --version"""
- sys.stderr.write("\n"+ __program__ + ", version " + __version__ + "\n")
- sys.stderr.write("Author: " + __author__ + "\n")
- sys.stderr.write("This program is licensed under the GPL, version 2\n\n")
- sys.exit(0)
-
# option parsing
-parser = OptionParser(usage="%prog <option> [glsa-list]",
- version="%prog "+ __version__)
-parser.epilog = "glsa-list can contain an arbitrary number of GLSA ids," \
+epilog = "glsa-list can contain an arbitrary number of GLSA ids," \
" filenames containing GLSAs or the special identifiers" \
" 'all', 'new' and 'affected'"
+parser = ArgumentParser(usage=__program__ + " <option> [glsa-list]",
+ epilog=epilog)
-modes = OptionGroup(parser, "Modes")
-modes.add_option("-l", "--list", action="store_const",
+modes = parser.add_argument_group("Modes")
+modes.add_argument("-l", "--list", action="store_const",
const="list", dest="mode",
help="List all unapplied GLSA")
-modes.add_option("-d", "--dump", action="store_const",
+modes.add_argument("-d", "--dump", action="store_const",
const="dump", dest="mode",
help="Show all information about the given GLSA")
-modes.add_option("", "--print", action="store_const",
+modes.add_argument("--print", action="store_const",
const="dump", dest="mode",
help="Alias for --dump")
-modes.add_option("-t", "--test", action="store_const",
+modes.add_argument("-t", "--test", action="store_const",
const="test", dest="mode",
help="Test if this system is affected by the given GLSA")
-modes.add_option("-p", "--pretend", action="store_const",
+modes.add_argument("-p", "--pretend", action="store_const",
const="pretend", dest="mode",
help="Show the necessary commands to apply this GLSA")
-modes.add_option("-f", "--fix", action="store_const",
+modes.add_argument("-f", "--fix", action="store_const",
const="fix", dest="mode",
help="Try to auto-apply this GLSA (experimental)")
-modes.add_option("-i", "--inject", action="store_const", dest="mode",
- help="Inject the given GLSA into the checkfile")
-modes.add_option("-m", "--mail", action="store_const",
+modes.add_argument("-i", "--inject", action="store_const",
+ const="inject", dest="mode",
+ help="inject the given GLSA into the glsa_injected file")
+modes.add_argument("-m", "--mail", action="store_const",
const="mail", dest="mode",
help="Send a mail with the given GLSAs to the administrator")
-parser.add_option_group(modes)
-parser.remove_option("--version")
-parser.add_option("-V", "--version", action="callback",
- callback=cb_version, help="Some information about this tool")
-parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
+parser.add_argument("-V", "--version", action="store_true",
+ help="Some information about this tool")
+parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
help="Print more information")
-parser.add_option("-n", "--nocolor", action="callback",
- callback=lambda *args, **kwargs: nocolor(),
+parser.add_argument("-n", "--nocolor", action="store_true",
help="Disable colors")
-parser.add_option("-e", "--emergelike", action="store_false", dest="least_change",
+parser.add_argument("-e", "--emergelike", action="store_false", dest="least_change",
help="Do not use a least-change algorithm")
-parser.add_option("-c", "--cve", action="store_true", dest="list_cve",
+parser.add_argument("-c", "--cve", action="store_true", dest="list_cve",
help="Show CAN ids in listing mode")
-options, params = parser.parse_args()
+options, params = parser.parse_known_args()
+
+if options.nocolor:
+ nocolor()
+
+if options.version:
+ sys.stderr.write("\n"+ __program__ + ", version " + __version__ + "\n")
+ sys.stderr.write("Author: " + __author__ + "\n")
+ sys.stderr.write("This program is licensed under the GPL, version 2\n\n")
+ sys.exit(0)
mode = options.mode
least_change = options.least_change
@@ -101,7 +99,8 @@ elif mode == "list" and not params:
params.append("new")
# delay this import for a speed increase
-from portage.glsa import *
+from portage.glsa import (Glsa, GlsaTypeException, GlsaFormatException,
+ get_applied_glsas, get_glsa_list)
eroot = portage.settings['EROOT']
vardb = portage.db[eroot]["vartree"].dbapi
@@ -117,7 +116,7 @@ glsalist = []
if "new" in params:
glsalist = todolist
params.remove("new")
-
+
if "all" in params:
glsalist = completelist
params.remove("all")
@@ -142,8 +141,17 @@ for p in params[:]:
glsalist.extend([g for g in params if g not in glsalist])
-def summarylist(myglsalist, fd1=sys.stdout, fd2=sys.stderr):
- fd2.write(white("[A]")+" means this GLSA was already applied,\n")
+def summarylist(myglsalist, fd1=sys.stdout, fd2=sys.stderr, encoding="utf-8"):
+ # Get to the raw streams in py3k before wrapping them with an encoded writer
+ # to avoid writing bytes to a text stream (stdout/stderr are text streams
+ # by default in py3k)
+ if hasattr(fd1, "buffer"):
+ fd1 = fd1.buffer
+ if hasattr(fd2, "buffer"):
+ fd2 = fd2.buffer
+ fd1 = codecs.getwriter(encoding)(fd1)
+ fd2 = codecs.getwriter(encoding)(fd2)
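+	# From here on, text written to fd1/fd2 is transparently encoded
+	# to bytes on the underlying raw streams.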
+ fd2.write(white("[A]")+" means this GLSA was marked as applied (injected),\n")
fd2.write(green("[U]")+" means the system is not affected and\n")
fd2.write(red("[N]")+" indicates that the system might be affected.\n\n")
@@ -155,7 +163,7 @@ def summarylist(myglsalist, fd1=sys.stdout, fd2=sys.stderr):
if verbose:
fd2.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
continue
- if myglsa.isApplied():
+ if myglsa.isInjected():
status = "[A]"
color = white
elif myglsa.isVulnerable():
@@ -186,7 +194,7 @@ def summarylist(myglsalist, fd1=sys.stdout, fd2=sys.stderr):
fd1.write(")")
if list_cve:
fd1.write(" "+(",".join([r[:13] for r in myglsa.references if r[:4] in ["CAN-", "CVE-"]])))
- fd1.write("\n")
+ fd1.write("\n")
return 0
if mode == "list":
@@ -204,39 +212,46 @@ if mode in ["dump", "fix", "inject", "pretend"]:
if mode == "dump":
myglsa.dump()
elif mode == "fix":
- sys.stdout.write("fixing "+myid+"\n")
- mergelist = myglsa.getMergeList(least_change=least_change)
- for pkg in mergelist:
- sys.stdout.write(">>> merging "+pkg+"\n")
- # using emerge for the actual merging as it contains the dependency
- # code and we want to be consistent in behaviour. Also this functionality
- # will be integrated in emerge later, so it shouldn't hurt much.
- emergecmd = "emerge --oneshot " + portage.settings["EMERGE_OPTS"] + " =" + pkg
- if verbose:
- sys.stderr.write(emergecmd+"\n")
- exitcode = os.system(emergecmd)
- # system() returns the exitcode in the high byte of a 16bit integer
- if exitcode >= 1<<8:
- exitcode >>= 8
- if exitcode:
- sys.exit(exitcode)
- myglsa.inject()
+ sys.stdout.write("Fixing GLSA "+myid+"\n")
+ if not myglsa.isVulnerable():
+ sys.stdout.write(">>> no vulnerable packages installed\n")
+ else:
+ mergelist = myglsa.getMergeList(least_change=least_change)
+ if mergelist == []:
+ sys.stdout.write(">>> cannot fix GLSA, no unaffected packages available\n")
+ sys.exit(2)
+ for pkg in mergelist:
+ sys.stdout.write(">>> merging "+pkg+"\n")
+ # using emerge for the actual merging as it contains the dependency
+ # code and we want to be consistent in behaviour. Also this functionality
+ # will be integrated in emerge later, so it shouldn't hurt much.
+ emergecmd = "emerge --oneshot " + " =" + pkg
+ if verbose:
+ sys.stderr.write(emergecmd+"\n")
+ exitcode = os.system(emergecmd)
+ # system() returns the exitcode in the high byte of a 16bit integer
+ if exitcode >= 1<<8:
+ exitcode >>= 8
+ if exitcode:
+ sys.exit(exitcode)
+ if len(mergelist):
+ sys.stdout.write("\n")
elif mode == "pretend":
sys.stdout.write("Checking GLSA "+myid+"\n")
- mergelist = myglsa.getMergeList(least_change=least_change)
- if mergelist:
- sys.stdout.write("The following updates will be performed for this GLSA:\n")
- for pkg in mergelist:
- oldver = None
- for x in vardb.match(portage.cpv_getkey(pkg)):
- if vardb.aux_get(x, ["SLOT"]) == portdb.aux_get(pkg, ["SLOT"]):
- oldver = x
- if oldver == None:
- raise ValueError("could not find old version for package %s" % pkg)
- oldver = oldver[len(portage.cpv_getkey(oldver))+1:]
- sys.stdout.write(" " + pkg + " (" + oldver + ")\n")
+ if not myglsa.isVulnerable():
+ sys.stdout.write(">>> no vulnerable packages installed\n")
else:
- sys.stdout.write("Nothing to do for this GLSA\n")
+ mergedict = {}
+ for (vuln, update) in myglsa.getAffectionTable(least_change=least_change):
+ mergedict.setdefault(update, []).append(vuln)
+
+ sys.stdout.write(">>> The following updates will be performed for this GLSA:\n")
+ for pkg in mergedict:
+ if pkg != "":
+ sys.stdout.write(" " + pkg + " (vulnerable: " + ", ".join(mergedict[pkg]) + ")\n")
+ if "" in mergedict:
+ sys.stdout.write("\n>>> For the following packages, no upgrade path exists:\n")
+ sys.stdout.write(" " + ", ".join(mergedict[""]))
elif mode == "inject":
sys.stdout.write("injecting " + myid + "\n")
myglsa.inject()
@@ -268,9 +283,9 @@ if mode == "test":
# mail mode as requested by solar
if mode == "mail":
import portage.mail, socket
- from io import StringIO
+ from io import BytesIO
from email.mime.text import MIMEText
-
+
# color doesn't make any sense for mail
nocolor()
@@ -278,7 +293,7 @@ if mode == "mail":
myrecipient = portage.settings["PORTAGE_ELOG_MAILURI"].split()[0]
else:
myrecipient = "root@localhost"
-
+
if "PORTAGE_ELOG_MAILFROM" in portage.settings:
myfrom = portage.settings["PORTAGE_ELOG_MAILFROM"]
else:
@@ -287,11 +302,13 @@ if mode == "mail":
mysubject = "[glsa-check] Summary for %s" % socket.getfqdn()
# need a file object for summarylist()
- myfd = StringIO()
- myfd.write("GLSA Summary report for host %s\n" % socket.getfqdn())
- myfd.write("(Command was: %s)\n\n" % " ".join(sys.argv))
+ myfd = BytesIO()
+ line = "GLSA Summary report for host %s\n" % socket.getfqdn()
+ myfd.write(line.encode("utf-8"))
+ line = "(Command was: %s)\n\n" % " ".join(sys.argv)
+ myfd.write(line.encode("utf-8"))
summarylist(glsalist, fd1=myfd, fd2=myfd)
- summary = str(myfd.getvalue())
+ summary = myfd.getvalue().decode("utf-8")
myfd.close()
myattachments = []
@@ -302,16 +319,17 @@ if mode == "mail":
if verbose:
sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
continue
- myfd = StringIO()
+ myfd = BytesIO()
myglsa.dump(outstream=myfd)
- myattachments.append(MIMEText(str(myfd.getvalue()), _charset="utf8"))
+ attachment = myfd.getvalue().decode("utf-8")
+ myattachments.append(MIMEText(attachment, _charset="utf8"))
myfd.close()
-
+
mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject, summary, myattachments)
portage.mail.send_mail(portage.settings, mymessage)
-
+
sys.exit(0)
-
+
# something wrong here, all valid paths are covered with sys.exit()
sys.stderr.write("nothing more to do\n")
sys.exit(2)
diff --git a/bin/helper-functions.sh b/bin/helper-functions.sh
index c7400fa4b..b9bc74a2e 100644
--- a/bin/helper-functions.sh
+++ b/bin/helper-functions.sh
@@ -10,42 +10,45 @@ source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
#
# API functions for doing parallel processing
#
-numjobs() {
+makeopts_jobs() {
# Copied from eutils.eclass:makeopts_jobs()
local jobs=$(echo " ${MAKEOPTS} " | \
sed -r -n 's:.*[[:space:]](-j|--jobs[=[:space:]])[[:space:]]*([0-9]+).*:\2:p')
echo ${jobs:-1}
}
-multijob_init() {
+__multijob_init() {
# Setup a pipe for children to write their pids to when they finish.
- mj_control_pipe=$(mktemp -t multijob.XXXXXX)
- rm "${mj_control_pipe}"
- mkfifo "${mj_control_pipe}"
- redirect_alloc_fd mj_control_fd "${mj_control_pipe}"
- rm -f "${mj_control_pipe}"
+ # We have to allocate two fd's because POSIX has undefined behavior
+ # when you open a FIFO for simultaneous read/write. #487056
+ local pipe=$(mktemp -t multijob.XXXXXX)
+ rm -f "${pipe}"
+ mkfifo -m 600 "${pipe}"
+ __redirect_alloc_fd mj_write_fd "${pipe}"
+ __redirect_alloc_fd mj_read_fd "${pipe}"
+ rm -f "${pipe}"
# See how many children we can fork based on the user's settings.
- mj_max_jobs=$(numjobs)
+ mj_max_jobs=$(makeopts_jobs "$@")
mj_num_jobs=0
}
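# A typical usage sketch for this API ("do_work" is a hypothetical job
# function, not defined here):
#
#   __multijob_init
#   for x in ${items} ; do
#       (
#           __multijob_child_init
#           do_work "${x}"
#       ) &
#       __multijob_post_fork || die "job failed"
#   done
#   __multijob_finish || die "some jobs failed"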
-multijob_child_init() {
- trap 'echo ${BASHPID} $? >&'${mj_control_fd} EXIT
+__multijob_child_init() {
+ trap 'echo ${BASHPID:-$(__bashpid)} $? >&'${mj_write_fd} EXIT
trap 'exit 1' INT TERM
}
-multijob_finish_one() {
+__multijob_finish_one() {
local pid ret
- read -r -u ${mj_control_fd} pid ret
+ read -r -u ${mj_read_fd} pid ret
: $(( --mj_num_jobs ))
return ${ret}
}
-multijob_finish() {
+__multijob_finish() {
local ret=0
while [[ ${mj_num_jobs} -gt 0 ]] ; do
- multijob_finish_one
+ __multijob_finish_one
: $(( ret |= $? ))
done
# Let bash clean up its internal child tracking state.
@@ -53,38 +56,42 @@ multijob_finish() {
return ${ret}
}
-multijob_post_fork() {
+__multijob_post_fork() {
: $(( ++mj_num_jobs ))
if [[ ${mj_num_jobs} -ge ${mj_max_jobs} ]] ; then
- multijob_finish_one
+ __multijob_finish_one
fi
return $?
}
-# @FUNCTION: redirect_alloc_fd
+# @FUNCTION: __redirect_alloc_fd
# @USAGE: <var> <file> [redirection]
# @DESCRIPTION:
# Find a free fd and redirect the specified file via it. Store the new
# fd in the specified variable. Useful for the cases where we don't care
# about the exact fd #.
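# For example, '__redirect_alloc_fd fd "${file}"' opens ${file} read/write
# on a free descriptor and stores that descriptor's number in the variable "fd".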
-redirect_alloc_fd() {
+__redirect_alloc_fd() {
local var=$1 file=$2 redir=${3:-"<>"}
if [[ $(( (BASH_VERSINFO[0] << 8) + BASH_VERSINFO[1] )) -ge $(( (4 << 8) + 1 )) ]] ; then
- # Newer bash provides this functionality.
- eval "exec {${var}}${redir}'${file}'"
+ # Newer bash provides this functionality.
+ eval "exec {${var}}${redir}'${file}'"
else
- # Need to provide the functionality ourselves.
- local fd=10
- while :; do
- # Make sure the fd isn't open. It could be a char device,
- # or a symlink (possibly broken) to something else.
- if [[ ! -e /dev/fd/${fd} ]] && [[ ! -L /dev/fd/${fd} ]] ; then
- eval "exec ${fd}${redir}'${file}'" && break
- fi
- [[ ${fd} -gt 1024 ]] && die "redirect_alloc_fd failed"
- : $(( ++fd ))
- done
- : $(( ${var} = fd ))
+ # Need to provide the functionality ourselves.
+ local fd=10
+ local fddir=/dev/fd
+ # Prefer /proc/self/fd if available (/dev/fd
+ # doesn't work on solaris, see bug #474536).
+ [[ -d /proc/self/fd ]] && fddir=/proc/self/fd
+ while :; do
+ # Make sure the fd isn't open. It could be a char device,
+ # or a symlink (possibly broken) to something else.
+ if [[ ! -e ${fddir}/${fd} ]] && [[ ! -L ${fddir}/${fd} ]] ; then
+ eval "exec ${fd}${redir}'${file}'" && break
+ fi
+ [[ ${fd} -gt 1024 ]] && die 'could not locate a free temp fd !?'
+ : $(( ++fd ))
+ done
+ : $(( ${var} = fd ))
fi
}
diff --git a/bin/install.py b/bin/install.py
new file mode 100755
index 000000000..3c5e0de65
--- /dev/null
+++ b/bin/install.py
@@ -0,0 +1,253 @@
+#!/usr/bin/python -b
+# Copyright 2013-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import stat
+import sys
+import subprocess
+import traceback
+
+import portage
+from portage.util._argparse import ArgumentParser
+from portage.util.movefile import _copyxattr
+from portage.exception import OperationNotSupported
+
+# Change back to original cwd _after_ all imports (bug #469338).
+os.chdir(os.environ["__PORTAGE_HELPER_CWD"])
+
+def parse_args(args):
+ """
+	Parse the command line arguments using portage.util._argparse
+	(argparse-compatible, with an optparse fallback for python 2.6 compatibility)
+	Args:
+		args: a list of the whitespace-delimited command line arguments
+	Returns:
+		tuple of the Namespace of parsed options, and a list of ordered non-option parameters
+ """
+ parser = ArgumentParser(add_help=False)
+
+ parser.add_argument(
+ "-b",
+ action="store_true",
+ dest="shortopt_b"
+ )
+ parser.add_argument(
+ "--backup",
+ action="store",
+ dest="backup"
+ )
+ parser.add_argument(
+ "-c",
+ action="store_true",
+ dest="shortopt_c"
+ )
+ parser.add_argument(
+ "--compare",
+ "-C",
+ action="store_true",
+ dest="compare"
+ )
+ parser.add_argument(
+ "--directory",
+ "-d",
+ action="store_true",
+ dest="directory"
+ )
+ parser.add_argument(
+ "-D",
+ action="store_true",
+ dest="shortopt_D"
+ )
+ parser.add_argument(
+ "--owner",
+ "-o",
+ action="store",
+ dest="owner"
+ )
+ parser.add_argument(
+ "--group",
+ "-g",
+ action="store",
+ dest="group"
+ )
+ parser.add_argument(
+ "--mode",
+ "-m",
+ action="store",
+ dest="mode"
+ )
+ parser.add_argument(
+ "--preserve-timestamps",
+ "-p",
+ action="store_true",
+ dest="preserve_timestamps"
+ )
+ parser.add_argument(
+ "--strip",
+ "-s",
+ action="store_true",
+ dest="strip"
+ )
+ parser.add_argument(
+ "--strip-program",
+ action="store",
+ dest="strip_program"
+ )
+ parser.add_argument(
+ "--suffix",
+ "-S",
+ action="store",
+ dest="suffix"
+ )
+ parser.add_argument(
+ "--target-directory",
+ "-t",
+ action="store",
+ dest="target_directory"
+ )
+ parser.add_argument(
+ "--no-target-directory",
+ "-T",
+ action="store_true",
+ dest="no_target_directory"
+ )
+ parser.add_argument(
+ "--context",
+ "-Z",
+ action="store",
+ dest="context"
+ )
+ parser.add_argument(
+ "--verbose",
+ "-v",
+ action="store_true",
+ dest="verbose"
+ )
+ parser.add_argument(
+ "--help",
+ action="store_true",
+ dest="help"
+ )
+ parser.add_argument(
+ "--version",
+ action="store_true",
+ dest="version"
+ )
+
+ # Use parse_known_args for maximum compatibility with
+ # getopt handling of non-option file arguments. Note
+ # that parser.add_argument("files", nargs='+') would
+ # be subtly incompatible because it requires that all
+ # of the file arguments be grouped sequentially. Also
+ # note that we have to explicitly call add_argument
+ # for known options in order for argparse to correctly
+ # separate option arguments from file arguments in all
+ # cases (it also allows for optparse compatibility).
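+	# For example, an argv of ["-m", "0644", "foo", "/dest"] parses to
+	# opts.mode == "0644" with files == ["foo", "/dest"].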
+ parsed_args = parser.parse_known_args()
+
+ opts = parsed_args[0]
+ files = parsed_args[1]
+ files = [f for f in files if f != "--"] # filter out "--"
+
+ return (opts, files)
+
+
+def copy_xattrs(opts, files):
+ """
+ Copy the extended attributes using portage.util.movefile._copyxattr
+ Args:
+		opts: Namespace of the parsed command line options
+ files: list of ordered command line parameters which should be files/directories
+ Returns:
+ system exit code
+ """
+ if opts.directory or not files:
+ return os.EX_OK
+
+ if opts.target_directory is None:
+ source, target = files[:-1], files[-1]
+ target_is_directory = os.path.isdir(target)
+ else:
+ source, target = files, opts.target_directory
+ target_is_directory = True
+
+ exclude = os.environ.get("PORTAGE_XATTR_EXCLUDE", "security.* system.nfs4_acl")
+
+ try:
+ if target_is_directory:
+ for s in source:
+ abs_path = os.path.join(target, os.path.basename(s))
+ _copyxattr(s, abs_path, exclude=exclude)
+ else:
+ _copyxattr(source[0], target, exclude=exclude)
+ return os.EX_OK
+
+ except OperationNotSupported:
+ traceback.print_exc()
+ return os.EX_OSERR
+
+
+def Which(filename, path=None, exclude=None):
+ """
+ Find the absolute path of 'filename' in a given search 'path'
+ Args:
+ filename: basename of the file
+ path: colon delimited search path
+ exclude: path of file to exclude
+ """
+ if path is None:
+ path = os.environ.get('PATH', '')
+
+ if exclude is not None:
+ st = os.stat(exclude)
+ exclude = (st.st_ino, st.st_dev)
+
+ for p in path.split(':'):
+ p = os.path.join(p, filename)
+ if os.access(p, os.X_OK):
+ try:
+ st = os.stat(p)
+ except OSError:
+ # file disappeared?
+ pass
+ else:
+ if stat.S_ISREG(st.st_mode) and \
+ (exclude is None or exclude != (st.st_ino, st.st_dev)):
+ return p
+
+ return None
+
+
+def main(args):
+ opts, files = parse_args(args)
+ install_binary = Which('install', exclude=os.environ["__PORTAGE_HELPER_PATH"])
+ if install_binary is None:
+ sys.stderr.write("install: command not found\n")
+ return 127
+
+ cmdline = [install_binary]
+ cmdline += args
+
+ if sys.hexversion >= 0x3000000:
+ # We can't trust that the filesystem encoding (locale dependent)
+ # correctly matches the arguments, so use surrogateescape to
+ # pass through the original argv bytes for Python 3.
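+		# (An undecodable byte such as 0xFF survives as the surrogate
+		# U+DCFF and re-encodes back to 0xFF.)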
+ fs_encoding = sys.getfilesystemencoding()
+ cmdline = [x.encode(fs_encoding, 'surrogateescape') for x in cmdline]
+ files = [x.encode(fs_encoding, 'surrogateescape') for x in files]
+ if opts.target_directory is not None:
+ opts.target_directory = \
+ opts.target_directory.encode(fs_encoding, 'surrogateescape')
+
+ returncode = subprocess.call(cmdline)
+ if returncode == os.EX_OK:
+ returncode = copy_xattrs(opts, files)
+ if returncode != os.EX_OK:
+ portage.util.writemsg("!!! install: copy_xattrs failed with the "
+ "following arguments: %s\n" %
+ " ".join(portage._shell_quote(x) for x in args), noiselevel=-1)
+ return returncode
+
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
diff --git a/bin/isolated-functions.sh b/bin/isolated-functions.sh
index dbf988b28..a22af574a 100644
--- a/bin/isolated-functions.sh
+++ b/bin/isolated-functions.sh
@@ -1,7 +1,9 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}/eapi.sh"
+
# We need this next line for "die" and "assert". It expands aliases.
# It _must_ precede all the calls to die and assert.
shopt -s expand_aliases
@@ -15,7 +17,7 @@ assert() {
done
}
-assert_sigpipe_ok() {
+__assert_sigpipe_ok() {
# When extracting a tar file like this:
#
# bzip2 -dc foo.tar.bz2 | tar xof -
@@ -43,21 +45,21 @@ assert_sigpipe_ok() {
shopt -s extdebug
-# dump_trace([number of funcs on stack to skip],
+# __dump_trace([number of funcs on stack to skip],
# [whitespacing for filenames],
# [whitespacing for line numbers])
-dump_trace() {
+__dump_trace() {
local funcname="" sourcefile="" lineno="" s="yes" n p
declare -i strip=${1:-1}
local filespacing=$2 linespacing=$3
- # The qa_call() function and anything before it are portage internals
+ # The __qa_call() function and anything before it are portage internals
# that the user will not be interested in. Therefore, the stack trace
- # should only show calls that come after qa_call().
+ # should only show calls that come after __qa_call().
(( n = ${#FUNCNAME[@]} - 1 ))
(( p = ${#BASH_ARGV[@]} ))
while (( n > 0 )) ; do
- [ "${FUNCNAME[${n}]}" == "qa_call" ] && break
+ [ "${FUNCNAME[${n}]}" == "__qa_call" ] && break
(( p -= ${BASH_ARGC[${n}]} ))
(( n-- ))
done
@@ -86,7 +88,7 @@ dump_trace() {
}
nonfatal() {
- if has "${EAPI:-0}" 0 1 2 3 3_pre2 ; then
+ if ! ___eapi_has_nonfatal; then
die "$FUNCNAME() not supported in this EAPI"
fi
if [[ $# -lt 1 ]]; then
@@ -96,18 +98,24 @@ nonfatal() {
PORTAGE_NONFATAL=1 "$@"
}
-helpers_die() {
- case "${EAPI:-0}" in
- 0|1|2|3)
- echo -e "$@" >&2
- ;;
- *)
- die "$@"
- ;;
- esac
+__bashpid() {
+ # The BASHPID variable is new to bash-4.0, so add a hack for older
+ # versions. This must be used like so:
+ # ${BASHPID:-$(__bashpid)}
+ sh -c 'echo ${PPID}'
+}
+
+__helpers_die() {
+ if ___eapi_helpers_can_die; then
+ die "$@"
+ else
+ echo -e "$@" >&2
+ fi
}
die() {
+ local IFS=$' \t\n'
+
if [[ $PORTAGE_NONFATAL -eq 1 ]]; then
echo -e " $WARN*$NORMAL ${FUNCNAME[1]}: WARNING: $@" >&2
return 1
@@ -124,7 +132,7 @@ die() {
# setup spacing to make output easier to read
(( n = ${#FUNCNAME[@]} - 1 ))
while (( n > 0 )) ; do
- [ "${FUNCNAME[${n}]}" == "qa_call" ] && break
+ [ "${FUNCNAME[${n}]}" == "__qa_call" ] && break
(( n-- ))
done
(( n == 0 )) && (( n = ${#FUNCNAME[@]} - 1 ))
@@ -140,14 +148,14 @@ die() {
# get a stack trace, so at least report the phase that failed.
local phase_str=
[[ -n $EBUILD_PHASE ]] && phase_str=" ($EBUILD_PHASE phase)"
- eerror "ERROR: $CATEGORY/$PF failed${phase_str}:"
+ eerror "ERROR: ${CATEGORY}/${PF}::${PORTAGE_REPO_NAME} failed${phase_str}:"
eerror " ${*:-(no error message)}"
eerror
- # dump_trace is useless when the main script is a helper binary
+ # __dump_trace is useless when the main script is a helper binary
local main_index
(( main_index = ${#BASH_SOURCE[@]} - 1 ))
if has ${BASH_SOURCE[$main_index]##*/} ebuild.sh misc-functions.sh ; then
- dump_trace 2 ${filespacing} ${linespacing}
+ __dump_trace 2 ${filespacing} ${linespacing}
eerror " $(printf "%${filespacing}s" "${BASH_SOURCE[1]##*/}"), line $(printf "%${linespacing}s" "${BASH_LINENO[0]}"): Called die"
eerror "The specific snippet of code:"
# This scans the file that called die and prints out the logic that
@@ -173,39 +181,12 @@ die() {
| while read -r n ; do eerror " ${n#RETAIN-LEADING-SPACE}" ; done
eerror
fi
- eerror "If you need support, post the output of \`emerge --info '=$CATEGORY/$PF'\`,"
- eerror "the complete build log and the output of \`emerge -pqv '=$CATEGORY/$PF'\`."
- if [[ -n ${EBUILD_OVERLAY_ECLASSES} ]] ; then
- eerror "This ebuild used the following eclasses from overlays:"
- local x
- for x in ${EBUILD_OVERLAY_ECLASSES} ; do
- eerror " ${x}"
- done
- fi
- if [ "${EMERGE_FROM}" != "binary" ] && \
- ! has ${EBUILD_PHASE} prerm postrm && \
- [ "${EBUILD#${PORTDIR}/}" == "${EBUILD}" ] ; then
- local overlay=${EBUILD%/*}
- overlay=${overlay%/*}
- overlay=${overlay%/*}
- if [[ -n $PORTAGE_REPO_NAME ]] ; then
- eerror "This ebuild is from an overlay named" \
- "'$PORTAGE_REPO_NAME': '${overlay}/'"
- else
- eerror "This ebuild is from an overlay: '${overlay}/'"
- fi
- elif [[ -n $PORTAGE_REPO_NAME && -f "$PORTDIR"/profiles/repo_name ]] ; then
- local portdir_repo_name=$(<"$PORTDIR"/profiles/repo_name)
- if [[ -n $portdir_repo_name && \
- $portdir_repo_name != $PORTAGE_REPO_NAME ]] ; then
- eerror "This ebuild is from a repository" \
- "named '$PORTAGE_REPO_NAME'"
- fi
- fi
+ eerror "If you need support, post the output of \`emerge --info '=${CATEGORY}/${PF}::${PORTAGE_REPO_NAME}'\`,"
+ eerror "the complete build log and the output of \`emerge -pqv '=${CATEGORY}/${PF}::${PORTAGE_REPO_NAME}'\`."
# Only call die hooks here if we are executed via ebuild.sh or
# misc-functions.sh, since those are the only cases where the environment
- # contains the hook functions. When necessary (like for helpers_die), die
+ # contains the hook functions. When necessary (like for __helpers_die), die
# hooks are automatically called later by a misc-functions.sh invocation.
if has ${BASH_SOURCE[$main_index]##*/} ebuild.sh misc-functions.sh && \
[[ ${EBUILD_PHASE} != depend ]] ; then
@@ -218,7 +199,8 @@ die() {
if [[ -n ${PORTAGE_LOG_FILE} ]] ; then
eerror "The complete build log is located at '${PORTAGE_LOG_FILE}'."
- if [[ ${PORTAGE_LOG_FILE} != ${T}/* ]] ; then
+ if [[ ${PORTAGE_LOG_FILE} != ${T}/* ]] && \
+ ! has fail-clean ${FEATURES} ; then
# Display path to symlink in ${T}, as requested in bug #412865.
local log_ext=log
[[ ${PORTAGE_LOG_FILE} != *.log ]] && log_ext+=.${PORTAGE_LOG_FILE##*.}
@@ -241,26 +223,20 @@ die() {
[[ -n $PORTAGE_IPC_DAEMON ]] && "$PORTAGE_BIN_PATH"/ebuild-ipc exit 1
# subshell die support
- [[ $BASHPID = $EBUILD_MASTER_PID ]] || kill -s SIGTERM $EBUILD_MASTER_PID
+ [[ ${BASHPID:-$(__bashpid)} == ${EBUILD_MASTER_PID} ]] || kill -s SIGTERM ${EBUILD_MASTER_PID}
exit 1
}
-# We need to implement diefunc() since environment.bz2 files contain
-# calls to it (due to alias expansion).
-diefunc() {
- die "${@}"
-}
-
-quiet_mode() {
+__quiet_mode() {
[[ ${PORTAGE_QUIET} -eq 1 ]]
}
-vecho() {
- quiet_mode || echo "$@"
+__vecho() {
+ __quiet_mode || echo "$@"
}
# Internal logging function, don't use this in ebuilds
-elog_base() {
+__elog_base() {
local messagetype
[ -z "${1}" -o -z "${T}" -o ! -d "${T}/logging" ] && return 1
case "${1}" in
@@ -269,7 +245,7 @@ elog_base() {
shift
;;
*)
- vecho -e " ${BAD}*${NORMAL} Invalid use of internal function elog_base(), next message will not be logged"
+ __vecho -e " ${BAD}*${NORMAL} Invalid use of internal function __elog_base(), next message will not be logged"
return 1
;;
esac
@@ -281,17 +257,17 @@ elog_base() {
}
eqawarn() {
- elog_base QA "$*"
+ __elog_base QA "$*"
[[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
echo -e "$@" | while read -r ; do
- vecho " $WARN*$NORMAL $REPLY" >&2
+ __vecho " $WARN*$NORMAL $REPLY" >&2
done
LAST_E_CMD="eqawarn"
return 0
}
elog() {
- elog_base LOG "$*"
+ __elog_base LOG "$*"
[[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
echo -e "$@" | while read -r ; do
echo " $GOOD*$NORMAL $REPLY"
@@ -300,26 +276,8 @@ elog() {
return 0
}
-esyslog() {
- local pri=
- local tag=
-
- if [ -x /usr/bin/logger ]
- then
- pri="$1"
- tag="$2"
-
- shift 2
- [ -z "$*" ] && return 0
-
- /usr/bin/logger -p "${pri}" -t "${tag}" -- "$*"
- fi
-
- return 0
-}
-
einfo() {
- elog_base INFO "$*"
+ __elog_base INFO "$*"
[[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
echo -e "$@" | while read -r ; do
echo " $GOOD*$NORMAL $REPLY"
@@ -329,7 +287,7 @@ einfo() {
}
einfon() {
- elog_base INFO "$*"
+ __elog_base INFO "$*"
[[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
echo -ne " ${GOOD}*${NORMAL} $*"
LAST_E_CMD="einfon"
@@ -337,7 +295,7 @@ einfon() {
}
ewarn() {
- elog_base WARN "$*"
+ __elog_base WARN "$*"
[[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
echo -e "$@" | while read -r ; do
echo " $WARN*$NORMAL $RC_INDENTATION$REPLY" >&2
@@ -347,7 +305,7 @@ ewarn() {
}
eerror() {
- elog_base ERROR "$*"
+ __elog_base ERROR "$*"
[[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
echo -e "$@" | while read -r ; do
echo " $BAD*$NORMAL $RC_INDENTATION$REPLY" >&2
@@ -372,7 +330,7 @@ ebegin() {
return 0
}
-_eend() {
+__eend() {
local retval=${1:-0} efunc=${2:-eerror} msg
shift 2
@@ -399,13 +357,13 @@ eend() {
local retval=${1:-0}
shift
- _eend ${retval} eerror "$*"
+ __eend ${retval} eerror "$*"
LAST_E_CMD="eend"
return ${retval}
}
-unset_colors() {
+__unset_colors() {
COLS=80
ENDCOL=
@@ -417,7 +375,7 @@ unset_colors() {
BRACKET=
}
-set_colors() {
+__set_colors() {
COLS=${COLUMNS:-0} # bash's internal COLUMNS variable
# Avoid wasteful stty calls during the "depend" phases.
# If stdout is a pipe, the parent process can export COLUMNS
@@ -450,10 +408,10 @@ RC_DOT_PATTERN=''
case "${NOCOLOR:-false}" in
yes|true)
- unset_colors
+ __unset_colors
;;
no|false)
- set_colors
+ __set_colors
;;
esac
@@ -504,4 +462,24 @@ has() {
return 1
}
+__repo_attr() {
+ local appropriate_section=0 exit_status=1 line saved_extglob_shopt=$(shopt -p extglob)
+ shopt -s extglob
+ while read -r line; do
+ [[ ${appropriate_section} == 0 && ${line} == "[$1]" ]] && appropriate_section=1 && continue
+ [[ ${appropriate_section} == 1 && ${line} == "["*"]" ]] && appropriate_section=0 && continue
+ # If a conditional expression like [[ ${line} == $2*( )=* ]] is used
+ # then bash-3.2 produces an error like the following when the file is
+ # sourced: syntax error in conditional expression: unexpected token `('
+ # Therefore, use a regular expression for compatibility.
+ if [[ ${appropriate_section} == 1 && ${line} =~ ^${2}[[:space:]]*= ]]; then
+ echo "${line##$2*( )=*( )}"
+ exit_status=0
+ break
+ fi
+ done <<< "${PORTAGE_REPOSITORIES}"
+ eval "${saved_extglob_shopt}"
+ return ${exit_status}
+}
+
true
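
The new __repo_attr helper scans the repos.conf-style text in ${PORTAGE_REPOSITORIES} for a key inside a named section. Here is a minimal standalone sketch of the same section/key walk; the sample data, repository name, and keys below are made up for illustration and are not a real repos.conf:

    #!/bin/bash
    # Hypothetical INI-style blob standing in for ${PORTAGE_REPOSITORIES}.
    PORTAGE_REPOSITORIES='[DEFAULT]
    main-repo = gentoo

    [gentoo]
    location = /usr/portage
    sync-type = rsync'

    repo_attr_sketch() {
        local in_section=0 line
        while read -r line; do
            [[ ${in_section} == 0 && ${line} == "[$1]" ]] && in_section=1 && continue
            [[ ${in_section} == 1 && ${line} == "["*"]" ]] && in_section=0 && continue
            if [[ ${in_section} == 1 && ${line} =~ ^${2}[[:space:]]*= ]]; then
                # drop "key =" and any blanks that follow the equals sign
                echo "${line#*=}" | sed -e 's/^[[:space:]]*//'
                return 0
            fi
        done <<< "${PORTAGE_REPOSITORIES}"
        return 1
    }

    repo_attr_sketch gentoo location    # prints: /usr/portage
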
diff --git a/bin/lock-helper.py b/bin/lock-helper.py
index dfb887669..aa2dd60fa 100755
--- a/bin/lock-helper.py
+++ b/bin/lock-helper.py
@@ -1,11 +1,12 @@
-#!/usr/bin/python
-# Copyright 2010-2011 Gentoo Foundation
+#!/usr/bin/python -b
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import os
import sys
sys.path.insert(0, os.environ['PORTAGE_PYM_PATH'])
import portage
+portage._internal_caller = True
portage._disable_legacy_globals()
def main(args):
diff --git a/bin/misc-functions.sh b/bin/misc-functions.sh
index 9eec8bb69..5ccf7c224 100755
--- a/bin/misc-functions.sh
+++ b/bin/misc-functions.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
#
# Miscellaneous shell functions that make use of the ebuild env but don't need
@@ -17,8 +17,9 @@ shift $#
source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}/ebuild.sh"
install_symlink_html_docs() {
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
cd "${ED}" || die "cd failed"
#symlink the html documentation (if DOC_SYMLINKS_DIR is set in make.conf)
if [ -n "${DOC_SYMLINKS_DIR}" ] ; then
@@ -30,10 +31,10 @@ install_symlink_html_docs() {
done
if [ -n "${mydocdir}" ] ; then
local mysympath
- if [ -z "${SLOT}" -o "${SLOT}" = "0" ] ; then
+ if [ -z "${SLOT}" -o "${SLOT%/*}" = "0" ] ; then
mysympath="${DOC_SYMLINKS_DIR}/${CATEGORY}/${PN}"
else
- mysympath="${DOC_SYMLINKS_DIR}/${CATEGORY}/${PN}-${SLOT}"
+ mysympath="${DOC_SYMLINKS_DIR}/${CATEGORY}/${PN}-${SLOT%/*}"
fi
einfo "Symlinking ${mysympath} to the HTML documentation"
dodir "${DOC_SYMLINKS_DIR}/${CATEGORY}"
@@ -43,7 +44,20 @@ install_symlink_html_docs() {
}
# replacement for "readlink -f" or "realpath"
+READLINK_F_WORKS=""
canonicalize() {
+ if [[ -z ${READLINK_F_WORKS} ]] ; then
+ if [[ $(readlink -f -- /../ 2>/dev/null) == "/" ]] ; then
+ READLINK_F_WORKS=true
+ else
+ READLINK_F_WORKS=false
+ fi
+ fi
+ if ${READLINK_F_WORKS} ; then
+ readlink -f -- "$@"
+ return
+ fi
+
local f=$1 b n=10 wd=$(pwd)
while (( n-- > 0 )); do
while [[ ${f: -1} = / && ${#f} -gt 1 ]]; do
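
The hunk above caches a one-time capability probe so canonicalize() can delegate to readlink -f whenever it behaves correctly. The probe on its own, with a sample path showing the delegated case:

    # Does this readlink resolve paths the way we need? GNU coreutils
    # prints "/" for "readlink -f -- /../"; broken or absent ones do not.
    if [[ $(readlink -f -- /../ 2>/dev/null) == "/" ]]; then
        readlink -f -- /usr/../etc    # prints: /etc
    else
        echo "no usable 'readlink -f'; the pure-bash fallback loop is needed"
    fi
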
@@ -66,8 +80,9 @@ canonicalize() {
prepcompress() {
local -a include exclude incl_d incl_f
local f g i real_f real_d
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
# Canonicalize path names and check for their existence.
real_d=$(canonicalize "${ED}")
@@ -141,7 +156,7 @@ prepcompress() {
# Queue up for compression.
# ecompress{,dir} doesn't like to be called with empty argument lists.
- [[ ${#incl_d[@]} -gt 0 ]] && ecompressdir --queue "${incl_d[@]}"
+ [[ ${#incl_d[@]} -gt 0 ]] && ecompressdir --limit ${PORTAGE_DOCOMPRESS_SIZE_LIMIT:-0} --queue "${incl_d[@]}"
[[ ${#incl_f[@]} -gt 0 ]] && ecompress --queue "${incl_f[@]/#/${ED}}"
[[ ${#exclude[@]} -gt 0 ]] && ecompressdir --ignore "${exclude[@]}"
return 0
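
The queueing lines above lean on the ${array[@]/#/prefix} expansion to prepend ${ED} to every element in one step. The idiom in isolation, with sample paths:

    # Pattern substitution on the empty match anchored at '#' (the start
    # of each element) prepends a prefix to every member of the array.
    ED=/var/tmp/portage/image/
    incl_f=( usr/share/doc/pkg/README usr/share/man/man1/pkg.1 )
    printf '%s\n' "${incl_f[@]/#/${ED}}"
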
@@ -149,13 +164,12 @@ prepcompress() {
install_qa_check() {
local f i qa_var x
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local EPREFIX= ED=${D} ;; esac
+ if ! ___eapi_has_prefix_variables; then
+ local EPREFIX= ED=${D}
+ fi
cd "${ED}" || die "cd failed"
- # Merge QA_FLAGS_IGNORED and QA_DT_HASH into a single array, since
- # QA_DT_HASH is deprecated.
qa_var="QA_FLAGS_IGNORED_${ARCH/-/_}"
eval "[[ -n \${!qa_var} ]] && QA_FLAGS_IGNORED=(\"\${${qa_var}[@]}\")"
if [[ ${#QA_FLAGS_IGNORED[@]} -eq 1 ]] ; then
@@ -166,29 +180,6 @@ install_qa_check() {
set -${shopts}
fi
- qa_var="QA_DT_HASH_${ARCH/-/_}"
- eval "[[ -n \${!qa_var} ]] && QA_DT_HASH=(\"\${${qa_var}[@]}\")"
- if [[ ${#QA_DT_HASH[@]} -eq 1 ]] ; then
- local shopts=$-
- set -o noglob
- QA_DT_HASH=(${QA_DT_HASH})
- set +o noglob
- set -${shopts}
- fi
-
- if [[ -n ${QA_DT_HASH} ]] ; then
- QA_FLAGS_IGNORED=("${QA_FLAGS_IGNORED[@]}" "${QA_DT_HASH[@]}")
- unset QA_DT_HASH
- fi
-
- # Merge QA_STRICT_FLAGS_IGNORED and QA_STRICT_DT_HASH, since
- # QA_STRICT_DT_HASH is deprecated
- if [ "${QA_STRICT_FLAGS_IGNORED-unset}" = unset ] && \
- [ "${QA_STRICT_DT_HASH-unset}" != unset ] ; then
- QA_STRICT_FLAGS_IGNORED=1
- unset QA_STRICT_DT_HASH
- fi
-
# Check for files built without respecting *FLAGS. Note that
# -frecord-gcc-switches must be in all *FLAGS variables, in
# order to avoid false positive results here.
@@ -200,8 +191,7 @@ install_qa_check() {
[[ "${FFLAGS}" == *-frecord-gcc-switches* ]] && \
[[ "${FCFLAGS}" == *-frecord-gcc-switches* ]] ; then
rm -f "${T}"/scanelf-ignored-CFLAGS.log
- for x in $(scanelf -qyRF '%k %p' -k \!.GCC.command.line "${ED}" | \
- sed -e "s:\!.GCC.command.line ::") ; do
+ for x in $(scanelf -qyRF '#k%p' -k '!.GCC.command.line' "${ED}") ; do
# Separate out file types that are known to support
# .GCC.command.line sections, using the `file` command
# similar to how prepstrip uses it.
@@ -226,11 +216,11 @@ install_qa_check() {
-i "${T}"/scanelf-ignored-CFLAGS.log
f=$(<"${T}"/scanelf-ignored-CFLAGS.log)
if [[ -n ${f} ]] ; then
- vecho -ne '\n'
+ __vecho -ne '\n'
eqawarn "${BAD}QA Notice: Files built without respecting CFLAGS have been detected${NORMAL}"
eqawarn " Please include the following list of files in your report:"
eqawarn "${f}"
- vecho -ne '\n'
+ __vecho -ne '\n'
sleep 1
else
rm -f "${T}"/scanelf-ignored-CFLAGS.log
@@ -240,7 +230,7 @@ install_qa_check() {
export STRIP_MASK
prepall
- has "${EAPI}" 0 1 2 3 || prepcompress
+ ___eapi_has_docompress && prepcompress
ecompressdir --dequeue
ecompress --dequeue
@@ -251,32 +241,50 @@ install_qa_check() {
for x in etc/app-defaults usr/man usr/info usr/X11R6 usr/doc usr/locale ; do
[[ -d ${ED}/$x ]] && f+=" $x\n"
done
-
if [[ -n $f ]] ; then
eqawarn "QA Notice: This ebuild installs into the following deprecated directories:"
eqawarn
eqawarn "$f"
fi
- if [[ -d ${ED}/etc/udev/rules.d ]] ; then
- f=
- for x in $(ls "${ED}/etc/udev/rules.d") ; do
- f+=" etc/udev/rules.d/$x\n"
- done
- if [[ -n $f ]] ; then
- eqawarn "QA Notice: udev rules should be installed in /lib/udev/rules.d:"
- eqawarn
- eqawarn "$f"
+ # It's ok to create these directories, but not to install into them. #493154
+ # TODO: We should add var/lib to this list.
+ f=
+ for x in var/cache var/lock var/run run ; do
+ if [[ ! -L ${ED}/${x} && -d ${ED}/${x} ]] ; then
+ if [[ -z $(find "${ED}/${x}" -prune -empty) ]] ; then
+ f+=$(cd "${ED}"; find "${x}" -printf ' %p\n')
+ fi
fi
+ done
+ if [[ -n ${f} ]] ; then
+ eqawarn "QA Notice: This ebuild installs into paths that should be created at runtime."
+ eqawarn " To fix, simply do not install into these directories. Instead, your package"
+ eqawarn " should create dirs on the fly at runtime as needed via init scripts/etc..."
+ eqawarn
+ eqawarn "${f}"
+ fi
+
+ set +f
+ f=
+ for x in "${ED}etc/udev/rules.d/"* "${ED}lib"*"/udev/rules.d/"* ; do
+ [[ -e ${x} ]] || continue
+ [[ ${x} == ${ED}lib/udev/rules.d/* ]] && continue
+ f+=" ${x#${ED}}\n"
+ done
+ if [[ -n $f ]] ; then
+ eqawarn "QA Notice: udev rules should be installed in /lib/udev/rules.d:"
+ eqawarn
+ eqawarn "$f"
fi
# Now we look for all world writable files.
local unsafe_files=$(find "${ED}" -type f -perm -2 | sed -e "s:^${ED}:- :")
if [[ -n ${unsafe_files} ]] ; then
- vecho "QA Security Notice: world writable file(s):"
- vecho "${unsafe_files}"
- vecho "- This may or may not be a security problem, most of the time it is one."
- vecho "- Please double check that $PF really needs a world writeable bit and file bugs accordingly."
+ __vecho "QA Security Notice: world writable file(s):"
+ __vecho "${unsafe_files}"
+ __vecho "- This may or may not be a security problem, most of the time it is one."
+ __vecho "- Please double check that $PF really needs a world writeable bit and file bugs accordingly."
sleep 1
fi
@@ -307,7 +315,7 @@ install_qa_check() {
for l in $(echo "${rpath_files}" | grep -E ":${dir}|::|: "); do
f+=" ${l%%:*}\n"
if ! has stricter ${FEATURES}; then
- vecho "Auto fixing rpaths for ${l%%:*}"
+ __vecho "Auto fixing rpaths for ${l%%:*}"
TMPDIR="${dir}" scanelf -BXr "${l%%:*}" -o /dev/null
fi
done
@@ -321,12 +329,12 @@ install_qa_check() {
# Print QA notice.
if [[ -n ${f}${x} ]] ; then
- vecho -ne '\n'
+ __vecho -ne '\n'
eqawarn "QA Notice: The following files contain insecure RUNPATHs"
eqawarn " Please file a bug about this at http://bugs.gentoo.org/"
eqawarn " with the maintaining herd of the package."
eqawarn "${f}${f:+${x:+\n}}${x}"
- vecho -ne '\n'
+ __vecho -ne '\n'
if [[ -n ${x} ]] || has stricter ${FEATURES} ; then
insecure_rpath=1
fi
@@ -344,7 +352,7 @@ install_qa_check() {
f=$(scanelf -qyRF '%t %p' "${ED}" | grep -v 'usr/lib/debug/')
if [[ -n ${f} ]] ; then
scanelf -qyRAF '%T %p' "${PORTAGE_BUILDDIR}"/ &> "${T}"/scanelf-textrel.log
- vecho -ne '\n'
+ __vecho -ne '\n'
eqawarn "QA Notice: The following files contain runtime text relocations"
eqawarn " Text relocations force the dynamic linker to perform extra"
eqawarn " work at startup, waste system resources, and may pose a security"
@@ -353,7 +361,7 @@ install_qa_check() {
eqawarn " For more information, see http://hardened.gentoo.org/pic-fix-guide.xml"
eqawarn " Please include the following list of files in your report:"
eqawarn "${f}"
- vecho -ne '\n'
+ __vecho -ne '\n'
die_msg="${die_msg} textrels,"
sleep 1
fi
@@ -364,7 +372,7 @@ install_qa_check() {
*-linux-gnu*)
# Check for files with executable stacks, but only on arches which
# are supported at the moment. Keep this list in sync with
- # http://hardened.gentoo.org/gnu-stack.xml (Arch Status)
+ # http://www.gentoo.org/proj/en/hardened/gnu-stack.xml (Arch Status)
case ${CTARGET:-${CHOST}} in
arm*|i?86*|ia64*|m68k*|s390*|sh*|x86_64*)
# Allow devs to mark things as ignorable ... e.g. things
@@ -389,7 +397,7 @@ install_qa_check() {
if [[ -n ${f} ]] ; then
# One more pass to help devs track down the source
scanelf -qyRAF '%e %p' "${PORTAGE_BUILDDIR}"/ &> "${T}"/scanelf-execstack.log
- vecho -ne '\n'
+ __vecho -ne '\n'
eqawarn "QA Notice: The following files contain writable and executable sections"
eqawarn " Files with such sections will not work properly (or at all!) on some"
eqawarn " architectures/operating systems. A bug should be filed at"
@@ -399,15 +407,15 @@ install_qa_check() {
eqawarn " Note: Bugs should be filed for the respective maintainers"
eqawarn " of the package in question and not hardened@g.o."
eqawarn "${f}"
- vecho -ne '\n'
+ __vecho -ne '\n'
die_msg="${die_msg} execstacks"
sleep 1
fi
# Check for files built without respecting LDFLAGS
if [[ "${LDFLAGS}" == *,--hash-style=gnu* ]] && \
- ! has binchecks ${RESTRICT} ; then
- f=$(scanelf -qyRF '%k %p' -k .hash "${ED}" | sed -e "s:\.hash ::")
+ ! has binchecks ${RESTRICT} ; then
+ f=$(scanelf -qyRF '#k%p' -k .hash "${ED}")
if [[ -n ${f} ]] ; then
echo "${f}" > "${T}"/scanelf-ignored-LDFLAGS.log
if [ "${QA_STRICT_FLAGS_IGNORED-unset}" = unset ] ; then
@@ -421,11 +429,11 @@ install_qa_check() {
-i "${T}"/scanelf-ignored-LDFLAGS.log
f=$(<"${T}"/scanelf-ignored-LDFLAGS.log)
if [[ -n ${f} ]] ; then
- vecho -ne '\n'
+ __vecho -ne '\n'
eqawarn "${BAD}QA Notice: Files built without respecting LDFLAGS have been detected${NORMAL}"
eqawarn " Please include the following list of files in your report:"
eqawarn "${f}"
- vecho -ne '\n'
+ __vecho -ne '\n'
sleep 1
else
rm -f "${T}"/scanelf-ignored-LDFLAGS.log
@@ -442,7 +450,7 @@ install_qa_check() {
# Check for shared libraries lacking SONAMEs
qa_var="QA_SONAME_${ARCH/-/_}"
eval "[[ -n \${!qa_var} ]] && QA_SONAME=(\"\${${qa_var}[@]}\")"
- f=$(scanelf -ByF '%S %p' "${ED}"{,usr/}lib*/lib*.so* | gawk '$2 == "" { print }' | sed -e "s:^[[:space:]]${ED}:/:")
+ f=$(scanelf -ByF '%S %p' "${ED}"{,usr/}lib*/lib*.so* | awk '$2 == "" { print }' | sed -e "s:^[[:space:]]${ED}:/:")
if [[ -n ${f} ]] ; then
echo "${f}" > "${T}"/scanelf-missing-SONAME.log
if [[ "${QA_STRICT_SONAME-unset}" == unset ]] ; then
@@ -463,10 +471,10 @@ install_qa_check() {
sed -e "/^\$/d" -i "${T}"/scanelf-missing-SONAME.log
f=$(<"${T}"/scanelf-missing-SONAME.log)
if [[ -n ${f} ]] ; then
- vecho -ne '\n'
+ __vecho -ne '\n'
eqawarn "QA Notice: The following shared libraries lack a SONAME"
eqawarn "${f}"
- vecho -ne '\n'
+ __vecho -ne '\n'
sleep 1
else
rm -f "${T}"/scanelf-missing-SONAME.log
@@ -476,7 +484,7 @@ install_qa_check() {
# Check for shared libraries lacking NEEDED entries
qa_var="QA_DT_NEEDED_${ARCH/-/_}"
eval "[[ -n \${!qa_var} ]] && QA_DT_NEEDED=(\"\${${qa_var}[@]}\")"
- f=$(scanelf -ByF '%n %p' "${ED}"{,usr/}lib*/lib*.so* | gawk '$2 == "" { print }' | sed -e "s:^[[:space:]]${ED}:/:")
+ f=$(scanelf -ByF '%n %p' "${ED}"{,usr/}lib*/lib*.so* | awk '$2 == "" { print }' | sed -e "s:^[[:space:]]${ED}:/:")
if [[ -n ${f} ]] ; then
echo "${f}" > "${T}"/scanelf-missing-NEEDED.log
if [[ "${QA_STRICT_DT_NEEDED-unset}" == unset ]] ; then
@@ -497,10 +505,10 @@ install_qa_check() {
sed -e "/^\$/d" -i "${T}"/scanelf-missing-NEEDED.log
f=$(<"${T}"/scanelf-missing-NEEDED.log)
if [[ -n ${f} ]] ; then
- vecho -ne '\n'
+ __vecho -ne '\n'
eqawarn "QA Notice: The following shared libraries lack NEEDED entries"
eqawarn "${f}"
- vecho -ne '\n'
+ __vecho -ne '\n'
sleep 1
else
rm -f "${T}"/scanelf-missing-NEEDED.log
@@ -545,14 +553,13 @@ install_qa_check() {
die "Unsafe files found in \${D}. Portage will not install them."
fi
- if [[ -d ${D}/${D} ]] ; then
- declare -i INSTALLTOD=0
- for i in $(find "${D}/${D}/"); do
- eqawarn "QA Notice: /${i##${D}/${D}} installed in \${D}/\${D}"
+ if [[ -d ${D%/}${D} ]] ; then
+ local -i INSTALLTOD=0
+ while read -r -d $'\0' i ; do
+ eqawarn "QA Notice: /${i##${D%/}${D}} installed in \${D}/\${D}"
((INSTALLTOD++))
- done
- die "Aborting due to QA concerns: ${INSTALLTOD} files installed in ${D}/${D}"
- unset INSTALLTOD
+ done < <(find "${D%/}${D}" -print0)
+ die "Aborting due to QA concerns: ${INSTALLTOD} files installed in ${D%/}${D}"
fi
# Sanity check syntax errors in init.d scripts
@@ -563,10 +570,31 @@ install_qa_check() {
[[ -L ${i} ]] && continue
# if empty conf.d/init.d dir exists (baselayout), then i will be "/etc/conf.d/*" and not exist
[[ ! -e ${i} ]] && continue
+ if [[ ${d} == /etc/init.d && ${i} != *.sh ]] ; then
+ # skip non-shell-script for bug #451386
+ [[ $(head -n1 "${i}") =~ ^#!.*[[:space:]/](runscript|sh)$ ]] || continue
+ fi
bash -n "${i}" || die "The init.d file has syntax errors: ${i}"
done
done
+ local checkbashisms=$(type -P checkbashisms)
+ if [[ -n ${checkbashisms} ]] ; then
+ for d in /etc/init.d ; do
+ [[ -d ${ED}${d} ]] || continue
+ for i in "${ED}${d}"/* ; do
+ [[ -e ${i} ]] || continue
+ [[ -L ${i} ]] && continue
+ f=$("${checkbashisms}" -f "${i}" 2>&1)
+ [[ $? != 0 && -n ${f} ]] || continue
+ eqawarn "QA Notice: shell script appears to use non-POSIX feature(s):"
+ while read -r ; do
+ eqawarn " ${REPLY}"
+ done <<< "${f//${ED}}"
+ done
+ done
+ fi
+
# Look for leaking LDFLAGS into pkg-config files
f=$(egrep -sH '^Libs.*-Wl,(-O[012]|--hash-style)' "${ED}"/usr/*/pkgconfig/*.pc)
if [[ -n ${f} ]] ; then
@@ -577,17 +605,16 @@ install_qa_check() {
# this should help to ensure that all (most?) shared libraries are executable
# and that all libtool scripts / static libraries are not executable
local j
- for i in "${ED}"opt/*/lib{,32,64} \
- "${ED}"lib{,32,64} \
- "${ED}"usr/lib{,32,64} \
- "${ED}"usr/X11R6/lib{,32,64} ; do
+ for i in "${ED}"opt/*/lib* \
+ "${ED}"lib* \
+ "${ED}"usr/lib* ; do
[[ ! -d ${i} ]] && continue
for j in "${i}"/*.so.* "${i}"/*.so ; do
[[ ! -e ${j} ]] && continue
[[ -L ${j} ]] && continue
[[ -x ${j} ]] && continue
- vecho "making executable: ${j#${ED}}"
+ __vecho "making executable: ${j#${ED}}"
chmod +x "${j}"
done
@@ -595,7 +622,7 @@ install_qa_check() {
[[ ! -e ${j} ]] && continue
[[ -L ${j} ]] && continue
[[ ! -x ${j} ]] && continue
- vecho "removing executable bit: ${j#${ED}}"
+ __vecho "removing executable bit: ${j#${ED}}"
chmod -x "${j}"
done
@@ -604,7 +631,7 @@ install_qa_check() {
[[ ! -L ${j} ]] && continue
linkdest=$(readlink "${j}")
if [[ ${linkdest} == /* ]] ; then
- vecho -ne '\n'
+ __vecho -ne '\n'
eqawarn "QA Notice: Found an absolute symlink in a library directory:"
eqawarn " ${j#${D}} -> ${linkdest}"
eqawarn " It should be a relative symlink if in the same directory"
@@ -613,8 +640,8 @@ install_qa_check() {
done
done
- # When installing static libraries into /usr/lib and shared libraries into
- # /lib, we have to make sure we have a linker script in /usr/lib along side
+ # When installing static libraries into /usr/lib and shared libraries into
+ # /lib, we have to make sure we have a linker script in /usr/lib alongside
# the static library, or gcc will utilize the static lib when linking :(.
# http://bugs.gentoo.org/4411
abort="no"
@@ -624,7 +651,7 @@ install_qa_check() {
if [[ ! -e ${s} ]] ; then
s=${s%usr/*}${s##*/usr/}
if [[ -e ${s} ]] ; then
- vecho -ne '\n'
+ __vecho -ne '\n'
eqawarn "QA Notice: Missing gen_usr_ldscript for ${s##*/}"
abort="yes"
fi
@@ -635,11 +662,11 @@ install_qa_check() {
# Make sure people don't store libtool files or static libs in /lib
f=$(ls "${ED}"lib*/*.{a,la} 2>/dev/null)
if [[ -n ${f} ]] ; then
- vecho -ne '\n'
+ __vecho -ne '\n'
eqawarn "QA Notice: Excessive files found in the / partition"
eqawarn "${f}"
- vecho -ne '\n'
- die "static archives (*.a) and libtool library files (*.la) do not belong in /"
+ __vecho -ne '\n'
+ die "static archives (*.a) and libtool library files (*.la) belong in /usr/lib*, not /lib*"
fi
# Verify that the libtool files don't contain bogus $D entries.
@@ -647,7 +674,7 @@ install_qa_check() {
for a in "${ED}"usr/lib*/*.la ; do
s=${a##*/}
if grep -qs "${ED}" "${a}" ; then
- vecho -ne '\n'
+ __vecho -ne '\n'
eqawarn "QA Notice: ${s} appears to contain PORTAGE_TMPDIR paths"
abort="yes"
fi
@@ -688,6 +715,8 @@ install_qa_check() {
": warning: reference to local variable .* returned"
": warning: returning reference to temporary"
": warning: function returns address of local variable"
+ ": warning: .*\\[-Wsizeof-pointer-memaccess\\]"
+ ": warning: .*\\[-Waggressive-loop-optimizations\\]"
# this may be valid code :/
#": warning: multi-character character constant"
# need to check these two ...
@@ -726,18 +755,19 @@ install_qa_check() {
eerror " with the maintaining herd of the package."
eerror
else
- vecho -ne '\n'
+ __vecho -ne '\n'
eqawarn "QA Notice: Package triggers severe warnings which indicate that it"
eqawarn " may exhibit random runtime failures."
eqawarn "${f}"
- vecho -ne '\n'
+ __vecho -ne '\n'
fi
fi
done
local cat_cmd=cat
[[ $PORTAGE_LOG_FILE = *.gz ]] && cat_cmd=zcat
[[ $reset_debug = 1 ]] && set -x
- f=$($cat_cmd "${PORTAGE_LOG_FILE}" | \
+ # Use safe cwd, avoiding unsafe import for bug #469338.
+ f=$(cd "${PORTAGE_PYM_PATH}" ; $cat_cmd "${PORTAGE_LOG_FILE}" | \
"${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH"/check-implicit-pointer-usage.py || die "check-implicit-pointer-usage.py failed")
if [[ -n ${f} ]] ; then
@@ -763,11 +793,11 @@ install_qa_check() {
eerror " with the maintaining herd of the package."
eerror
else
- vecho -ne '\n'
+ __vecho -ne '\n'
eqawarn "QA Notice: Package triggers severe warnings which indicate that it"
eqawarn " will almost certainly crash on 64bit architectures."
eqawarn "${f}"
- vecho -ne '\n'
+ __vecho -ne '\n'
fi
fi
@@ -793,32 +823,42 @@ install_qa_check() {
[[ -x /usr/bin/file && -x /usr/bin/find ]] && \
[[ -n ${MULTILIB_STRICT_DIRS} && -n ${MULTILIB_STRICT_DENY} ]]
then
- local abort=no dir file firstrun=yes
+ rm -f "${T}/multilib-strict.log"
+ local abort=no dir file
MULTILIB_STRICT_EXEMPT=$(echo ${MULTILIB_STRICT_EXEMPT} | sed -e 's:\([(|)]\):\\\1:g')
for dir in ${MULTILIB_STRICT_DIRS} ; do
[[ -d ${ED}/${dir} ]] || continue
for file in $(find ${ED}/${dir} -type f | grep -v "^${ED}/${dir}/${MULTILIB_STRICT_EXEMPT}"); do
if file ${file} | egrep -q "${MULTILIB_STRICT_DENY}" ; then
- if [[ ${firstrun} == yes ]] ; then
- echo "Files matching a file type that is not allowed:"
- firstrun=no
- fi
- abort=yes
- echo " ${file#${ED}//}"
+ echo "${file#${ED}//}" >> "${T}/multilib-strict.log"
fi
done
done
- [[ ${abort} == yes ]] && die "multilib-strict check failed!"
- fi
- # ensure packages don't install systemd units automagically
- if ! has systemd ${INHERITED} && \
- [[ -d "${ED}"/lib/systemd/system ]]
- then
- eqawarn "QA Notice: package installs systemd unit files (/lib/systemd/system)"
- eqawarn " but does not inherit systemd.eclass."
- has stricter ${FEATURES} \
- && die "install aborted due to missing inherit of systemd.eclass"
+ if [[ -s ${T}/multilib-strict.log ]] ; then
+ if [[ ${#QA_MULTILIB_PATHS[@]} -eq 1 ]] ; then
+ local shopts=$-
+ set -o noglob
+ QA_MULTILIB_PATHS=(${QA_MULTILIB_PATHS})
+ set +o noglob
+ set -${shopts}
+ fi
+ if [ "${QA_STRICT_MULTILIB_PATHS-unset}" = unset ] ; then
+ for x in "${QA_MULTILIB_PATHS[@]}" ; do
+ sed -e "s#^${x#/}\$##" -i "${T}/multilib-strict.log"
+ done
+ sed -e "/^\$/d" -i "${T}/multilib-strict.log"
+ fi
+ if [[ -s ${T}/multilib-strict.log ]] ; then
+ abort=yes
+ echo "Files matching a file type that is not allowed:"
+ while read -r ; do
+ echo " ${REPLY}"
+ done < "${T}/multilib-strict.log"
+ fi
+ fi
+
+ [[ ${abort} == yes ]] && die "multilib-strict check failed!"
fi
}
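
The reworked multilib-strict check now collects offending paths in a log and filters them against QA_MULTILIB_PATHS patterns before deciding to die. A self-contained sketch of that filter step; the log contents and exemption pattern here are invented samples:

    # Blank every log line matching an exemption regex, then drop the
    # now-empty lines; whatever survives is a real violation.
    T=$(mktemp -d)
    printf '%s\n' \
        'usr/lib/foo/libfoo.so' \
        'usr/lib/python2.7/site-packages/bar.so' > "${T}/multilib-strict.log"
    QA_MULTILIB_PATHS=( "usr/lib/python.*" )
    for x in "${QA_MULTILIB_PATHS[@]}"; do
        sed -e "s#^${x#/}\$##" -i "${T}/multilib-strict.log"
    done
    sed -e "/^\$/d" -i "${T}/multilib-strict.log"
    cat "${T}/multilib-strict.log"    # only usr/lib/foo/libfoo.so remains
    rm -rf "${T}"
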
@@ -851,16 +891,6 @@ install_qa_check_prefix() {
# all further checks rely on ${ED} existing
[[ -d ${ED} ]] || return
- # this does not really belong here, but it's closely tied to
- # the code below; many runscripts generate positives here, and we
- # know they don't work (bug #196294) so as long as that one
- # remains an issue, simply remove them as they won't work
- # anyway, avoid etc/init.d/functions.sh from being thrown away
- if [[ ( -d "${ED}"/etc/conf.d || -d "${ED}"/etc/init.d ) && ! -f "${ED}"/etc/init.d/functions.sh ]] ; then
- ewarn "removed /etc/init.d and /etc/conf.d directories until bug #196294 has been resolved"
- rm -Rf "${ED}"/etc/{conf,init}.d
- fi
-
# check shebangs, bug #282539
rm -f "${T}"/non-prefix-shebangs-errs
local WHITELIST=" /usr/bin/env "
@@ -952,7 +982,7 @@ install_mask() {
local no_inst
for no_inst in ${install_mask}; do
set +o noglob
- quiet_mode || einfo "Removing ${no_inst}"
+ __quiet_mode || einfo "Removing ${no_inst}"
# normal stuff
rm -Rf "${root}"/${no_inst} >&/dev/null
@@ -971,8 +1001,9 @@ preinst_mask() {
return 1
fi
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
# Make sure $PWD is not ${D} so that we don't leave gmon.out files
# in there in case any tools were built with -pg in CFLAGS.
@@ -1000,8 +1031,9 @@ preinst_sfperms() {
return 1
fi
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
# Smart FileSystem Permissions
if has sfperms $FEATURES; then
@@ -1039,8 +1071,9 @@ preinst_suid_scan() {
return 1
fi
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
# total suid control.
if has suidctl $FEATURES; then
@@ -1050,19 +1083,19 @@ preinst_suid_scan() {
# to files outside of the sandbox, but this
# can easily be bypassed using the addwrite() function
addwrite "${sfconf}"
- vecho ">>> Performing suid scan in ${ED}"
+ __vecho ">>> Performing suid scan in ${ED}"
for i in $(find "${ED}" -type f \( -perm -4000 -o -perm -2000 \) ); do
if [ -s "${sfconf}" ]; then
install_path=/${i#${ED}}
if grep -q "^${install_path}\$" "${sfconf}" ; then
- vecho "- ${install_path} is an approved suid file"
+ __vecho "- ${install_path} is an approved suid file"
else
- vecho ">>> Removing sbit on non registered ${install_path}"
+ __vecho ">>> Removing sbit on non registered ${install_path}"
for x in 5 4 3 2 1 0; do sleep 0.25 ; done
ls_ret=$(ls -ldh "${i}")
chmod ugo-s "${i}"
grep "^#${install_path}$" "${sfconf}" > /dev/null || {
- vecho ">>> Appending commented out entry to ${sfconf} for ${PF}"
+ __vecho ">>> Appending commented out entry to ${sfconf} for ${PF}"
echo "## ${ls_ret%${ED}*}${install_path}" >> "${sfconf}"
echo "#${install_path}" >> "${sfconf}"
# no delwrite() eh?
@@ -1070,7 +1103,7 @@ preinst_suid_scan() {
}
fi
else
- vecho "suidctl feature set but you are lacking a ${sfconf}"
+ __vecho "suidctl feature set but you are lacking a ${sfconf}"
fi
done
fi
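
The suid scan above is driven by a single find(1) permission query. The same query can be run standalone to see what it matches; the path is only a sample:

    # -perm -4000 matches setuid, -perm -2000 matches setgid; either
    # bit makes a file a candidate for suidctl review.
    find /usr/bin -type f \( -perm -4000 -o -perm -2000 \) 2>/dev/null | head -n 5
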
@@ -1082,34 +1115,35 @@ preinst_selinux_labels() {
return 1
fi
if has selinux ${FEATURES}; then
- # SELinux file labeling (needs to always be last in dyn_preinst)
+ # SELinux file labeling (needs to execute after preinst)
# only attempt to label if setfiles is executable
# and 'context' is available on selinuxfs.
if [ -f /selinux/context -o -f /sys/fs/selinux/context ] && \
[ -x /usr/sbin/setfiles -a -x /usr/sbin/selinuxconfig ]; then
- vecho ">>> Setting SELinux security labels"
+ __vecho ">>> Setting SELinux security labels"
(
eval "$(/usr/sbin/selinuxconfig)" || \
die "Failed to determine SELinux policy paths.";
-
+
addwrite /selinux/context
addwrite /sys/fs/selinux/context
-
+
/usr/sbin/setfiles "${file_contexts_path}" -r "${D}" "${D}"
) || die "Failed to set SELinux security labels."
else
# nonfatal, since merging can happen outside a SE kernel
# like during a recovery situation
- vecho "!!! Unable to set SELinux security labels"
+ __vecho "!!! Unable to set SELinux security labels"
fi
fi
}
-dyn_package() {
+__dyn_package() {
local PROOT
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local EPREFIX= ED=${D} ;; esac
+ if ! ___eapi_has_prefix_variables; then
+ local EPREFIX= ED=${D}
+ fi
# Make sure $PWD is not ${D} so that we don't leave gmon.out files
# in there in case any tools were built with -pg in CFLAGS.
@@ -1132,6 +1166,7 @@ dyn_package() {
local tar_options=""
[[ $PORTAGE_VERBOSE = 1 ]] && tar_options+=" -v"
+ has xattr ${FEATURES} && [[ $(tar --help 2> /dev/null) == *--xattrs* ]] && tar_options+=" --xattrs"
# Sandbox is disabled in case the user wants to use a symlink
# for $PKGDIR and/or $PKGDIR/All.
export SANDBOX_ON="0"
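
The xattr line above enables a tar flag only when FEATURES requests it and the installed tar advertises support. The detect-then-append pattern by itself:

    # Probing --help output is cruder than a version table, but it needs
    # no knowledge of which tar release introduced the flag.
    tar_options=""
    if [[ $(tar --help 2>/dev/null) == *--xattrs* ]]; then
        tar_options+=" --xattrs"
    fi
    echo "would run: tar${tar_options} -cf - ."
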
@@ -1141,7 +1176,7 @@ dyn_package() {
tar $tar_options -cf - $PORTAGE_BINPKG_TAR_OPTS -C "${PROOT}" . | \
$PORTAGE_BZIP2_COMMAND -c > "$PORTAGE_BINPKG_TMPFILE"
assert "failed to pack binary package: '$PORTAGE_BINPKG_TMPFILE'"
- PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
+ PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
"${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH"/xpak-helper.py recompose \
"$PORTAGE_BINPKG_TMPFILE" "$PORTAGE_BUILDDIR/build-info"
if [ $? -ne 0 ]; then
@@ -1158,7 +1193,7 @@ dyn_package() {
fi
[ -n "${md5_hash}" ] && \
echo ${md5_hash} > "${PORTAGE_BUILDDIR}"/build-info/BINPKGMD5
- vecho ">>> Done."
+ __vecho ">>> Done."
# cleanup our temp tree
[[ -n ${PKG_INSTALL_MASK} ]] && rm -rf "${PROOT}"
@@ -1167,8 +1202,8 @@ dyn_package() {
die "Failed to create $PORTAGE_BUILDDIR/.packaged"
}
-dyn_spec() {
- local sources_dir=/usr/src/rpm/SOURCES
+__dyn_spec() {
+ local sources_dir=${T}/rpmbuild/SOURCES
mkdir -p "${sources_dir}"
declare -a tar_args=("${EBUILD}")
[[ -d ${FILESDIR} ]] && tar_args=("${EBUILD}" "${FILESDIR}")
@@ -1181,10 +1216,9 @@ Summary: ${DESCRIPTION}
Name: ${PN}
Version: ${PV}
Release: ${PR}
-Copyright: GPL
+License: GPL
Group: portage/${CATEGORY}
Source: ${PF}.tar.gz
-Buildroot: ${D}
%description
${DESCRIPTION}
@@ -1205,18 +1239,18 @@ __END1__
}
-dyn_rpm() {
-
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local EPREFIX= ;; esac
+__dyn_rpm() {
+ if ! ___eapi_has_prefix_variables; then
+ local EPREFIX=
+ fi
cd "${T}" || die "cd failed"
- local machine_name=$(uname -m)
- local dest_dir=${EPREFIX}/usr/src/rpm/RPMS/${machine_name}
- addwrite ${EPREFIX}/usr/src/rpm
+ local machine_name=${CHOST%%-*}
+ local dest_dir=${T}/rpmbuild/RPMS/${machine_name}
addwrite "${RPMDIR}"
- dyn_spec
- rpmbuild -bb --clean --rmsource "${PF}.spec" || die "Failed to integrate rpm spec file"
+ __dyn_spec
+ HOME=${T} \
+ rpmbuild -bb --clean --nodeps --rmsource "${PF}.spec" --buildroot "${D}" --target "${CHOST}" || die "Failed to integrate rpm spec file"
install -D "${dest_dir}/${PN}-${PV}-${PR}.${machine_name}.rpm" \
"${RPMDIR}/${CATEGORY}/${PN}-${PV}-${PR}.rpm" || \
die "Failed to move rpm"
@@ -1254,7 +1288,7 @@ install_hooks() {
}
if [ -n "${MISC_FUNCTIONS_ARGS}" ]; then
- source_all_bashrcs
+ __source_all_bashrcs
[ "$PORTAGE_DEBUG" == "1" ] && set -x
for x in ${MISC_FUNCTIONS_ARGS}; do
${x}
diff --git a/bin/phase-functions.sh b/bin/phase-functions.sh
index ce251ceb9..f39a024a2 100644
--- a/bin/phase-functions.sh
+++ b/bin/phase-functions.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Hardcoded bash lists are needed for backward compatibility with
@@ -8,28 +8,31 @@
# when portage is upgrading itself.
PORTAGE_READONLY_METADATA="DEFINED_PHASES DEPEND DESCRIPTION
- EAPI HOMEPAGE INHERITED IUSE REQUIRED_USE KEYWORDS LICENSE
+ EAPI HDEPEND HOMEPAGE INHERITED IUSE REQUIRED_USE KEYWORDS LICENSE
PDEPEND PROVIDE RDEPEND REPOSITORY RESTRICT SLOT SRC_URI"
-PORTAGE_READONLY_VARS="D EBUILD EBUILD_PHASE \
+PORTAGE_READONLY_VARS="D EBUILD EBUILD_PHASE EBUILD_PHASE_FUNC \
EBUILD_SH_ARGS ECLASSDIR EMERGE_FROM FILESDIR MERGE_TYPE \
PM_EBUILD_HOOK_DIR \
PORTAGE_ACTUAL_DISTDIR PORTAGE_ARCHLIST PORTAGE_BASHRC \
PORTAGE_BINPKG_FILE PORTAGE_BINPKG_TAR_OPTS PORTAGE_BINPKG_TMPFILE \
- PORTAGE_BIN_PATH PORTAGE_BUILDDIR PORTAGE_BUNZIP2_COMMAND \
+ PORTAGE_BIN_PATH PORTAGE_BUILDDIR PORTAGE_BUILD_GROUP \
+ PORTAGE_BUILD_USER PORTAGE_BUNZIP2_COMMAND \
PORTAGE_BZIP2_COMMAND PORTAGE_COLORMAP PORTAGE_CONFIGROOT \
PORTAGE_DEBUG PORTAGE_DEPCACHEDIR PORTAGE_EBUILD_EXIT_FILE \
+ PORTAGE_ECLASS_LOCATIONS \
PORTAGE_GID PORTAGE_GRPNAME PORTAGE_INST_GID PORTAGE_INST_UID \
- PORTAGE_IPC_DAEMON PORTAGE_IUSE PORTAGE_LOG_FILE \
+ PORTAGE_INTERNAL_CALLER PORTAGE_IPC_DAEMON PORTAGE_IUSE PORTAGE_LOG_FILE \
PORTAGE_MUTABLE_FILTERED_VARS PORTAGE_OVERRIDE_EPREFIX \
- PORTAGE_PYM_PATH PORTAGE_PYTHON \
+ PORTAGE_PYM_PATH PORTAGE_PYTHON PORTAGE_PYTHONPATH \
PORTAGE_READONLY_METADATA PORTAGE_READONLY_VARS \
- PORTAGE_REPO_NAME PORTAGE_RESTRICT \
+ PORTAGE_REPO_NAME PORTAGE_REPOSITORIES PORTAGE_RESTRICT \
PORTAGE_SAVED_READONLY_VARS PORTAGE_SIGPIPE_STATUS \
PORTAGE_TMPDIR PORTAGE_UPDATE_ENV PORTAGE_USERNAME \
- PORTAGE_VERBOSE PORTAGE_WORKDIR_MODE PORTDIR PORTDIR_OVERLAY \
+ PORTAGE_VERBOSE PORTAGE_WORKDIR_MODE PORTAGE_XATTR_EXCLUDE \
+ PORTDIR \
PROFILE_PATHS REPLACING_VERSIONS REPLACED_BY_VERSION T WORKDIR \
- __PORTAGE_TEST_HARDLINK_LOCKS"
+ __PORTAGE_HELPER __PORTAGE_TEST_HARDLINK_LOCKS"
PORTAGE_SAVED_READONLY_VARS="A CATEGORY P PF PN PR PV PVR"
@@ -39,7 +42,7 @@ PORTAGE_SAVED_READONLY_VARS="A CATEGORY P PF PN PR PV PVR"
# it is saved or loaded (any mutations do not persist).
PORTAGE_MUTABLE_FILTERED_VARS="AA HOSTNAME"
-# @FUNCTION: filter_readonly_variables
+# @FUNCTION: __filter_readonly_variables
# @DESCRIPTION: [--filter-sandbox] [--allow-extra-vars]
# Read an environment from stdin and echo to stdout while filtering variables
# with names that are known to cause interference:
@@ -81,14 +84,14 @@ PORTAGE_MUTABLE_FILTERED_VARS="AA HOSTNAME"
# readonly variable cause the shell to exit while executing the "source"
# builtin command. To avoid this problem, this function filters those
# variables out and discards them. See bug #190128.
-filter_readonly_variables() {
+__filter_readonly_variables() {
local x filtered_vars
local readonly_bash_vars="BASHOPTS BASHPID DIRSTACK EUID
FUNCNAME GROUPS PIPESTATUS PPID SHELLOPTS UID"
local bash_misc_vars="BASH BASH_.* COLUMNS COMP_WORDBREAKS HISTCMD
HISTFILE HOSTNAME HOSTTYPE IFS LINENO MACHTYPE OLDPWD
OPTERR OPTIND OSTYPE POSIXLY_CORRECT PS4 PWD RANDOM
- SECONDS SHELL SHLVL _"
+ SECONDS SHLVL _"
local filtered_sandbox_vars="SANDBOX_ACTIVE SANDBOX_BASHRC
SANDBOX_DEBUG_LOG SANDBOX_DISABLED SANDBOX_LIB
SANDBOX_LOG SANDBOX_ON"
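
__filter_readonly_variables ultimately hands these name lists to filter-bash-environment.py. A rough grep-only approximation of the idea, which drops simple NAME=value lines but ignores the multi-line values and exported functions the real helper parses:

    # Build an alternation from the blacklist and strip matching
    # assignments from an environment dump (first few lines only).
    filtered_vars="BASHOPTS BASHPID EUID PPID SHELLOPTS UID"
    pattern=$(printf '%s|' ${filtered_vars})
    env | grep -Ev "^(${pattern%|})=" | head -n 5
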
@@ -100,15 +103,9 @@ filter_readonly_variables() {
# Don't filter/interfere with prefix variables unless they are
# supported by the current EAPI.
- case "${EAPI:-0}" in
- 0|1|2)
- [[ " ${FEATURES} " == *" force-prefix "* ]] && \
- filtered_vars+=" ED EPREFIX EROOT"
- ;;
- *)
- filtered_vars+=" ED EPREFIX EROOT"
- ;;
- esac
+ if ___eapi_has_prefix_variables; then
+ filtered_vars+=" ED EPREFIX EROOT"
+ fi
if has --filter-sandbox $* ; then
filtered_vars="${filtered_vars} SANDBOX_.*"
@@ -140,14 +137,14 @@ filter_readonly_variables() {
"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}"/filter-bash-environment.py "${filtered_vars}" || die "filter-bash-environment.py failed"
}
-# @FUNCTION: preprocess_ebuild_env
+# @FUNCTION: __preprocess_ebuild_env
# @DESCRIPTION:
# Filter any readonly variables from ${T}/environment, source it, and then
-# save it via save_ebuild_env(). This process should be sufficient to prevent
+# save it via __save_ebuild_env(). This process should be sufficient to prevent
# any stale variables or functions from an arbitrary environment from
# interfering with the current environment. This is useful when an existing
# environment needs to be loaded from a binary or installed package.
-preprocess_ebuild_env() {
+__preprocess_ebuild_env() {
local _portage_filter_opts="--filter-features --filter-locale --filter-path --filter-sandbox"
# If environment.raw is present, this is a signal from the python side,
@@ -156,7 +153,7 @@ preprocess_ebuild_env() {
# Otherwise, we don't need to filter the environment.
[ -f "${T}/environment.raw" ] || return 0
- filter_readonly_variables $_portage_filter_opts < "${T}"/environment \
+ __filter_readonly_variables $_portage_filter_opts < "${T}"/environment \
>> "$T/environment.filtered" || return $?
unset _portage_filter_opts
mv "${T}"/environment.filtered "${T}"/environment || return $?
@@ -174,20 +171,20 @@ preprocess_ebuild_env() {
# until we've merged them with our current values.
export SANDBOX_ON=0
- # It's remotely possible that save_ebuild_env() has been overridden
+ # It's remotely possible that __save_ebuild_env() has been overridden
# by the above source command. To protect ourselves, we override it
# here with our own version. ${PORTAGE_BIN_PATH} is safe to use here
# because it's already filtered above.
source "${PORTAGE_BIN_PATH}/save-ebuild-env.sh" || exit $?
- # Rely on save_ebuild_env() to filter out any remaining variables
+ # Rely on __save_ebuild_env() to filter out any remaining variables
# and functions that could interfere with the current environment.
- save_ebuild_env || exit $?
+ __save_ebuild_env || exit $?
>> "$T/environment.success" || exit $?
) > "${T}/environment.filtered"
local retval
if [ -e "${T}/environment.success" ] ; then
- filter_readonly_variables --filter-features < \
+ __filter_readonly_variables --filter-features < \
"${T}/environment.filtered" > "${T}/environment"
retval=$?
else
@@ -197,62 +194,62 @@ preprocess_ebuild_env() {
return ${retval}
}
-ebuild_phase() {
- declare -F "$1" >/dev/null && qa_call $1
+__ebuild_phase() {
+ declare -F "$1" >/dev/null && __qa_call $1
}
-ebuild_phase_with_hooks() {
+__ebuild_phase_with_hooks() {
local x phase_name=${1}
for x in {pre_,,post_}${phase_name} ; do
- ebuild_phase ${x}
+ __ebuild_phase ${x}
done
}
-dyn_pretend() {
+__dyn_pretend() {
if [[ -e $PORTAGE_BUILDDIR/.pretended ]] ; then
- vecho ">>> It appears that '$PF' is already pretended; skipping."
- vecho ">>> Remove '$PORTAGE_BUILDDIR/.pretended' to force pretend."
+ __vecho ">>> It appears that '$PF' is already pretended; skipping."
+ __vecho ">>> Remove '$PORTAGE_BUILDDIR/.pretended' to force pretend."
return 0
fi
- ebuild_phase pre_pkg_pretend
- ebuild_phase pkg_pretend
+ __ebuild_phase pre_pkg_pretend
+ __ebuild_phase pkg_pretend
>> "$PORTAGE_BUILDDIR/.pretended" || \
die "Failed to create $PORTAGE_BUILDDIR/.pretended"
- ebuild_phase post_pkg_pretend
+ __ebuild_phase post_pkg_pretend
}
-dyn_setup() {
+__dyn_setup() {
if [[ -e $PORTAGE_BUILDDIR/.setuped ]] ; then
- vecho ">>> It appears that '$PF' is already setup; skipping."
- vecho ">>> Remove '$PORTAGE_BUILDDIR/.setuped' to force setup."
+ __vecho ">>> It appears that '$PF' is already setup; skipping."
+ __vecho ">>> Remove '$PORTAGE_BUILDDIR/.setuped' to force setup."
return 0
fi
- ebuild_phase pre_pkg_setup
- ebuild_phase pkg_setup
+ __ebuild_phase pre_pkg_setup
+ __ebuild_phase pkg_setup
>> "$PORTAGE_BUILDDIR/.setuped" || \
die "Failed to create $PORTAGE_BUILDDIR/.setuped"
- ebuild_phase post_pkg_setup
+ __ebuild_phase post_pkg_setup
}
-dyn_unpack() {
+__dyn_unpack() {
if [[ -f ${PORTAGE_BUILDDIR}/.unpacked ]] ; then
- vecho ">>> WORKDIR is up-to-date, keeping..."
+ __vecho ">>> WORKDIR is up-to-date, keeping..."
return 0
fi
if [ ! -d "${WORKDIR}" ]; then
install -m${PORTAGE_WORKDIR_MODE:-0700} -d "${WORKDIR}" || die "Failed to create dir '${WORKDIR}'"
fi
cd "${WORKDIR}" || die "Directory change failed: \`cd '${WORKDIR}'\`"
- ebuild_phase pre_src_unpack
- vecho ">>> Unpacking source..."
- ebuild_phase src_unpack
+ __ebuild_phase pre_src_unpack
+ __vecho ">>> Unpacking source..."
+ __ebuild_phase src_unpack
>> "$PORTAGE_BUILDDIR/.unpacked" || \
die "Failed to create $PORTAGE_BUILDDIR/.unpacked"
- vecho ">>> Source unpacked in ${WORKDIR}"
- ebuild_phase post_src_unpack
+ __vecho ">>> Source unpacked in ${WORKDIR}"
+ __ebuild_phase post_src_unpack
}
-dyn_clean() {
+__dyn_clean() {
if [ -z "${PORTAGE_BUILDDIR}" ]; then
echo "Aborting clean phase because PORTAGE_BUILDDIR is unset!"
return 1
@@ -299,7 +296,7 @@ dyn_clean() {
true
}
-abort_handler() {
+__abort_handler() {
local msg
if [ "$2" != "fail" ]; then
msg="${EBUILD}: ${1} aborted; exiting."
@@ -314,37 +311,37 @@ abort_handler() {
trap - SIGINT SIGQUIT
}
-abort_prepare() {
- abort_handler src_prepare $1
+__abort_prepare() {
+ __abort_handler src_prepare $1
rm -f "$PORTAGE_BUILDDIR/.prepared"
exit 1
}
-abort_configure() {
- abort_handler src_configure $1
+__abort_configure() {
+ __abort_handler src_configure $1
rm -f "$PORTAGE_BUILDDIR/.configured"
exit 1
}
-abort_compile() {
- abort_handler "src_compile" $1
+__abort_compile() {
+ __abort_handler "src_compile" $1
rm -f "${PORTAGE_BUILDDIR}/.compiled"
exit 1
}
-abort_test() {
- abort_handler "dyn_test" $1
+__abort_test() {
+ __abort_handler "__dyn_test" $1
rm -f "${PORTAGE_BUILDDIR}/.tested"
exit 1
}
-abort_install() {
- abort_handler "src_install" $1
+__abort_install() {
+ __abort_handler "src_install" $1
rm -rf "${PORTAGE_BUILDDIR}/image"
exit 1
}
-has_phase_defined_up_to() {
+__has_phase_defined_up_to() {
local phase
for phase in unpack prepare configure compile install; do
has ${phase} ${DEFINED_PHASES} && return 0
@@ -354,89 +351,89 @@ has_phase_defined_up_to() {
return 1
}
-dyn_prepare() {
+__dyn_prepare() {
if [[ -e $PORTAGE_BUILDDIR/.prepared ]] ; then
- vecho ">>> It appears that '$PF' is already prepared; skipping."
- vecho ">>> Remove '$PORTAGE_BUILDDIR/.prepared' to force prepare."
+ __vecho ">>> It appears that '$PF' is already prepared; skipping."
+ __vecho ">>> Remove '$PORTAGE_BUILDDIR/.prepared' to force prepare."
return 0
fi
if [[ -d $S ]] ; then
cd "${S}"
- elif has $EAPI 0 1 2 3 3_pre2 ; then
+ elif ___eapi_has_S_WORKDIR_fallback; then
cd "${WORKDIR}"
- elif [[ -z ${A} ]] && ! has_phase_defined_up_to prepare; then
+ elif [[ -z ${A} ]] && ! __has_phase_defined_up_to prepare; then
cd "${WORKDIR}"
else
die "The source directory '${S}' doesn't exist"
fi
- trap abort_prepare SIGINT SIGQUIT
+ trap __abort_prepare SIGINT SIGQUIT
- ebuild_phase pre_src_prepare
- vecho ">>> Preparing source in $PWD ..."
- ebuild_phase src_prepare
+ __ebuild_phase pre_src_prepare
+ __vecho ">>> Preparing source in $PWD ..."
+ __ebuild_phase src_prepare
>> "$PORTAGE_BUILDDIR/.prepared" || \
die "Failed to create $PORTAGE_BUILDDIR/.prepared"
- vecho ">>> Source prepared."
- ebuild_phase post_src_prepare
+ __vecho ">>> Source prepared."
+ __ebuild_phase post_src_prepare
trap - SIGINT SIGQUIT
}
-dyn_configure() {
+__dyn_configure() {
if [[ -e $PORTAGE_BUILDDIR/.configured ]] ; then
- vecho ">>> It appears that '$PF' is already configured; skipping."
- vecho ">>> Remove '$PORTAGE_BUILDDIR/.configured' to force configuration."
+ __vecho ">>> It appears that '$PF' is already configured; skipping."
+ __vecho ">>> Remove '$PORTAGE_BUILDDIR/.configured' to force configuration."
return 0
fi
if [[ -d $S ]] ; then
cd "${S}"
- elif has $EAPI 0 1 2 3 3_pre2 ; then
+ elif ___eapi_has_S_WORKDIR_fallback; then
cd "${WORKDIR}"
- elif [[ -z ${A} ]] && ! has_phase_defined_up_to configure; then
+ elif [[ -z ${A} ]] && ! __has_phase_defined_up_to configure; then
cd "${WORKDIR}"
else
die "The source directory '${S}' doesn't exist"
fi
- trap abort_configure SIGINT SIGQUIT
+ trap __abort_configure SIGINT SIGQUIT
- ebuild_phase pre_src_configure
+ __ebuild_phase pre_src_configure
- vecho ">>> Configuring source in $PWD ..."
- ebuild_phase src_configure
+ __vecho ">>> Configuring source in $PWD ..."
+ __ebuild_phase src_configure
>> "$PORTAGE_BUILDDIR/.configured" || \
die "Failed to create $PORTAGE_BUILDDIR/.configured"
- vecho ">>> Source configured."
+ __vecho ">>> Source configured."
- ebuild_phase post_src_configure
+ __ebuild_phase post_src_configure
trap - SIGINT SIGQUIT
}
-dyn_compile() {
+__dyn_compile() {
if [[ -e $PORTAGE_BUILDDIR/.compiled ]] ; then
- vecho ">>> It appears that '${PF}' is already compiled; skipping."
- vecho ">>> Remove '$PORTAGE_BUILDDIR/.compiled' to force compilation."
+ __vecho ">>> It appears that '${PF}' is already compiled; skipping."
+ __vecho ">>> Remove '$PORTAGE_BUILDDIR/.compiled' to force compilation."
return 0
fi
if [[ -d $S ]] ; then
cd "${S}"
- elif has $EAPI 0 1 2 3 3_pre2 ; then
+ elif ___eapi_has_S_WORKDIR_fallback; then
cd "${WORKDIR}"
- elif [[ -z ${A} ]] && ! has_phase_defined_up_to compile; then
+ elif [[ -z ${A} ]] && ! __has_phase_defined_up_to compile; then
cd "${WORKDIR}"
else
die "The source directory '${S}' doesn't exist"
fi
- trap abort_compile SIGINT SIGQUIT
+ trap __abort_compile SIGINT SIGQUIT
if has distcc $FEATURES && has distcc-pump $FEATURES ; then
if [[ -z $INCLUDE_SERVER_PORT ]] || [[ ! -w $INCLUDE_SERVER_PORT ]] ; then
@@ -445,90 +442,96 @@ dyn_compile() {
fi
fi
- ebuild_phase pre_src_compile
+ __ebuild_phase pre_src_compile
- vecho ">>> Compiling source in $PWD ..."
- ebuild_phase src_compile
+ __vecho ">>> Compiling source in $PWD ..."
+ __ebuild_phase src_compile
>> "$PORTAGE_BUILDDIR/.compiled" || \
die "Failed to create $PORTAGE_BUILDDIR/.compiled"
- vecho ">>> Source compiled."
+ __vecho ">>> Source compiled."
- ebuild_phase post_src_compile
+ __ebuild_phase post_src_compile
trap - SIGINT SIGQUIT
}
-dyn_test() {
+__dyn_test() {
if [[ -e $PORTAGE_BUILDDIR/.tested ]] ; then
- vecho ">>> It appears that ${PN} has already been tested; skipping."
- vecho ">>> Remove '${PORTAGE_BUILDDIR}/.tested' to force test."
+ __vecho ">>> It appears that ${PN} has already been tested; skipping."
+ __vecho ">>> Remove '${PORTAGE_BUILDDIR}/.tested' to force test."
return
fi
- if [ "${EBUILD_FORCE_TEST}" == "1" ] ; then
- # If USE came from ${T}/environment then it might not have USE=test
- # like it's supposed to here.
- ! has test ${USE} && export USE="${USE} test"
- fi
-
- trap "abort_test" SIGINT SIGQUIT
+ trap "__abort_test" SIGINT SIGQUIT
if [ -d "${S}" ]; then
cd "${S}"
else
cd "${WORKDIR}"
fi
- if ! has test $FEATURES && [ "${EBUILD_FORCE_TEST}" != "1" ]; then
- vecho ">>> Test phase [not enabled]: ${CATEGORY}/${PF}"
- elif has test $RESTRICT; then
+ if has test ${RESTRICT} ; then
einfo "Skipping make test/check due to ebuild restriction."
- vecho ">>> Test phase [explicitly disabled]: ${CATEGORY}/${PF}"
+ __vecho ">>> Test phase [disabled because of RESTRICT=test]: ${CATEGORY}/${PF}"
+
+ # If ${EBUILD_FORCE_TEST} == 1 and FEATURES came from ${T}/environment
+ # then it might not have FEATURES=test like it's supposed to here.
+ elif [[ ${EBUILD_FORCE_TEST} != 1 ]] && ! has test ${FEATURES} ; then
+ __vecho ">>> Test phase [not enabled]: ${CATEGORY}/${PF}"
else
+ # If ${EBUILD_FORCE_TEST} == 1 and USE came from ${T}/environment
+ # then it might not have USE=test like it's supposed to here.
+ if [[ ${EBUILD_FORCE_TEST} == 1 && test =~ ${PORTAGE_IUSE} ]] && \
+ ! has test ${USE} ; then
+ export USE="${USE} test"
+ fi
+
local save_sp=${SANDBOX_PREDICT}
addpredict /
- ebuild_phase pre_src_test
- ebuild_phase src_test
+ __ebuild_phase pre_src_test
+ __ebuild_phase src_test
>> "$PORTAGE_BUILDDIR/.tested" || \
die "Failed to create $PORTAGE_BUILDDIR/.tested"
- ebuild_phase post_src_test
+ __ebuild_phase post_src_test
SANDBOX_PREDICT=${save_sp}
fi
trap - SIGINT SIGQUIT
}
-dyn_install() {
+__dyn_install() {
[ -z "$PORTAGE_BUILDDIR" ] && die "${FUNCNAME}: PORTAGE_BUILDDIR is unset"
if has noauto $FEATURES ; then
rm -f "${PORTAGE_BUILDDIR}/.installed"
elif [[ -e $PORTAGE_BUILDDIR/.installed ]] ; then
- vecho ">>> It appears that '${PF}' is already installed; skipping."
- vecho ">>> Remove '${PORTAGE_BUILDDIR}/.installed' to force install."
+ __vecho ">>> It appears that '${PF}' is already installed; skipping."
+ __vecho ">>> Remove '${PORTAGE_BUILDDIR}/.installed' to force install."
return 0
fi
- trap "abort_install" SIGINT SIGQUIT
- ebuild_phase pre_src_install
+ trap "__abort_install" SIGINT SIGQUIT
+ __ebuild_phase pre_src_install
- _x=${ED}
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) _x=${D} ;; esac
+ if ___eapi_has_prefix_variables; then
+ _x=${ED}
+ else
+ _x=${D}
+ fi
rm -rf "${D}"
mkdir -p "${_x}"
unset _x
if [[ -d $S ]] ; then
cd "${S}"
- elif has $EAPI 0 1 2 3 3_pre2 ; then
+ elif ___eapi_has_S_WORKDIR_fallback; then
cd "${WORKDIR}"
- elif [[ -z ${A} ]] && ! has_phase_defined_up_to install; then
+ elif [[ -z ${A} ]] && ! __has_phase_defined_up_to install; then
cd "${WORKDIR}"
else
die "The source directory '${S}' doesn't exist"
fi
- vecho
- vecho ">>> Install ${PF} into ${D} category ${CATEGORY}"
+ __vecho
+ __vecho ">>> Install ${PF} into ${D} category ${CATEGORY}"
#our custom version of libtool uses $S and $D to fix
#invalid paths in .la files
export S D
@@ -541,12 +544,12 @@ dyn_install() {
export _E_EXEDESTTREE_=""
export _E_DOCDESTTREE_=""
- ebuild_phase src_install
+ __ebuild_phase src_install
>> "$PORTAGE_BUILDDIR/.installed" || \
die "Failed to create $PORTAGE_BUILDDIR/.installed"
- vecho ">>> Completed installing ${PF} into ${D}"
- vecho
- ebuild_phase post_src_install
+ __vecho ">>> Completed installing ${PF} into ${D}"
+ __vecho
+ __ebuild_phase post_src_install
cd "${PORTAGE_BUILDDIR}"/build-info
set -f
@@ -560,10 +563,15 @@ dyn_install() {
if [[ $CATEGORY != virtual ]] ; then
for f in ASFLAGS CBUILD CC CFLAGS CHOST CTARGET CXX \
CXXFLAGS EXTRA_ECONF EXTRA_EINSTALL EXTRA_MAKE \
- LDFLAGS LIBCFLAGS LIBCXXFLAGS ; do
+ LDFLAGS LIBCFLAGS LIBCXXFLAGS QA_CONFIGURE_OPTIONS \
+ QA_DESKTOP_FILE ; do
x=$(echo -n ${!f})
[[ -n $x ]] && echo "$x" > $f
done
+ # whitespace preserved
+ for f in QA_AM_MAINTAINER_MODE ; do
+ [[ -n ${!f} ]] && echo "${!f}" > $f
+ done
fi
echo "${USE}" > USE
echo "${EAPI:-0}" > EAPI
@@ -571,24 +579,22 @@ dyn_install() {
# Save EPREFIX, since it makes it easy to use chpathtool to
# adjust the content of a binary package so that it will
# work in a different EPREFIX from the one it was built for.
- case "${EAPI:-0}" in
- 0|1|2)
- [[ " ${FEATURES} " == *" force-prefix "* ]] && \
- [ -n "${EPREFIX}" ] && echo "${EPREFIX}" > EPREFIX
- ;;
- *)
- [ -n "${EPREFIX}" ] && echo "${EPREFIX}" > EPREFIX
- ;;
- esac
+ if ___eapi_has_prefix_variables && [[ -n ${EPREFIX} ]]; then
+ echo "${EPREFIX}" > EPREFIX
+ fi
set +f
# local variables can leak into the saved environment.
unset f
- save_ebuild_env --exclude-init-phases | filter_readonly_variables \
- --filter-path --filter-sandbox --allow-extra-vars > environment
- assert "save_ebuild_env failed"
+ # Use safe cwd, avoiding unsafe import for bug #469338.
+ cd "${PORTAGE_PYM_PATH}"
+ __save_ebuild_env --exclude-init-phases | __filter_readonly_variables \
+ --filter-path --filter-sandbox --allow-extra-vars > \
+ "${PORTAGE_BUILDDIR}"/build-info/environment
+ assert "__save_ebuild_env failed"
+ cd "${PORTAGE_BUILDDIR}"/build-info || die
${PORTAGE_BZIP2_COMMAND} -f9 environment
@@ -601,15 +607,7 @@ dyn_install() {
trap - SIGINT SIGQUIT
}
-dyn_preinst() {
- if [ -z "${D}" ]; then
- eerror "${FUNCNAME}: D is unset"
- return 1
- fi
- ebuild_phase_with_hooks pkg_preinst
-}
-
-dyn_help() {
+__dyn_help() {
echo
echo "Portage"
echo "Copyright 1999-2010 Gentoo Foundation"
@@ -625,6 +623,7 @@ dyn_help() {
echo " pretend : execute package specific pretend actions"
echo " setup : execute package specific setup actions"
echo " fetch : download source archive(s) and patches"
+ echo " nofetch : display special fetch instructions"
echo " digest : create a manifest file for the package"
echo " manifest : create a manifest file for the package"
echo " unpack : unpack sources (auto-dependencies if needed)"
@@ -672,19 +671,18 @@ dyn_help() {
echo
}
-# @FUNCTION: _ebuild_arg_to_phase
+# @FUNCTION: __ebuild_arg_to_phase
# @DESCRIPTION:
# Translate a known ebuild(1) argument into the precise
# name of its corresponding ebuild phase.
-_ebuild_arg_to_phase() {
- [ $# -ne 2 ] && die "expected exactly 2 args, got $#: $*"
- local eapi=$1
- local arg=$2
+__ebuild_arg_to_phase() {
+ [ $# -ne 1 ] && die "expected exactly 1 arg, got $#: $*"
+ local arg=$1
local phase_func=""
case "$arg" in
pretend)
- ! has $eapi 0 1 2 3 3_pre2 && \
+ ___eapi_has_pkg_pretend && \
phase_func=pkg_pretend
;;
setup)
@@ -697,11 +695,11 @@ _ebuild_arg_to_phase() {
phase_func=src_unpack
;;
prepare)
- ! has $eapi 0 1 && \
+ ___eapi_has_src_prepare && \
phase_func=src_prepare
;;
configure)
- ! has $eapi 0 1 && \
+ ___eapi_has_src_configure && \
phase_func=src_configure
;;
compile)
@@ -732,7 +730,7 @@ _ebuild_arg_to_phase() {
return 0
}
-_ebuild_phase_funcs() {
+__ebuild_phase_funcs() {
[ $# -ne 2 ] && die "expected exactly 2 args, got $#: $*"
local eapi=$1
local phase_func=$2
@@ -742,20 +740,20 @@ _ebuild_phase_funcs() {
for x in pkg_nofetch src_unpack src_test ; do
declare -F $x >/dev/null || \
- eval "$x() { _eapi0_$x \"\$@\" ; }"
+ eval "$x() { __eapi0_$x \"\$@\" ; }"
done
- case $eapi in
+ case "$eapi" in
0|1)
if ! declare -F src_compile >/dev/null ; then
- case $eapi in
+ case "$eapi" in
0)
- src_compile() { _eapi0_src_compile "$@" ; }
+ src_compile() { __eapi0_src_compile "$@" ; }
;;
*)
- src_compile() { _eapi1_src_compile "$@" ; }
+ src_compile() { __eapi1_src_compile "$@" ; }
;;
esac
fi
@@ -775,35 +773,35 @@ _ebuild_phase_funcs() {
*)
declare -F src_configure >/dev/null || \
- src_configure() { _eapi2_src_configure "$@" ; }
+ src_configure() { __eapi2_src_configure "$@" ; }
declare -F src_compile >/dev/null || \
- src_compile() { _eapi2_src_compile "$@" ; }
+ src_compile() { __eapi2_src_compile "$@" ; }
- has $eapi 2 3 3_pre2 || declare -F src_install >/dev/null || \
- src_install() { _eapi4_src_install "$@" ; }
+ has $eapi 2 3 || declare -F src_install >/dev/null || \
+ src_install() { __eapi4_src_install "$@" ; }
if has $phase_func $default_phases ; then
- _eapi2_pkg_nofetch () { _eapi0_pkg_nofetch "$@" ; }
- _eapi2_src_unpack () { _eapi0_src_unpack "$@" ; }
- _eapi2_src_prepare () { true ; }
- _eapi2_src_test () { _eapi0_src_test "$@" ; }
- _eapi2_src_install () { die "$FUNCNAME is not supported" ; }
+ __eapi2_pkg_nofetch () { __eapi0_pkg_nofetch "$@" ; }
+ __eapi2_src_unpack () { __eapi0_src_unpack "$@" ; }
+ __eapi2_src_prepare () { true ; }
+ __eapi2_src_test () { __eapi0_src_test "$@" ; }
+ __eapi2_src_install () { die "$FUNCNAME is not supported" ; }
for x in $default_phases ; do
- eval "default_$x() { _eapi2_$x \"\$@\" ; }"
+ eval "default_$x() { __eapi2_$x \"\$@\" ; }"
done
- eval "default() { _eapi2_$phase_func \"\$@\" ; }"
+ eval "default() { __eapi2_$phase_func \"\$@\" ; }"
- case $eapi in
+ case "$eapi" in
2|3)
;;
*)
- eval "default_src_install() { _eapi4_src_install \"\$@\" ; }"
+ eval "default_src_install() { __eapi4_src_install \"\$@\" ; }"
[[ $phase_func = src_install ]] && \
- eval "default() { _eapi4_$phase_func \"\$@\" ; }"
+ eval "default() { __eapi4_$phase_func \"\$@\" ; }"
;;
esac
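
Renames aside, __ebuild_phase_funcs still stamps out the default_* wrappers with eval, so each phase gets a forwarding function bound to the right EAPI helper. The pattern reduced to two phases; the helper bodies here are stand-ins:

    # eval-defined wrappers: default_<phase> forwards to __eapi2_<phase>.
    __eapi2_src_unpack()  { echo "eapi2 unpack ran"; }
    __eapi2_src_prepare() { true; }
    for x in src_unpack src_prepare; do
        eval "default_${x}() { __eapi2_${x} \"\$@\" ; }"
    done
    default_src_unpack    # prints: eapi2 unpack ran
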
@@ -825,14 +823,14 @@ _ebuild_phase_funcs() {
esac
}
-ebuild_main() {
+__ebuild_main() {
# Subshell/helper die support (must export for the die helper).
# Since this function is typically executed in a subshell,
# setup EBUILD_MASTER_PID to refer to the current $BASHPID,
# which seems to give the best results when further
# nested subshells call die.
- export EBUILD_MASTER_PID=$BASHPID
+ export EBUILD_MASTER_PID=${BASHPID:-$(__bashpid)}
trap 'exit 1' SIGTERM
#a reasonable default for $S
@@ -861,37 +859,39 @@ ebuild_main() {
# respect FEATURES="-ccache".
has ccache $FEATURES || export CCACHE_DISABLE=1
- local phase_func=$(_ebuild_arg_to_phase "$EAPI" "$EBUILD_PHASE")
- [[ -n $phase_func ]] && _ebuild_phase_funcs "$EAPI" "$phase_func"
+ local phase_func=$(__ebuild_arg_to_phase "$EBUILD_PHASE")
+ [[ -n $phase_func ]] && __ebuild_phase_funcs "$EAPI" "$phase_func"
unset phase_func
- source_all_bashrcs
+ __source_all_bashrcs
case ${1} in
nofetch)
- ebuild_phase_with_hooks pkg_nofetch
+ __ebuild_phase_with_hooks pkg_nofetch
;;
- prerm|postrm|postinst|config|info)
+ prerm|postrm|preinst|postinst|config|info)
if has "${1}" config info && \
! declare -F "pkg_${1}" >/dev/null ; then
ewarn "pkg_${1}() is not defined: '${EBUILD##*/}'"
fi
export SANDBOX_ON="0"
if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
- ebuild_phase_with_hooks pkg_${1}
+ __ebuild_phase_with_hooks pkg_${1}
else
set -x
- ebuild_phase_with_hooks pkg_${1}
+ __ebuild_phase_with_hooks pkg_${1}
set +x
fi
- if [[ $EBUILD_PHASE == postinst ]] && [[ -n $PORTAGE_UPDATE_ENV ]]; then
+ if [[ -n $PORTAGE_UPDATE_ENV ]] ; then
# Update environment.bz2 in case installation phases
# need to pass some variables to uninstallation phases.
- save_ebuild_env --exclude-init-phases | \
- filter_readonly_variables --filter-path \
+ # Use safe cwd, avoiding unsafe import for bug #469338.
+ cd "${PORTAGE_PYM_PATH}"
+ __save_ebuild_env --exclude-init-phases | \
+ __filter_readonly_variables --filter-path \
--filter-sandbox --allow-extra-vars \
| ${PORTAGE_BZIP2_COMMAND} -c -f9 > "$PORTAGE_UPDATE_ENV"
- assert "save_ebuild_env failed"
+ assert "__save_ebuild_env failed"
fi
;;
unpack|prepare|configure|compile|test|clean|install)
@@ -917,7 +917,7 @@ ebuild_main() {
x=LIBDIR_$ABI
[ -z "$PKG_CONFIG_PATH" -a -n "$ABI" -a -n "${!x}" ] && \
- export PKG_CONFIG_PATH=/usr/${!x}/pkgconfig
+ export PKG_CONFIG_PATH=${EPREFIX}/usr/${!x}/pkgconfig
if has noauto $FEATURES && \
[[ ! -f $PORTAGE_BUILDDIR/.unpacked ]] ; then
@@ -952,24 +952,24 @@ ebuild_main() {
esac
if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
- dyn_${1}
+ __dyn_${1}
else
set -x
- dyn_${1}
+ __dyn_${1}
set +x
fi
export SANDBOX_ON="0"
;;
- help|pretend|setup|preinst)
+ help|pretend|setup)
#pkg_setup needs to be out of the sandbox for tmp file creation;
#for example, awking and piping a file in /tmp requires a temp file to be created
#in /etc. If pkg_setup is in the sandbox, both our lilo and apache ebuilds break.
export SANDBOX_ON="0"
if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
- dyn_${1}
+ __dyn_${1}
else
set -x
- dyn_${1}
+ __dyn_${1}
set +x
fi
;;
@@ -979,7 +979,7 @@ ebuild_main() {
export SANDBOX_ON="1"
echo "Unrecognized arg '${1}'"
echo
- dyn_help
+ __dyn_help
exit 1
;;
esac
@@ -987,11 +987,13 @@ ebuild_main() {
# Save the env only for relevant phases.
if ! has "${1}" clean help info nofetch ; then
umask 002
- save_ebuild_env | filter_readonly_variables \
+ # Use safe cwd, avoiding unsafe import for bug #469338.
+ cd "${PORTAGE_PYM_PATH}"
+ __save_ebuild_env | __filter_readonly_variables \
--filter-features > "$T/environment"
- assert "save_ebuild_env failed"
- chown portage:portage "$T/environment" &>/dev/null
- chmod g+w "$T/environment" &>/dev/null
+ assert "__save_ebuild_env failed"
+ chgrp "${PORTAGE_GRPNAME:-portage}" "$T/environment"
+ chmod g+w "$T/environment"
fi
[[ -n $PORTAGE_EBUILD_EXIT_FILE ]] && > "$PORTAGE_EBUILD_EXIT_FILE"
if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
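
The chgrp call above replaces a hard-coded 'chown portage:portage' with the configurable ${PORTAGE_GRPNAME:-portage} fallback expansion. A quick illustration of that expansion, plain bash with no Portage required:

    PORTAGE_GRPNAME=
    echo "${PORTAGE_GRPNAME:-portage}"   # -> portage (unset or empty falls back)
    PORTAGE_GRPNAME=wheel
    echo "${PORTAGE_GRPNAME:-portage}"   # -> wheel
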
diff --git a/bin/phase-helpers.sh b/bin/phase-helpers.sh
index 946520b20..412decbe0 100644
--- a/bin/phase-helpers.sh
+++ b/bin/phase-helpers.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
export DESTTREE=/usr
@@ -11,6 +11,8 @@ export EXEOPTIONS="-m0755"
export LIBOPTIONS="-m0644"
export DIROPTIONS="-m0755"
export MOPREFIX=${PN}
+# Do not compress files which are smaller than this (in bytes). #169260
+export PORTAGE_DOCOMPRESS_SIZE_LIMIT="128"
declare -a PORTAGE_DOCOMPRESS=( /usr/share/{doc,info,man} )
declare -a PORTAGE_DOCOMPRESS_SKIP=( /usr/share/doc/${PF}/html )
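
PORTAGE_DOCOMPRESS_SIZE_LIMIT is only exported here; the enforcement lives in the ecompress helpers, which are outside this hunk. A hypothetical sketch (using GNU stat) of the kind of size test a consumer might perform:

    f=usr/share/doc/example/README
    if [[ $(stat -c %s "${f}") -lt ${PORTAGE_DOCOMPRESS_SIZE_LIMIT:-128} ]]; then
        echo "skipping compression of small file: ${f}"
    fi
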
@@ -19,13 +21,14 @@ into() {
export DESTTREE=""
else
export DESTTREE=$1
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
if [ ! -d "${ED}${DESTTREE}" ]; then
install -d "${ED}${DESTTREE}"
local ret=$?
if [[ $ret -ne 0 ]] ; then
- helpers_die "${FUNCNAME[0]} failed"
+ __helpers_die "${FUNCNAME[0]} failed"
return $ret
fi
fi
@@ -37,13 +40,14 @@ insinto() {
export INSDESTTREE=""
else
export INSDESTTREE=$1
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
if [ ! -d "${ED}${INSDESTTREE}" ]; then
install -d "${ED}${INSDESTTREE}"
local ret=$?
if [[ $ret -ne 0 ]] ; then
- helpers_die "${FUNCNAME[0]} failed"
+ __helpers_die "${FUNCNAME[0]} failed"
return $ret
fi
fi
@@ -55,13 +59,14 @@ exeinto() {
export _E_EXEDESTTREE_=""
else
export _E_EXEDESTTREE_="$1"
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
if [ ! -d "${ED}${_E_EXEDESTTREE_}" ]; then
install -d "${ED}${_E_EXEDESTTREE_}"
local ret=$?
if [[ $ret -ne 0 ]] ; then
- helpers_die "${FUNCNAME[0]} failed"
+ __helpers_die "${FUNCNAME[0]} failed"
return $ret
fi
fi
@@ -73,13 +78,14 @@ docinto() {
export _E_DOCDESTTREE_=""
else
export _E_DOCDESTTREE_="$1"
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
if [ ! -d "${ED}usr/share/doc/${PF}/${_E_DOCDESTTREE_}" ]; then
install -d "${ED}usr/share/doc/${PF}/${_E_DOCDESTTREE_}"
local ret=$?
if [[ $ret -ne 0 ]] ; then
- helpers_die "${FUNCNAME[0]} failed"
+ __helpers_die "${FUNCNAME[0]} failed"
return $ret
fi
fi
@@ -112,13 +118,13 @@ libopts() {
}
docompress() {
- has "${EAPI}" 0 1 2 3 && die "'docompress' not supported in this EAPI"
+ ___eapi_has_docompress || die "'docompress' not supported in this EAPI"
local f g
if [[ $1 = "-x" ]]; then
shift
for f; do
- f=$(strip_duplicate_slashes "${f}"); f=${f%/}
+ f=$(__strip_duplicate_slashes "${f}"); f=${f%/}
[[ ${f:0:1} = / ]] || f="/${f}"
for g in "${PORTAGE_DOCOMPRESS_SKIP[@]}"; do
[[ ${f} = "${g}" ]] && continue 2
@@ -127,7 +133,7 @@ docompress() {
done
else
for f; do
- f=$(strip_duplicate_slashes "${f}"); f=${f%/}
+ f=$(__strip_duplicate_slashes "${f}"); f=${f%/}
[[ ${f:0:1} = / ]] || f="/${f}"
for g in "${PORTAGE_DOCOMPRESS[@]}"; do
[[ ${f} = "${g}" ]] && continue 2
@@ -137,29 +143,6 @@ docompress() {
fi
}
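
For context, typical ebuild usage of the helper above (EAPI 4 or later, per the ___eapi_has_docompress guard); the paths are illustrative:

    docompress /usr/share/foo/manuals            # mark a tree for compression
    docompress -x /usr/share/doc/${PF}/examples  # exempt a tree from compression
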
-# adds ".keep" files so that dirs aren't auto-cleaned
-keepdir() {
- dodir "$@"
- local x
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local ED=${D} ;; esac
- if [ "$1" == "-R" ] || [ "$1" == "-r" ]; then
- shift
- find "$@" -type d -printf "${ED}%p/.keep_${CATEGORY}_${PN}-${SLOT}\n" \
- | tr "\n" "\0" | \
- while read -r -d $'\0' ; do
- >> "$REPLY" || \
- die "Failed to recursively create .keep files"
- done
- else
- for x in "$@"; do
- >> "${ED}${x}/.keep_${CATEGORY}_${PN}-${SLOT}" || \
- die "Failed to create .keep in ${ED}${x}"
- done
- fi
-}
-
-
useq() {
has $EBUILD_PHASE prerm postrm || eqawarn \
"QA Notice: The 'useq' function is deprecated (replaced by 'use')"
@@ -174,6 +157,17 @@ usev() {
return 1
}
+if ___eapi_has_usex; then
+ usex() {
+ if use "$1"; then
+ echo "${2-yes}$4"
+ else
+ echo "${3-no}$5"
+ fi
+ return 0
+ }
+fi
+
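
A usage sketch for the usex helper defined above, as it would appear inside an ebuild: with one argument it echoes "yes" or "no", and the optional arguments override the values and append suffixes:

    usex ssl                   # -> yes (if USE=ssl) or no
    usex ssl openssl no        # -> openssl or no
    myconf="--with-crypto=$(usex ssl openssl no)"
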
use() {
local u=$1
local found=0
@@ -194,18 +188,31 @@ use() {
#fi
true
- # Make sure we have this USE flag in IUSE
- elif [[ -n $PORTAGE_IUSE && -n $EBUILD_PHASE ]] ; then
- [[ $u =~ $PORTAGE_IUSE ]] || \
+ # Make sure we have this USE flag in IUSE, but exempt binary
+ # packages for API consumers like Entropy which do not require
+ # a full profile with IUSE_IMPLICIT and stuff (see bug #456830).
+ elif [[ -n $PORTAGE_IUSE && -n $EBUILD_PHASE &&
+ -n $PORTAGE_INTERNAL_CALLER ]] ; then
+ if [[ ! $u =~ $PORTAGE_IUSE ]] ; then
+ if [[ ! ${EAPI} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]] ; then
+ # This is only strict starting with EAPI 5, since implicit IUSE
+ # is not well defined for earlier EAPIs (see bug #449708).
+ die "USE Flag '${u}' not in IUSE for ${CATEGORY}/${PF}"
+ fi
eqawarn "QA Notice: USE Flag '${u}' not" \
"in IUSE for ${CATEGORY}/${PF}"
+ fi
fi
+ local IFS=$' \t\n' prev_shopts=$- ret
+ set -f
if has ${u} ${USE} ; then
- return ${found}
+ ret=${found}
else
- return $((!found))
+ ret=$((!found))
fi
+ [[ ${prev_shopts} == *f* ]] || set +f
+ return ${ret}
}
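
The set -f/IFS handling added above disables pathname expansion while ${u} and ${USE} undergo word splitting, then restores the caller's setting. Without it, a glob-like token would expand against the filesystem, as this standalone snippet shows:

    prev_shopts=$-
    set -f
    flags='* ?'
    for w in ${flags}; do echo "${w}"; done   # prints literal * and ?
    [[ ${prev_shopts} == *f* ]] || set +f     # restore the caller's setting
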
use_with() {
@@ -215,7 +222,7 @@ use_with() {
return 1
fi
- if ! has "${EAPI:-0}" 0 1 2 3 ; then
+ if ___eapi_use_enable_and_use_with_support_empty_third_argument; then
local UW_SUFFIX=${3+=$3}
else
local UW_SUFFIX=${3:+=$3}
@@ -237,7 +244,7 @@ use_enable() {
return 1
fi
- if ! has "${EAPI:-0}" 0 1 2 3 ; then
+ if ___eapi_use_enable_and_use_with_support_empty_third_argument; then
local UE_SUFFIX=${3+=$3}
else
local UE_SUFFIX=${3:+=$3}
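
The two suffix expansions above differ only when a third argument is passed but empty: ${3+=$3} still emits the "=", while ${3:+=$3} emits nothing. A standalone demonstration:

    set -- gtk gtk ""
    echo "--with-${2}${3+=$3}"    # -> --with-gtk=
    echo "--with-${2}${3:+=$3}"   # -> --with-gtk
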
@@ -255,15 +262,19 @@ use_enable() {
unpack() {
local srcdir
local x
- local y
+ local y y_insensitive
+ local suffix suffix_insensitive
local myfail
local eapi=${EAPI:-0}
[ -z "$*" ] && die "Nothing passed to the 'unpack' command"
for x in "$@"; do
- vecho ">>> Unpacking ${x} to ${PWD}"
+ __vecho ">>> Unpacking ${x} to ${PWD}"
+ suffix=${x##*.}
+ suffix_insensitive=$(LC_ALL=C tr "[:upper:]" "[:lower:]" <<< "${suffix}")
y=${x%.*}
y=${y##*.}
+ y_insensitive=$(LC_ALL=C tr "[:upper:]" "[:lower:]" <<< "${y}")
if [[ ${x} == "./"* ]] ; then
srcdir=""
@@ -276,10 +287,16 @@ unpack() {
fi
[[ ! -s ${srcdir}${x} ]] && die "${x} does not exist"
- _unpack_tar() {
- if [ "${y}" == "tar" ]; then
+ __unpack_tar() {
+ if [[ ${y_insensitive} == tar ]] ; then
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ tar != ${y} ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "secondary suffix '${y}' which is unofficially" \
+ "supported with EAPI '${EAPI}'. Instead use 'tar'."
+ fi
$1 -c -- "$srcdir$x" | tar xof -
- assert_sigpipe_ok "$myfail"
+ __assert_sigpipe_ok "$myfail"
else
local cwd_dest=${x##*/}
cwd_dest=${cwd_dest%.*}
@@ -288,30 +305,67 @@ unpack() {
}
myfail="failure unpacking ${x}"
- case "${x##*.}" in
+ case "${suffix_insensitive}" in
tar)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ tar != ${suffix} ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'tar'."
+ fi
tar xof "$srcdir$x" || die "$myfail"
;;
tgz)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ tgz != ${suffix} ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'tgz'."
+ fi
tar xozf "$srcdir$x" || die "$myfail"
;;
tbz|tbz2)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " tbz tbz2 " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'tbz' or 'tbz2'."
+ fi
${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -c -- "$srcdir$x" | tar xof -
- assert_sigpipe_ok "$myfail"
+ __assert_sigpipe_ok "$myfail"
;;
- ZIP|zip|jar)
+ zip|jar)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " ZIP zip jar " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'." \
+ "Instead use 'ZIP', 'zip', or 'jar'."
+ fi
# unzip will interactively prompt under some error conditions,
# as reported in bug #336285
( set +x ; while true ; do echo n || break ; done ) | \
unzip -qo "${srcdir}${x}" || die "$myfail"
;;
- gz|Z|z)
- _unpack_tar "gzip -d"
+ gz|z)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " gz z Z " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'gz', 'z', or 'Z'."
+ fi
+ __unpack_tar "gzip -d"
;;
bz2|bz)
- _unpack_tar "${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d}"
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " bz bz2 " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'bz' or 'bz2'."
+ fi
+ __unpack_tar "${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d}"
;;
- 7Z|7z)
+ 7z)
local my_output
my_output="$(7z x -y "${srcdir}${x}")"
if [ $? -ne 0 ]; then
@@ -319,16 +373,41 @@ unpack() {
die "$myfail"
fi
;;
- RAR|rar)
+ rar)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " rar RAR " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'rar' or 'RAR'."
+ fi
unrar x -idq -o+ "${srcdir}${x}" || die "$myfail"
;;
- LHa|LHA|lha|lzh)
+ lha|lzh)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " LHA LHa lha lzh " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'." \
+ "Instead use 'LHA', 'LHa', 'lha', or 'lzh'."
+ fi
lha xfq "${srcdir}${x}" || die "$myfail"
;;
a)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " a " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'a'."
+ fi
ar x "${srcdir}${x}" || die "$myfail"
;;
deb)
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " deb " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'deb'."
+ fi
# Unpacking .deb archives can not always be done with
# `ar`. For instance on AIX this doesn't work out. If
# we have `deb2targz` installed, prefer it over `ar` for
@@ -356,17 +435,29 @@ unpack() {
fi
;;
lzma)
- _unpack_tar "lzma -d"
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " lzma " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'lzma'."
+ fi
+ __unpack_tar "lzma -d"
;;
xz)
- if has $eapi 0 1 2 ; then
- vecho "unpack ${x}: file format not recognized. Ignoring."
+ if ___eapi_unpack_is_case_sensitive && \
+ [[ " xz " != *" ${suffix} "* ]] ; then
+ eqawarn "QA Notice: unpack called with" \
+ "suffix '${suffix}' which is unofficially supported" \
+ "with EAPI '${EAPI}'. Instead use 'xz'."
+ fi
+ if ___eapi_unpack_supports_xz; then
+ __unpack_tar "xz -d"
else
- _unpack_tar "xz -d"
+ __vecho "unpack ${x}: file format not recognized. Ignoring."
fi
;;
*)
- vecho "unpack ${x}: file format not recognized. Ignoring."
+ __vecho "unpack ${x}: file format not recognized. Ignoring."
;;
esac
done
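
The case-insensitive suffix matching introduced above hinges on a simple tr lowercasing, which can be tried standalone:

    x=archive.TAR.BZ2
    suffix=${x##*.}
    suffix_insensitive=$(LC_ALL=C tr "[:upper:]" "[:lower:]" <<< "${suffix}")
    echo "${suffix} -> ${suffix_insensitive}"   # BZ2 -> bz2
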
@@ -378,22 +469,24 @@ unpack() {
econf() {
local x
+ local pid=${BASHPID:-$(__bashpid)}
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local EPREFIX= ;; esac
+ if ! ___eapi_has_prefix_variables; then
+ local EPREFIX=
+ fi
- _hasg() {
+ __hasg() {
local x s=$1
shift
for x ; do [[ ${x} == ${s} ]] && echo "${x}" && return 0 ; done
return 1
}
- _hasgq() { _hasg "$@" >/dev/null ; }
+ __hasgq() { __hasg "$@" >/dev/null ; }
- local phase_func=$(_ebuild_arg_to_phase "$EAPI" "$EBUILD_PHASE")
+ local phase_func=$(__ebuild_arg_to_phase "$EBUILD_PHASE")
if [[ -n $phase_func ]] ; then
- if has "$EAPI" 0 1 ; then
+ if ! ___eapi_has_src_configure; then
[[ $phase_func != src_compile ]] && \
eqawarn "QA Notice: econf called in" \
"$phase_func instead of src_compile"
@@ -408,23 +501,44 @@ econf() {
if [ -x "${ECONF_SOURCE}/configure" ]; then
if [[ -n $CONFIG_SHELL && \
"$(head -n1 "$ECONF_SOURCE/configure")" =~ ^'#!'[[:space:]]*/bin/sh([[:space:]]|$) ]] ; then
- sed -e "1s:^#![[:space:]]*/bin/sh:#!$CONFIG_SHELL:" -i "$ECONF_SOURCE/configure" || \
- die "Substition of shebang in '$ECONF_SOURCE/configure' failed"
+ # preserve timestamp, see bug #440304
+ touch -r "${ECONF_SOURCE}/configure" "${ECONF_SOURCE}/configure._portage_tmp_.${pid}" || die
+ sed -i \
+ -e "1s:^#![[:space:]]*/bin/sh:#!$CONFIG_SHELL:" \
+ "${ECONF_SOURCE}/configure" \
+ || die "Substition of shebang in '${ECONF_SOURCE}/configure' failed"
+ touch -r "${ECONF_SOURCE}/configure._portage_tmp_.${pid}" "${ECONF_SOURCE}/configure" || die
+ rm -f "${ECONF_SOURCE}/configure._portage_tmp_.${pid}"
fi
if [ -e "${EPREFIX}"/usr/share/gnuconfig/ ]; then
find "${WORKDIR}" -type f '(' \
-name config.guess -o -name config.sub ')' -print0 | \
while read -r -d $'\0' x ; do
- vecho " * econf: updating ${x/${WORKDIR}\/} with ${EPREFIX}/usr/share/gnuconfig/${x##*/}"
- cp -f "${EPREFIX}"/usr/share/gnuconfig/"${x##*/}" "${x}"
+ __vecho " * econf: updating ${x/${WORKDIR}\/} with ${EPREFIX}/usr/share/gnuconfig/${x##*/}"
+ # Make sure we do this atomically in case we're run in parallel. #487478
+ cp -f "${EPREFIX}"/usr/share/gnuconfig/"${x##*/}" "${x}.${pid}"
+ mv -f "${x}.${pid}" "${x}"
done
fi
- # EAPI=4 adds --disable-dependency-tracking to econf
- if ! has "$EAPI" 0 1 2 3 3_pre2 && \
- "${ECONF_SOURCE}/configure" --help 2>/dev/null | \
- grep -q disable-dependency-tracking ; then
- set -- --disable-dependency-tracking "$@"
+ if ___eapi_econf_passes_--disable-dependency-tracking || ___eapi_econf_passes_--disable-silent-rules; then
+ local conf_help=$("${ECONF_SOURCE}/configure" --help 2>/dev/null)
+
+ if ___eapi_econf_passes_--disable-dependency-tracking; then
+ case "${conf_help}" in
+ *--disable-dependency-tracking*)
+ set -- --disable-dependency-tracking "$@"
+ ;;
+ esac
+ fi
+
+ if ___eapi_econf_passes_--disable-silent-rules; then
+ case "${conf_help}" in
+ *--disable-silent-rules*)
+ set -- --disable-silent-rules "$@"
+ ;;
+ esac
+ fi
fi
# if the profile defines a location to install libs to aside from default, pass it on.
@@ -433,16 +547,19 @@ econf() {
if [[ -n ${ABI} && -n ${!LIBDIR_VAR} ]] ; then
CONF_LIBDIR=${!LIBDIR_VAR}
fi
- if [[ -n ${CONF_LIBDIR} ]] && ! _hasgq --libdir=\* "$@" ; then
- export CONF_PREFIX=$(_hasg --exec-prefix=\* "$@")
- [[ -z ${CONF_PREFIX} ]] && CONF_PREFIX=$(_hasg --prefix=\* "$@")
+ if [[ -n ${CONF_LIBDIR} ]] && ! __hasgq --libdir=\* "$@" ; then
+ export CONF_PREFIX=$(__hasg --exec-prefix=\* "$@")
+ [[ -z ${CONF_PREFIX} ]] && CONF_PREFIX=$(__hasg --prefix=\* "$@")
: ${CONF_PREFIX:=${EPREFIX}/usr}
CONF_PREFIX=${CONF_PREFIX#*=}
[[ ${CONF_PREFIX} != /* ]] && CONF_PREFIX="/${CONF_PREFIX}"
[[ ${CONF_LIBDIR} != /* ]] && CONF_LIBDIR="/${CONF_LIBDIR}"
- set -- --libdir="$(strip_duplicate_slashes ${CONF_PREFIX}${CONF_LIBDIR})" "$@"
+ set -- --libdir="$(__strip_duplicate_slashes "${CONF_PREFIX}${CONF_LIBDIR}")" "$@"
fi
+ # Handle arguments containing quoted whitespace (see bug #457136).
+ eval "local -a EXTRA_ECONF=(${EXTRA_ECONF})"
+
set -- \
--prefix="${EPREFIX}"/usr \
${CBUILD:+--build=${CBUILD}} \
@@ -454,8 +571,8 @@ econf() {
--sysconfdir="${EPREFIX}"/etc \
--localstatedir="${EPREFIX}"/var/lib \
"$@" \
- ${EXTRA_ECONF}
- vecho "${ECONF_SOURCE}/configure" "$@"
+ "${EXTRA_ECONF[@]}"
+ __vecho "${ECONF_SOURCE}/configure" "$@"
if ! "${ECONF_SOURCE}/configure" "$@" ; then
@@ -476,8 +593,9 @@ econf() {
einstall() {
# CONF_PREFIX is only set if they didn't pass in libdir above.
local LOCAL_EXTRA_EINSTALL="${EXTRA_EINSTALL}"
- [[ " ${FEATURES} " == *" force-prefix "* ]] || \
- case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+ if ! ___eapi_has_prefix_variables; then
+ local ED=${D}
+ fi
LIBDIR_VAR="LIBDIR_${ABI}"
if [ -n "${ABI}" -a -n "${!LIBDIR_VAR}" ]; then
CONF_LIBDIR="${!LIBDIR_VAR}"
@@ -485,7 +603,7 @@ einstall() {
unset LIBDIR_VAR
if [ -n "${CONF_LIBDIR}" ] && [ "${CONF_PREFIX:+set}" = set ]; then
EI_DESTLIBDIR="${D}/${CONF_PREFIX}/${CONF_LIBDIR}"
- EI_DESTLIBDIR="$(strip_duplicate_slashes ${EI_DESTLIBDIR})"
+ EI_DESTLIBDIR="$(__strip_duplicate_slashes "${EI_DESTLIBDIR}")"
LOCAL_EXTRA_EINSTALL="libdir=${EI_DESTLIBDIR} ${LOCAL_EXTRA_EINSTALL}"
unset EI_DESTLIBDIR
fi
@@ -516,7 +634,7 @@ einstall() {
fi
}
-_eapi0_pkg_nofetch() {
+__eapi0_pkg_nofetch() {
[ -z "${SRC_URI}" ] && return
elog "The following are listed in SRC_URI for ${PN}:"
@@ -526,55 +644,59 @@ _eapi0_pkg_nofetch() {
done
}
-_eapi0_src_unpack() {
+__eapi0_src_unpack() {
[[ -n ${A} ]] && unpack ${A}
}
-_eapi0_src_compile() {
+__eapi0_src_compile() {
if [ -x ./configure ] ; then
econf
fi
- _eapi2_src_compile
+ __eapi2_src_compile
}
-_eapi0_src_test() {
+__eapi0_src_test() {
# Since we don't want emake's automatic die
# support (EAPI 4 and later), and we also don't
# want the warning messages that it produces if
# we call it in 'nonfatal' mode, we use emake_cmd
# to emulate the desired parts of emake behavior.
local emake_cmd="${MAKE:-make} ${MAKEOPTS} ${EXTRA_EMAKE}"
- if $emake_cmd -j1 check -n &> /dev/null; then
- vecho ">>> Test phase [check]: ${CATEGORY}/${PF}"
- $emake_cmd -j1 check || \
+ local internal_opts=
+ if ___eapi_default_src_test_disables_parallel_jobs; then
+ internal_opts+=" -j1"
+ fi
+ if $emake_cmd ${internal_opts} check -n &> /dev/null; then
+ __vecho ">>> Test phase [check]: ${CATEGORY}/${PF}"
+ $emake_cmd ${internal_opts} check || \
die "Make check failed. See above for details."
- elif $emake_cmd -j1 test -n &> /dev/null; then
- vecho ">>> Test phase [test]: ${CATEGORY}/${PF}"
- $emake_cmd -j1 test || \
+ elif $emake_cmd ${internal_opts} test -n &> /dev/null; then
+ __vecho ">>> Test phase [test]: ${CATEGORY}/${PF}"
+ $emake_cmd ${internal_opts} test || \
die "Make test failed. See above for details."
else
- vecho ">>> Test phase [none]: ${CATEGORY}/${PF}"
+ __vecho ">>> Test phase [none]: ${CATEGORY}/${PF}"
fi
}
-_eapi1_src_compile() {
- _eapi2_src_configure
- _eapi2_src_compile
+__eapi1_src_compile() {
+ __eapi2_src_configure
+ __eapi2_src_compile
}
-_eapi2_src_configure() {
+__eapi2_src_configure() {
if [[ -x ${ECONF_SOURCE:-.}/configure ]] ; then
econf
fi
}
-_eapi2_src_compile() {
+__eapi2_src_compile() {
if [ -f Makefile ] || [ -f GNUmakefile ] || [ -f makefile ]; then
emake || die "emake failed"
fi
}
-_eapi4_src_install() {
+__eapi4_src_install() {
if [[ -f Makefile || -f GNUmakefile || -f makefile ]] ; then
emake DESTDIR="${D}" install
fi
@@ -593,71 +715,285 @@ _eapi4_src_install() {
}
# @FUNCTION: has_version
-# @USAGE: <DEPEND ATOM>
+# @USAGE: [--host-root] <DEPEND ATOM>
# @DESCRIPTION:
# Return true if given package is installed. Otherwise return false.
# Callers may override the ROOT variable in order to match packages from an
# alternative ROOT.
has_version() {
- local eroot
- case "$EAPI" in
- 0|1|2)
- [[ " ${FEATURES} " == *" force-prefix "* ]] && \
- eroot=${ROOT%/}${EPREFIX}/ || eroot=${ROOT}
- ;;
- *)
- eroot=${ROOT%/}${EPREFIX}/
- ;;
- esac
+ local atom eroot host_root=false root=${ROOT}
+ if [[ $1 == --host-root ]] ; then
+ host_root=true
+ shift
+ fi
+ atom=$1
+ shift
+ [ $# -gt 0 ] && die "${FUNCNAME[0]}: unused argument(s): $*"
+
+ if ${host_root} ; then
+ if ! ___eapi_best_version_and_has_version_support_--host-root; then
+ die "${FUNCNAME[0]}: option --host-root is not supported with EAPI ${EAPI}"
+ fi
+ root=/
+ fi
+
+ if ___eapi_has_prefix_variables; then
+ # [[ ${root} == / ]] would be ambiguous here,
+ # since both prefixes can share root=/ while
+ # having different EPREFIX offsets.
+ if ${host_root} ; then
+ eroot=${root%/}${PORTAGE_OVERRIDE_EPREFIX}/
+ else
+ eroot=${root%/}${EPREFIX}/
+ fi
+ else
+ eroot=${root}
+ fi
if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
- "$PORTAGE_BIN_PATH"/ebuild-ipc has_version "${eroot}" "$1"
+ "$PORTAGE_BIN_PATH"/ebuild-ipc has_version "${eroot}" "${atom}"
else
- PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
- "${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" has_version "${eroot}" "$1"
+ "${PORTAGE_BIN_PATH}/ebuild-helpers/portageq" has_version "${eroot}" "${atom}"
fi
local retval=$?
case "${retval}" in
0|1)
return ${retval}
;;
+ 2)
+ die "${FUNCNAME[0]}: invalid atom: ${atom}"
+ ;;
*)
- die "unexpected portageq exit code: ${retval}"
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ die "${FUNCNAME[0]}: unexpected ebuild-ipc exit code: ${retval}"
+ else
+ die "${FUNCNAME[0]}: unexpected portageq exit code: ${retval}"
+ fi
;;
esac
}
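
Typical ebuild-side usage of the helper above; --host-root is accepted only when the EAPI guard allows it, and an invalid atom now dies explicitly (portageq exit code 2) instead of falling through as an unexpected exit code:

    if has_version --host-root 'dev-lang/python:2.7'; then
        einfo "Python 2.7 is installed in the build host root"
    fi
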
# @FUNCTION: best_version
-# @USAGE: <DEPEND ATOM>
+# @USAGE: [--host-root] <DEPEND ATOM>
# @DESCRIPTION:
# Returns the best/most-current match.
# Callers may override the ROOT variable in order to match packages from an
# alternative ROOT.
best_version() {
- local eroot
- case "$EAPI" in
- 0|1|2)
- [[ " ${FEATURES} " == *" force-prefix "* ]] && \
- eroot=${ROOT%/}${EPREFIX}/ || eroot=${ROOT}
- ;;
- *)
- eroot=${ROOT%/}${EPREFIX}/
- ;;
- esac
+ local atom eroot host_root=false root=${ROOT}
+ if [[ $1 == --host-root ]] ; then
+ host_root=true
+ shift
+ fi
+ atom=$1
+ shift
+ [ $# -gt 0 ] && die "${FUNCNAME[0]}: unused argument(s): $*"
+
+ if ${host_root} ; then
+ if ! ___eapi_best_version_and_has_version_support_--host-root; then
+ die "${FUNCNAME[0]}: option --host-root is not supported with EAPI ${EAPI}"
+ fi
+ root=/
+ fi
+
+ if ___eapi_has_prefix_variables; then
+ # [[ ${root} == / ]] would be ambiguous here,
+ # since both prefixes can share root=/ while
+ # having different EPREFIX offsets.
+ if ${host_root} ; then
+ eroot=${root%/}${PORTAGE_OVERRIDE_EPREFIX}/
+ else
+ eroot=${root%/}${EPREFIX}/
+ fi
+ else
+ eroot=${root}
+ fi
if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
- "$PORTAGE_BIN_PATH"/ebuild-ipc best_version "${eroot}" "$1"
+ "$PORTAGE_BIN_PATH"/ebuild-ipc best_version "${eroot}" "${atom}"
else
- PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
- "${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" best_version "${eroot}" "$1"
+ "${PORTAGE_BIN_PATH}/ebuild-helpers/portageq" best_version "${eroot}" "${atom}"
fi
local retval=$?
case "${retval}" in
0|1)
return ${retval}
;;
+ 2)
+ die "${FUNCNAME[0]}: invalid atom: ${atom}"
+ ;;
*)
- die "unexpected portageq exit code: ${retval}"
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ die "${FUNCNAME[0]}: unexpected ebuild-ipc exit code: ${retval}"
+ else
+ die "${FUNCNAME[0]}: unexpected portageq exit code: ${retval}"
+ fi
;;
esac
}
+
+if ___eapi_has_master_repositories; then
+ master_repositories() {
+ local output repository=$1 retval
+ shift
+ [[ $# -gt 0 ]] && die "${FUNCNAME[0]}: unused argument(s): $*"
+
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ "${PORTAGE_BIN_PATH}/ebuild-ipc" master_repositories "${EROOT}" "${repository}"
+ else
+ output=$("${PORTAGE_BIN_PATH}/ebuild-helpers/portageq" master_repositories "${EROOT}" "${repository}")
+ fi
+ retval=$?
+ [[ -n ${output} ]] && echo "${output}"
+ case "${retval}" in
+ 0|1)
+ return ${retval}
+ ;;
+ 2)
+ die "${FUNCNAME[0]}: invalid repository: ${repository}"
+ ;;
+ *)
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ die "${FUNCNAME[0]}: unexpected ebuild-ipc exit code: ${retval}"
+ else
+ die "${FUNCNAME[0]}: unexpected portageq exit code: ${retval}"
+ fi
+ ;;
+ esac
+ }
+fi
+
+if ___eapi_has_repository_path; then
+ repository_path() {
+ local output repository=$1 retval
+ shift
+ [[ $# -gt 0 ]] && die "${FUNCNAME[0]}: unused argument(s): $*"
+
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ "${PORTAGE_BIN_PATH}/ebuild-ipc" repository_path "${EROOT}" "${repository}"
+ else
+ output=$("${PORTAGE_BIN_PATH}/ebuild-helpers/portageq" get_repo_path "${EROOT}" "${repository}")
+ fi
+ retval=$?
+ [[ -n ${output} ]] && echo "${output}"
+ case "${retval}" in
+ 0|1)
+ return ${retval}
+ ;;
+ 2)
+ die "${FUNCNAME[0]}: invalid repository: ${repository}"
+ ;;
+ *)
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ die "${FUNCNAME[0]}: unexpected ebuild-ipc exit code: ${retval}"
+ else
+ die "${FUNCNAME[0]}: unexpected portageq exit code: ${retval}"
+ fi
+ ;;
+ esac
+ }
+fi
+
+if ___eapi_has_available_eclasses; then
+ available_eclasses() {
+ local output repository=${PORTAGE_REPO_NAME} retval
+ [[ $# -gt 0 ]] && die "${FUNCNAME[0]}: unused argument(s): $*"
+
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ "${PORTAGE_BIN_PATH}/ebuild-ipc" available_eclasses "${EROOT}" "${repository}"
+ else
+ output=$("${PORTAGE_BIN_PATH}/ebuild-helpers/portageq" available_eclasses "${EROOT}" "${repository}")
+ fi
+ retval=$?
+ [[ -n ${output} ]] && echo "${output}"
+ case "${retval}" in
+ 0|1)
+ return ${retval}
+ ;;
+ 2)
+ die "${FUNCNAME[0]}: invalid repository: ${repository}"
+ ;;
+ *)
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ die "${FUNCNAME[0]}: unexpected ebuild-ipc exit code: ${retval}"
+ else
+ die "${FUNCNAME[0]}: unexpected portageq exit code: ${retval}"
+ fi
+ ;;
+ esac
+ }
+fi
+
+if ___eapi_has_eclass_path; then
+ eclass_path() {
+ local eclass=$1 output repository=${PORTAGE_REPO_NAME} retval
+ shift
+ [[ $# -gt 0 ]] && die "${FUNCNAME[0]}: unused argument(s): $*"
+
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ "${PORTAGE_BIN_PATH}/ebuild-ipc" eclass_path "${EROOT}" "${repository}" "${eclass}"
+ else
+ output=$("${PORTAGE_BIN_PATH}/ebuild-helpers/portageq" eclass_path "${EROOT}" "${repository}" "${eclass}")
+ fi
+ retval=$?
+ [[ -n ${output} ]] && echo "${output}"
+ case "${retval}" in
+ 0|1)
+ return ${retval}
+ ;;
+ 2)
+ die "${FUNCNAME[0]}: invalid repository: ${repository}"
+ ;;
+ *)
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ die "${FUNCNAME[0]}: unexpected ebuild-ipc exit code: ${retval}"
+ else
+ die "${FUNCNAME[0]}: unexpected portageq exit code: ${retval}"
+ fi
+ ;;
+ esac
+ }
+fi
+
+if ___eapi_has_license_path; then
+ license_path() {
+ local license=$1 output repository=${PORTAGE_REPO_NAME} retval
+ shift
+ [[ $# -gt 0 ]] && die "${FUNCNAME[0]}: unused argument(s): $*"
+
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ "${PORTAGE_BIN_PATH}/ebuild-ipc" license_path "${EROOT}" "${repository}" "${license}"
+ else
+ output=$("${PORTAGE_BIN_PATH}/ebuild-helpers/portageq" license_path "${EROOT}" "${repository}" "${license}")
+ fi
+ retval=$?
+ [[ -n ${output} ]] && echo "${output}"
+ case "${retval}" in
+ 0|1)
+ return ${retval}
+ ;;
+ 2)
+ die "${FUNCNAME[0]}: invalid repository: ${repository}"
+ ;;
+ *)
+ if [[ -n ${PORTAGE_IPC_DAEMON} ]]; then
+ die "${FUNCNAME[0]}: unexpected ebuild-ipc exit code: ${retval}"
+ else
+ die "${FUNCNAME[0]}: unexpected portageq exit code: ${retval}"
+ fi
+ ;;
+ esac
+ }
+fi
+
+if ___eapi_has_package_manager_build_user; then
+ package_manager_build_user() {
+ echo "${PORTAGE_BUILD_USER}"
+ }
+fi
+
+if ___eapi_has_package_manager_build_group; then
+ package_manager_build_group() {
+ echo "${PORTAGE_BUILD_GROUP}"
+ }
+fi
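
A hypothetical ebuild snippet using the two helpers above, assuming an EAPI for which the guards enable them:

    pkg_setup() {
        einfo "building as $(package_manager_build_user):$(package_manager_build_group)"
    }
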
diff --git a/bin/portageq b/bin/portageq
index d9abb0bad..79818f679 100755
--- a/bin/portageq
+++ b/bin/portageq
@@ -1,15 +1,15 @@
-#!/usr/bin/python -O
-# Copyright 1999-2012 Gentoo Foundation
+#!/usr/bin/python -bO
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
import signal
import sys
# This block ensures that ^C interrupts are handled quietly.
try:
- def exithandler(signum, frame):
+ def exithandler(signum, _frame):
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
sys.exit(128 + signum)
@@ -34,23 +34,22 @@ if os.environ.get("SANDBOX_ON") == "1":
":".join(filter(None, sandbox_write))
del sandbox_write
-try:
- import portage
-except ImportError:
- sys.path.insert(0, pym_path)
- import portage
-del pym_path
-
+sys.path.insert(0, pym_path)
+import portage
+portage._internal_caller = True
from portage import os
from portage.eapi import eapi_has_repo_deps
from portage.util import writemsg, writemsg_stdout
-from portage.output import colormap
+from portage.util._argparse import ArgumentParser
portage.proxy.lazyimport.lazyimport(globals(),
+ 're',
'subprocess',
'_emerge.Package:Package',
'_emerge.RootConfig:RootConfig',
+ '_emerge.is_valid_package_atom:insert_category_into_atom',
'portage.dbapi._expand_new_virt:expand_new_virt',
'portage._sets.base:InternalPackageSet',
+ 'portage.xml.metadata:MetaDataXML'
)
def eval_atom_use(atom):
@@ -59,6 +58,10 @@ def eval_atom_use(atom):
atom = atom.evaluate_conditionals(use)
return atom
+def uses_eroot(function):
+ function.uses_eroot = True
+ return function
+
#-----------------------------------------------------------------------------
#
# To add functionality to this tool, add a function below.
@@ -80,13 +83,14 @@ def eval_atom_use(atom):
# and will automatically add a command by the same name as the function!
#
+@uses_eroot
def has_version(argv):
"""<eroot> <category/package>
Return code 0 if it's available, 1 otherwise.
"""
if (len(argv) < 2):
print("ERROR: insufficient parameters!")
- return 2
+ return 3
warnings = []
@@ -105,9 +109,7 @@ def has_version(argv):
try:
atom = portage.dep.Atom(argv[1], allow_repo=allow_repo, eapi=eapi)
except portage.exception.InvalidAtom as e:
- warnings.append(
- portage._unicode_decode("QA Notice: %s: %s") % \
- ('has_version', e))
+ warnings.append("QA Notice: %s: %s" % ('has_version', e))
atom = eval_atom_use(atom)
if warnings:
@@ -125,16 +127,16 @@ def has_version(argv):
portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1],
noiselevel=-1)
return 2
-has_version.uses_root = True
+@uses_eroot
def best_version(argv):
"""<eroot> <category/package>
Returns category/package-version (without .ebuild).
"""
if (len(argv) < 2):
print("ERROR: insufficient parameters!")
- return 2
+ return 3
warnings = []
@@ -153,9 +155,7 @@ def best_version(argv):
try:
atom = portage.dep.Atom(argv[1], allow_repo=allow_repo, eapi=eapi)
except portage.exception.InvalidAtom as e:
- warnings.append(
- portage._unicode_decode("QA Notice: %s: %s") % \
- ('best_version', e))
+ warnings.append("QA Notice: %s: %s" % ('best_version', e))
atom = eval_atom_use(atom)
if warnings:
@@ -166,9 +166,9 @@ def best_version(argv):
print(portage.best(mylist))
except KeyError:
return 1
-best_version.uses_root = True
+@uses_eroot
def mass_best_version(argv):
"""<eroot> [<category/package>]+
Returns category/package-version (without .ebuild).
@@ -178,23 +178,25 @@ def mass_best_version(argv):
return 2
try:
for pack in argv[1:]:
- mylist=portage.db[argv[0]]["vartree"].dbapi.match(pack)
- print(pack+":"+portage.best(mylist))
+ mylist = portage.db[argv[0]]['vartree'].dbapi.match(pack)
+ print('%s:%s' % (pack, portage.best(mylist)))
except KeyError:
return 1
-mass_best_version.uses_root = True
+
+@uses_eroot
def metadata(argv):
if (len(argv) < 4):
- print("ERROR: insufficient parameters!", file=sys.stderr)
+ print('ERROR: insufficient parameters!', file=sys.stderr)
return 2
eroot, pkgtype, pkgspec = argv[0:3]
metakeys = argv[3:]
type_map = {
- "ebuild":"porttree",
- "binary":"bintree",
- "installed":"vartree"}
+ 'ebuild': 'porttree',
+ 'binary': 'bintree',
+ 'installed': 'vartree'
+ }
if pkgtype not in type_map:
print("Unrecognized package type: '%s'" % pkgtype, file=sys.stderr)
return 1
@@ -202,9 +204,9 @@ def metadata(argv):
repo = portage.dep.dep_getrepo(pkgspec)
pkgspec = portage.dep.remove_slot(pkgspec)
try:
- values = trees[eroot][type_map[pkgtype]].dbapi.aux_get(
- pkgspec, metakeys, myrepo=repo)
- writemsg_stdout(''.join('%s\n' % x for x in values), noiselevel=-1)
+ values = trees[eroot][type_map[pkgtype]].dbapi.aux_get(
+ pkgspec, metakeys, myrepo=repo)
+ writemsg_stdout(''.join('%s\n' % x for x in values), noiselevel=-1)
except KeyError:
print("Package not found: '%s'" % pkgspec, file=sys.stderr)
return 1
@@ -216,8 +218,8 @@ Available keys: %s
""" % ','.join(sorted(x for x in portage.auxdbkeys \
if not x.startswith('UNUSED_')))
-metadata.uses_root = True
+@uses_eroot
def contents(argv):
"""<eroot> <category/package>
List the files that are installed for a given package, with
@@ -238,8 +240,9 @@ def contents(argv):
treetype="vartree", vartree=vartree)
writemsg_stdout(''.join('%s\n' % x for x in sorted(db.getcontents())),
noiselevel=-1)
-contents.uses_root = True
+
+@uses_eroot
def owners(argv):
"""<eroot> [<filename>]+
Given a list of files, print the packages that own the files and which
@@ -253,7 +256,6 @@ def owners(argv):
sys.stderr.flush()
return 2
- from portage import catsplit, dblink
eroot = argv[0]
vardb = portage.db[eroot]["vartree"].dbapi
root = portage.settings['ROOT']
@@ -319,8 +321,8 @@ def owners(argv):
return 0
return 1
-owners.uses_root = True
+@uses_eroot
def is_protected(argv):
"""<eroot> <filename>
Given a single filename, return code 0 if it's protected, 1 otherwise.
@@ -366,8 +368,8 @@ def is_protected(argv):
return 0
return 1
-is_protected.uses_root = True
+@uses_eroot
def filter_protected(argv):
"""<eroot>
Read filenames from stdin and write them to stdout if they are protected.
@@ -395,7 +397,6 @@ def filter_protected(argv):
settings.get("CONFIG_PROTECT_MASK", ""))
protect_obj = ConfigProtect(root, protect, protect_mask)
- protected = 0
errors = 0
for line in sys.stdin:
@@ -417,7 +418,6 @@ def filter_protected(argv):
continue
if protect_obj.isprotected(f):
- protected += 1
out.write("%s\n" % filename)
out.flush()
@@ -426,8 +426,8 @@ def filter_protected(argv):
return 0
-filter_protected.uses_root = True
+@uses_eroot
def best_visible(argv):
"""<eroot> [pkgtype] <atom>
Returns category/package-version (without .ebuild).
@@ -465,8 +465,7 @@ def best_visible(argv):
noiselevel=-1)
return 2
- root_config = RootConfig(portage.settings,
- portage.db[eroot], None)
+ root_config = RootConfig(portage.settings, portage.db[eroot], None)
if hasattr(db, "xmatch"):
cpv_list = db.xmatch("match-all-cpv-only", atom)
@@ -508,11 +507,11 @@ def best_visible(argv):
writemsg_stdout("\n", noiselevel=-1)
return 1
-best_visible.uses_root = True
+@uses_eroot
def mass_best_visible(argv):
- """<root> [<type>] [<category/package>]+
+ """<eroot> [<type>] [<category/package>]+
Returns category/package-version (without .ebuild).
The pkgtype argument defaults to "ebuild" if unspecified,
otherwise it must be one of ebuild, binary, or installed.
@@ -535,9 +534,9 @@ def mass_best_visible(argv):
best_visible([root, pkgtype, pack])
except KeyError:
return 1
-mass_best_visible.uses_root = True
+@uses_eroot
def all_best_visible(argv):
"""<eroot>
Returns all best_visible packages (without .ebuild).
@@ -552,9 +551,9 @@ def all_best_visible(argv):
mybest=portage.best(portage.db[argv[0]]["porttree"].dbapi.match(pkg))
if mybest:
print(mybest)
-all_best_visible.uses_root = True
+@uses_eroot
def match(argv):
"""<eroot> <atom>
Returns a \\n separated list of category/package-version.
@@ -601,8 +600,9 @@ def match(argv):
results = vardb.match(atom)
for cpv in results:
print(cpv)
-match.uses_root = True
+
+@uses_eroot
def expand_virtual(argv):
"""<eroot> <atom>
Returns a \\n separated list of atoms expanded from a
@@ -637,9 +637,8 @@ def expand_virtual(argv):
return os.EX_OK
-expand_virtual.uses_root = True
-def vdb_path(argv):
+def vdb_path(_argv):
"""
Returns the path used for the var(installed) package database for the
set environment/configuration options.
@@ -649,56 +648,79 @@ def vdb_path(argv):
out.flush()
return os.EX_OK
-def gentoo_mirrors(argv):
+def gentoo_mirrors(_argv):
"""
Returns the mirrors set to use in the portage configuration.
"""
print(portage.settings["GENTOO_MIRRORS"])
-def portdir(argv):
+@uses_eroot
+def repositories_configuration(argv):
+ """<eroot>
+ Returns the configuration of repositories.
+ """
+ if len(argv) < 1:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ sys.stdout.write(portage.db[argv[0]]["vartree"].settings.repositories.config_string())
+ sys.stdout.flush()
+
+@uses_eroot
+def repos_config(argv):
+ """
+ <eroot>
+ This is an alias for the repositories_configuration command.
+ """
+ return repositories_configuration(argv)
+
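
Example invocations of the new command and its alias against the root EROOT; the output is a repos.conf-style configuration string:

    portageq repositories_configuration /
    portageq repos_config /
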
+def portdir(_argv):
"""
Returns the PORTDIR path.
+ Deprecated in favor of repositories_configuration command.
"""
+ print("WARNING: 'portageq portdir' is deprecated. Use 'portageq repositories_configuration' instead.", file=sys.stderr)
print(portage.settings["PORTDIR"])
-def config_protect(argv):
+def config_protect(_argv):
"""
Returns the CONFIG_PROTECT paths.
"""
print(portage.settings["CONFIG_PROTECT"])
-def config_protect_mask(argv):
+def config_protect_mask(_argv):
"""
Returns the CONFIG_PROTECT_MASK paths.
"""
print(portage.settings["CONFIG_PROTECT_MASK"])
-def portdir_overlay(argv):
+def portdir_overlay(_argv):
"""
Returns the PORTDIR_OVERLAY path.
+ Deprecated in favor of repositories_configuration command.
"""
+ print("WARNING: 'portageq portdir_overlay' is deprecated. Use 'portageq repositories_configuration' instead.", file=sys.stderr)
print(portage.settings["PORTDIR_OVERLAY"])
-def pkgdir(argv):
+def pkgdir(_argv):
"""
Returns the PKGDIR path.
"""
print(portage.settings["PKGDIR"])
-def distdir(argv):
+def distdir(_argv):
"""
Returns the DISTDIR path.
"""
print(portage.settings["DISTDIR"])
-def colormap(argv):
+def colormap(_argv):
"""
Display the color.map as environment variables.
"""
@@ -719,11 +741,15 @@ def envvar(argv):
return 2
for arg in argv:
+ if arg in ("PORTDIR", "PORTDIR_OVERLAY", "SYNC"):
+ print("WARNING: 'portageq envvar %s' is deprecated. Use 'portageq repositories_configuration' instead." % arg, file=sys.stderr)
if verbose:
- print(arg +"='"+ portage.settings[arg] +"'")
+ print(arg + "=" + portage._shell_quote(portage.settings[arg]))
else:
print(portage.settings[arg])
+
+@uses_eroot
def get_repos(argv):
"""<eroot>
Returns all repos with names (repo_name file) argv[0] = $EROOT
@@ -731,25 +757,137 @@ def get_repos(argv):
if len(argv) < 1:
print("ERROR: insufficient parameters!")
return 2
- print(" ".join(portage.db[argv[0]]["porttree"].dbapi.getRepositories()))
+ print(" ".join(reversed(portage.db[argv[0]]["vartree"].settings.repositories.prepos_order)))
+
+
+@uses_eroot
+def master_repositories(argv):
+ """<eroot> <repo_id>+
+ Returns space-separated list of master repositories for specified repository.
+ """
+ if len(argv) < 2:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ for arg in argv[1:]:
+ if portage.dep._repo_name_re.match(arg) is None:
+ print("ERROR: invalid repository: %s" % arg, file=sys.stderr)
+ return 2
+ try:
+ repo = portage.db[argv[0]]["vartree"].settings.repositories[arg]
+ except KeyError:
+ print("")
+ return 1
+ else:
+ print(" ".join(x.name for x in repo.masters))
-get_repos.uses_root = True
+@uses_eroot
+def master_repos(argv):
+ """<eroot> <repo_id>+
+ This is an alias for the master_repositories command.
+ """
+ return master_repositories(argv)
+@uses_eroot
def get_repo_path(argv):
"""<eroot> <repo_id>+
Returns the path to the repo named argv[1], argv[0] = $EROOT
"""
if len(argv) < 2:
- print("ERROR: insufficient parameters!")
- return 2
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
for arg in argv[1:]:
- path = portage.db[argv[0]]["porttree"].dbapi.getRepositoryPath(arg)
+ if portage.dep._repo_name_re.match(arg) is None:
+ print("ERROR: invalid repository: %s" % arg, file=sys.stderr)
+ return 2
+ path = portage.db[argv[0]]["vartree"].settings.repositories.treemap.get(arg)
if path is None:
- path = ""
+ print("")
+ return 1
print(path)
-get_repo_path.uses_root = True
+@uses_eroot
+def available_eclasses(argv):
+ """<eroot> <repo_id>+
+ Returns space-separated list of available eclasses for specified repository.
+ """
+ if len(argv) < 2:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ for arg in argv[1:]:
+ if portage.dep._repo_name_re.match(arg) is None:
+ print("ERROR: invalid repository: %s" % arg, file=sys.stderr)
+ return 2
+ try:
+ repo = portage.db[argv[0]]["vartree"].settings.repositories[arg]
+ except KeyError:
+ print("")
+ return 1
+ else:
+ print(" ".join(sorted(repo.eclass_db.eclasses)))
+
+
+@uses_eroot
+def eclass_path(argv):
+ """<eroot> <repo_id> <eclass>+
+ Returns the path to specified eclass for specified repository.
+ """
+ if len(argv) < 3:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ if portage.dep._repo_name_re.match(argv[1]) is None:
+ print("ERROR: invalid repository: %s" % argv[1], file=sys.stderr)
+ return 2
+ try:
+ repo = portage.db[argv[0]]["vartree"].settings.repositories[argv[1]]
+ except KeyError:
+ print("")
+ return 1
+ else:
+ retval = 0
+ for arg in argv[2:]:
+ try:
+ eclass = repo.eclass_db.eclasses[arg]
+ except KeyError:
+ print("")
+ retval = 1
+ else:
+ print(eclass.location)
+ return retval
+
+
+@uses_eroot
+def license_path(argv):
+ """<eroot> <repo_id> <license>+
+ Returns the path to specified license for specified repository.
+ """
+ if len(argv) < 3:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ if portage.dep._repo_name_re.match(argv[1]) is None:
+ print("ERROR: invalid repository: %s" % argv[1], file=sys.stderr)
+ return 2
+ try:
+ repo = portage.db[argv[0]]["vartree"].settings.repositories[argv[1]]
+ except KeyError:
+ print("")
+ return 1
+ else:
+ retval = 0
+ for arg in argv[2:]:
+ eclass_path = ""
+ paths = reversed([os.path.join(x.location, 'licenses', arg) for x in list(repo.masters) + [repo]])
+ for path in paths:
+ if os.path.exists(path):
+ eclass_path = path
+ break
+ if eclass_path == "":
+ retval = 1
+ print(eclass_path)
+ return retval
+
+
+@uses_eroot
def list_preserved_libs(argv):
"""<eroot>
Print a list of libraries preserved during a package update in the form
@@ -771,21 +909,296 @@ def list_preserved_libs(argv):
msg.append('\n')
writemsg_stdout(''.join(msg), noiselevel=-1)
return rValue
-list_preserved_libs.uses_root = True
+
+
+class MaintainerEmailMatcher(object):
+ def __init__(self, maintainer_emails):
+ self._re = re.compile("^(%s)$" % "|".join(maintainer_emails))
+
+ def __call__(self, metadata_xml):
+ match = False
+ matcher = self._re.match
+ for x in metadata_xml.maintainers():
+ if x.email is not None and matcher(x.email) is not None:
+ match = True
+ break
+ return match
+
+class HerdMatcher(object):
+ def __init__(self, herds):
+ self._herds = frozenset(herds)
+
+ def __call__(self, metadata_xml):
+ herds = self._herds
+ return any(x in herds for x in metadata_xml.herds())
+
+
+def pquery(parser, opts, args):
+ """[options] [atom]+
+ Emulates a subset of Pkgcore's pquery tool.
+ """
+
+ portdb = portage.db[portage.root]['porttree'].dbapi
+ root_config = RootConfig(portdb.settings,
+ portage.db[portage.root], None)
+
+ def _pkg(cpv, repo_name):
+ try:
+ metadata = dict(zip(
+ Package.metadata_keys,
+ portdb.aux_get(cpv,
+ Package.metadata_keys,
+ myrepo=repo_name)))
+ except KeyError:
+ raise portage.exception.PackageNotFound(cpv)
+ return Package(built=False, cpv=cpv,
+ installed=False, metadata=metadata,
+ root_config=root_config,
+ type_name="ebuild")
+
+ need_metadata = False
+ atoms = []
+ for arg in args:
+ if "/" not in arg.split(":")[0]:
+ atom = insert_category_into_atom(arg, '*')
+ if atom is None:
+ writemsg("ERROR: Invalid atom: '%s'\n" % arg,
+ noiselevel=-1)
+ return 2
+ else:
+ atom = arg
+
+ try:
+ atom = portage.dep.Atom(atom, allow_wildcard=True, allow_repo=True)
+ except portage.exception.InvalidAtom:
+ writemsg("ERROR: Invalid atom: '%s'\n" % arg,
+ noiselevel=-1)
+ return 2
+
+ if atom.slot is not None:
+ need_metadata = True
+
+ atoms.append(atom)
+
+ if "*/*" in atoms:
+ del atoms[:]
+ need_metadata = False
+
+ if not opts.no_filters:
+ need_metadata = True
+
+ xml_matchers = []
+ if opts.maintainer_email:
+ maintainer_emails = []
+ for x in opts.maintainer_email:
+ maintainer_emails.extend(x.split(","))
+ xml_matchers.append(MaintainerEmailMatcher(maintainer_emails))
+ if opts.herd is not None:
+ herds = []
+ for x in opts.herd:
+ herds.extend(x.split(","))
+ xml_matchers.append(HerdMatcher(herds))
+
+ repos = []
+ if opts.all_repos:
+ repos.extend(portdb.repositories.get_repo_for_location(location)
+ for location in portdb.porttrees)
+ elif opts.repo is not None:
+ repos.append(portdb.repositories[opts.repo])
+ else:
+ repos.append(portdb.repositories.mainRepo())
+
+ if not atoms:
+ names = None
+ categories = list(portdb.categories)
+ else:
+ category_wildcard = False
+ name_wildcard = False
+ categories = []
+ names = []
+ for atom in atoms:
+ category, name = portage.catsplit(atom.cp)
+ categories.append(category)
+ names.append(name)
+ if "*" in category:
+ category_wildcard = True
+ if "*" in name:
+ name_wildcard = True
+
+ if category_wildcard:
+ categories = list(portdb.categories)
+ else:
+ categories = list(set(categories))
+
+ if name_wildcard:
+ names = None
+ else:
+ names = sorted(set(names))
+
+ no_version = opts.no_version
+ categories.sort()
+
+ for category in categories:
+ if names is None:
+ cp_list = portdb.cp_all(categories=(category,))
+ else:
+ cp_list = [category + "/" + name for name in names]
+ for cp in cp_list:
+ matches = []
+ for repo in repos:
+ match = True
+ if xml_matchers:
+ metadata_xml_path = os.path.join(
+ repo.location, cp, 'metadata.xml')
+ try:
+ metadata_xml = MetaDataXML(metadata_xml_path, None)
+ except (EnvironmentError, SyntaxError):
+ match = False
+ else:
+ for matcher in xml_matchers:
+ if not matcher(metadata_xml):
+ match = False
+ break
+ if not match:
+ continue
+ cpv_list = portdb.cp_list(cp, mytree=[repo.location])
+ if atoms:
+ for cpv in cpv_list:
+ pkg = None
+ for atom in atoms:
+ if atom.repo is not None and \
+ atom.repo != repo.name:
+ continue
+ if not portage.match_from_list(atom, [cpv]):
+ continue
+ if need_metadata:
+ if pkg is None:
+ try:
+ pkg = _pkg(cpv, repo.name)
+ except portage.exception.PackageNotFound:
+ continue
+
+ if not (opts.no_filters or pkg.visible):
+ continue
+ if not portage.match_from_list(atom, [pkg]):
+ continue
+ matches.append(cpv)
+ break
+ if no_version and matches:
+ break
+ elif opts.no_filters:
+ matches.extend(cpv_list)
+ else:
+ for cpv in cpv_list:
+ try:
+ pkg = _pkg(cpv, repo.name)
+ except portage.exception.PackageNotFound:
+ continue
+ else:
+ if pkg.visible:
+ matches.append(cpv)
+ if no_version:
+ break
+
+ if no_version and matches:
+ break
+
+ if not matches:
+ continue
+
+ if no_version:
+ writemsg_stdout("%s\n" % (cp,), noiselevel=-1)
+ else:
+ matches = list(set(matches))
+ portdb._cpv_sort_ascending(matches)
+ for cpv in matches:
+ writemsg_stdout("%s\n" % (cpv,), noiselevel=-1)
+
+ return os.EX_OK
+
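
Illustrative invocations of the pquery emulation above; quoting the atoms keeps the shell from globbing the wildcards:

    portageq pquery --no-version 'dev-lang/python'
    portageq pquery --maintainer-email='.*@gentoo.org' --no-version 'sys-apps/*'
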
#-----------------------------------------------------------------------------
#
# DO NOT CHANGE CODE BEYOND THIS POINT - IT'S NOT NEEDED!
#
-if not portage.const._ENABLE_PRESERVE_LIBS:
- del list_preserved_libs
-
-non_commands = frozenset(['elog', 'eval_atom_use',
- 'exithandler', 'expand_new_virt', 'main',
- 'usage', 'writemsg', 'writemsg_stdout'])
+non_commands = frozenset(['elog', 'eval_atom_use', 'exithandler', 'main', 'usage', 'uses_eroot'])
commands = sorted(k for k, v in globals().items() \
- if k not in non_commands and isinstance(v, types.FunctionType))
+ if k not in non_commands and isinstance(v, types.FunctionType) and v.__module__ == "__main__")
+
+
+def add_pquery_arguments(parser):
+ pquery_option_groups = (
+ (
+ 'Repository matching options',
+ (
+ {
+ "longopt": "--no-filters",
+ "action": "store_true",
+ "help": "no visibility filters (ACCEPT_KEYWORDS, package masking, etc)"
+ },
+ {
+ "longopt": "--repo",
+ "help": "repo to use (default is PORTDIR if omitted)"
+ },
+ {
+ "longopt": "--all-repos",
+ "help": "search all repos"
+ }
+ )
+ ),
+ (
+ 'Package matching options',
+ (
+ {
+ "longopt": "--herd",
+ "action": "append",
+ "help": "exact match on a herd"
+ },
+ {
+ "longopt": "--maintainer-email",
+ "action": "append",
+ "help": "comma-separated list of maintainer email regexes to search for"
+ }
+ )
+ ),
+ (
+ 'Output formatting',
+ (
+ {
+ "shortopt": "-n",
+ "longopt": "--no-version",
+ "action": "store_true",
+ "help": "collapse multiple matching versions together"
+ },
+ )
+ ),
+ )
+
+ for group_title, opt_data in pquery_option_groups:
+ arg_group = parser.add_argument_group(group_title)
+ for opt_info in opt_data:
+ pargs = []
+ try:
+ pargs.append(opt_info["shortopt"])
+ except KeyError:
+ pass
+ try:
+ pargs.append(opt_info["longopt"])
+ except KeyError:
+ pass
+
+ kwargs = {}
+ try:
+ kwargs["action"] = opt_info["action"]
+ except KeyError:
+ pass
+ try:
+ kwargs["help"] = opt_info["help"]
+ except KeyError:
+ pass
+ arg_group.add_argument(*pargs, **portage._native_kwargs(kwargs))
+
def usage(argv):
print(">>> Portage information query tool")
@@ -798,7 +1211,7 @@ def usage(argv):
# Show our commands -- we do this by scanning the functions in this
# file, and formatting each functions documentation.
#
- help_mode = '--help' in sys.argv
+ help_mode = '--help' in argv
for name in commands:
# Drop non-functions
obj = globals()[name]
@@ -812,12 +1225,21 @@ def usage(argv):
lines = doc.lstrip("\n").split("\n")
print(" " + name + " " + lines[0].strip())
- if (len(sys.argv) > 1):
+ if len(argv) > 1:
if (not help_mode):
lines = lines[:-1]
for line in lines[1:]:
print(" " + line.strip())
- if (len(sys.argv) == 1):
+
+ print()
+ print('Pkgcore pquery compatible options:')
+ print()
+ parser = ArgumentParser(add_help=False,
+ usage='portageq pquery [options] [atom ...]')
+ add_pquery_arguments(parser)
+ parser.print_help()
+
+ if len(argv) == 1:
print("\nRun portageq with --help for info")
atom_validate_strict = "EBUILD_PHASE" in os.environ
@@ -836,52 +1258,84 @@ else:
def elog(elog_funcname, lines):
pass
-def main():
+def main(argv):
+
+ argv = portage._decode_argv(argv)
nocolor = os.environ.get('NOCOLOR')
if nocolor in ('yes', 'true'):
portage.output.nocolor()
- if len(sys.argv) < 2:
- usage(sys.argv)
- sys.exit(os.EX_USAGE)
+ parser = ArgumentParser(add_help=False)
- for x in sys.argv:
- if x in ("-h", "--help"):
- usage(sys.argv)
- sys.exit(os.EX_OK)
- elif x == "--version":
- print("Portage", portage.VERSION)
- sys.exit(os.EX_OK)
-
- cmd = sys.argv[1]
- function = globals().get(cmd)
- if function is None or cmd not in commands:
- usage(sys.argv)
+ # used by envvar
+ parser.add_argument("-v", dest="verbose", action="store_true")
+
+ actions = parser.add_argument_group('Actions')
+ actions.add_argument("-h", "--help", action="store_true")
+ actions.add_argument("--version", action="store_true")
+
+ add_pquery_arguments(parser)
+
+ opts, args = parser.parse_known_args(argv[1:])
+
+ if opts.help:
+ usage(argv)
+ return os.EX_OK
+ elif opts.version:
+ print("Portage", portage.VERSION)
+ return os.EX_OK
+
+ cmd = None
+ if args and args[0] in commands:
+ cmd = args[0]
+
+ if cmd == 'pquery':
+ cmd = None
+ args = args[1:]
+
+ if cmd is None:
+ return pquery(parser, opts, args)
+
+ if opts.verbose:
+ # used by envvar
+ args.append("-v")
+
+ argv = argv[:1] + args
+
+ if len(argv) < 2:
+ usage(argv)
sys.exit(os.EX_USAGE)
+
function = globals()[cmd]
- uses_root = getattr(function, "uses_root", False) and len(sys.argv) > 2
- if uses_root:
- if not os.path.isdir(sys.argv[2]):
- sys.stderr.write("Not a directory: '%s'\n" % sys.argv[2])
+ uses_eroot = getattr(function, "uses_eroot", False) and len(argv) > 2
+ if uses_eroot:
+ if not os.path.isdir(argv[2]):
+ sys.stderr.write("Not a directory: '%s'\n" % argv[2])
sys.stderr.write("Run portageq with --help for info\n")
sys.stderr.flush()
sys.exit(os.EX_USAGE)
- eprefix = portage.const.EPREFIX
- eroot = portage.util.normalize_path(sys.argv[2])
+ eprefix = portage.settings["EPREFIX"]
+ eroot = portage.util.normalize_path(argv[2])
+
if eprefix:
- root = eroot[:1-len(eprefix)]
+ if not eroot.endswith(eprefix):
+ sys.stderr.write("ERROR: This version of portageq"
+ " only supports <eroot>s ending in"
+ " '%s'. The provided <eroot>, '%s',"
+ " doesn't.\n" % (eprefix, eroot))
+ sys.stderr.flush()
+ sys.exit(os.EX_USAGE)
+ root = eroot[:1 - len(eprefix)]
else:
root = eroot
+
os.environ["ROOT"] = root
- args = sys.argv[2:]
- if args and isinstance(args[0], bytes):
- for i in range(len(args)):
- args[i] = portage._unicode_decode(args[i])
+ args = argv[2:]
try:
- if uses_root:
+ if uses_eroot:
args[0] = portage.settings['EROOT']
retval = function(args)
if retval:
@@ -902,6 +1356,7 @@ def main():
portage.writemsg("\nPlease use a more specific atom.\n", noiselevel=-1)
sys.exit(1)
-main()
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
#-----------------------------------------------------------------------------
diff --git a/bin/quickpkg b/bin/quickpkg
index 76259c5c1..90277ade3 100755
--- a/bin/quickpkg
+++ b/bin/quickpkg
@@ -1,23 +1,20 @@
-#!/usr/bin/python
-# Copyright 1999-2012 Gentoo Foundation
+#!/usr/bin/python -b
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
import errno
import math
-import optparse
import signal
import sys
import tarfile
-try:
- import portage
-except ImportError:
- from os import path as osp
- sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
- import portage
-
+from os import path as osp
+pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
+sys.path.insert(0, pym_path)
+import portage
+portage._internal_caller = True
from portage import os
from portage import xpak
from portage.dbapi.dep_expand import dep_expand
@@ -28,6 +25,7 @@ from portage.util import ConfigProtect, ensure_dirs, shlex_split
from portage.dbapi.vartree import dblink, tar_contents
from portage.checksum import perform_md5
from portage._sets import load_default_config, SETPREFIX
+from portage.util._argparse import ArgumentParser
def quickpkg_atom(options, infos, arg, eout):
settings = portage.settings
@@ -291,29 +289,28 @@ def quickpkg_main(options, args, eout):
if __name__ == "__main__":
usage = "quickpkg [options] <list of package atoms or package sets>"
- parser = optparse.OptionParser(usage=usage)
- parser.add_option("--umask",
+ parser = ArgumentParser(usage=usage)
+ parser.add_argument("--umask",
default="0077",
help="umask used during package creation (default is 0077)")
- parser.add_option("--ignore-default-opts",
+ parser.add_argument("--ignore-default-opts",
action="store_true",
help="do not use the QUICKPKG_DEFAULT_OPTS environment variable")
- parser.add_option("--include-config",
- type="choice",
+ parser.add_argument("--include-config",
choices=["y","n"],
default="n",
metavar="<y|n>",
help="include all files protected by CONFIG_PROTECT (as a security precaution, default is 'n')")
- parser.add_option("--include-unmodified-config",
- type="choice",
+ parser.add_argument("--include-unmodified-config",
choices=["y","n"],
default="n",
metavar="<y|n>",
help="include files protected by CONFIG_PROTECT that have not been modified since installation (as a security precaution, default is 'n')")
- options, args = parser.parse_args(sys.argv[1:])
+ options, args = parser.parse_known_args(sys.argv[1:])
if not options.ignore_default_opts:
- default_opts = portage.settings.get("QUICKPKG_DEFAULT_OPTS","").split()
- options, args = parser.parse_args(default_opts + sys.argv[1:])
+ default_opts = shlex_split(
+ portage.settings.get("QUICKPKG_DEFAULT_OPTS", ""))
+ options, args = parser.parse_known_args(default_opts + sys.argv[1:])
if not args:
parser.error("no packages atoms given")
try:
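
With shlex_split, QUICKPKG_DEFAULT_OPTS is now tokenized like a shell command line, so quoted values survive; a hypothetical make.conf entry and invocation:

    QUICKPKG_DEFAULT_OPTS='--umask 0027 --include-unmodified-config y'
    quickpkg app-editors/nano
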
diff --git a/bin/regenworld b/bin/regenworld
index 3199fdf90..32e8e5c33 100755
--- a/bin/regenworld
+++ b/bin/regenworld
@@ -1,17 +1,15 @@
-#!/usr/bin/python
-# Copyright 1999-2011 Gentoo Foundation
+#!/usr/bin/python -b
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
import sys
-try:
- import portage
-except ImportError:
- from os import path as osp
- sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
- import portage
-
+from os import path as osp
+pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
+sys.path.insert(0, pym_path)
+import portage
+portage._internal_caller = True
from portage import os
from portage._sets.files import StaticFileSet, WorldSelectedSet
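
The bootstrap above is now unconditional: instead of importing portage and falling back to the sibling pym/ directory on ImportError, each script always prepends pym/ and then marks itself as an internal caller. A sketch of the shared preamble as it appears across these scripts:

    import sys
    from os import path as osp

    # Resolve <script dir>/../pym, following symlinks via realpath().
    pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
    sys.path.insert(0, pym_path)
    import portage
    # Signals to the library that one of its own tools is driving it,
    # so internal-only behavior (e.g. extra warnings) can be enabled.
    portage._internal_caller = True
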
diff --git a/bin/repoman b/bin/repoman
index 795c7ce77..888892b30 100755
--- a/bin/repoman
+++ b/bin/repoman
@@ -1,20 +1,19 @@
-#!/usr/bin/python -O
-# Copyright 1999-2012 Gentoo Foundation
+#!/usr/bin/python -bO
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Next to do: dep syntax checking in mask files
# Then, check to make sure deps are satisfiable (to avoid "can't find match for" problems)
# that last one is tricky because multiple profiles need to be checked.
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
-import calendar
+import codecs
import copy
import errno
import formatter
import io
import logging
-import optparse
import re
import signal
import stat
@@ -24,23 +23,20 @@ import tempfile
import textwrap
import time
import platform
-
-try:
- from urllib.request import urlopen as urllib_request_urlopen
-except ImportError:
- from urllib import urlopen as urllib_request_urlopen
-
from itertools import chain
from stat import S_ISDIR
try:
- import portage
+ from urllib.parse import urlparse
except ImportError:
- from os import path as osp
- sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
- import portage
+ from urlparse import urlparse
+
+from os import path as osp
+pym_path = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym")
+sys.path.insert(0, pym_path)
+import portage
+portage._internal_caller = True
portage._disable_legacy_globals()
-portage.dep._internal_warnings = True
try:
import xml.etree.ElementTree
@@ -58,9 +54,9 @@ except (ImportError, SystemError, RuntimeError, Exception):
sys.exit(1)
from portage import os
-from portage import subprocess_getstatusoutput
from portage import _encodings
from portage import _unicode_encode
+import repoman.checks
from repoman.checks import run_checks
from repoman import utilities
from repoman.herdbase import make_herd_base
@@ -69,18 +65,18 @@ from _emerge.RootConfig import RootConfig
from _emerge.userquery import userquery
import portage.checksum
import portage.const
+import portage.repository.config
from portage import cvstree, normalize_path
from portage import util
-from portage.exception import (FileNotFound, MissingParameter,
+from portage.exception import (FileNotFound, InvalidAtom, MissingParameter,
ParseError, PermissionDenied)
-from portage.manifest import _prohibited_filename_chars_re as \
- disallowed_filename_chars_re
+from portage.dep import Atom
from portage.process import find_binary, spawn
from portage.output import bold, create_color_func, \
green, nocolor, red
from portage.output import ConsoleStyleFile, StyleWriter
from portage.util import writemsg_level
-from portage.util._desktop_entry import validate_desktop_entry
+from portage.util._argparse import ArgumentParser
from portage.package.ebuild.digestgen import digestgen
from portage.eapi import eapi_has_iuse_defaults, eapi_has_required_use
@@ -93,6 +89,7 @@ util.initialize_logger()
max_desc_len = 100
allowed_filename_chars="a-zA-Z0-9._-+:"
pv_toolong_re = re.compile(r'[0-9]{19,}')
+GPG_KEY_ID_REGEX = r'(0x)?([0-9a-fA-F]{8}|[0-9a-fA-F]{16}|[0-9a-fA-F]{24}|[0-9a-fA-F]{32}|[0-9a-fA-F]{40})!?'
bad = create_color_func("BAD")
# A sane umask is needed for files that portage creates.
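
GPG_KEY_ID_REGEX accepts an optional 0x prefix, a hex key ID of 8, 16, 24, 32, or 40 digits, and an optional trailing '!' (gpg's marker for forcing an exact key). It is applied later with explicit anchors; a small self-contained check of that usage:

    import re

    GPG_KEY_ID_REGEX = (r'(0x)?([0-9a-fA-F]{8}|[0-9a-fA-F]{16}|[0-9a-fA-F]{24}'
        r'|[0-9a-fA-F]{32}|[0-9a-fA-F]{40})!?')

    for key in ("0x01234567", "0123456789abcdef!", "not-a-key"):
        ok = re.match(r'^%s$' % GPG_KEY_ID_REGEX, key) is not None
        print(key, ok)  # True, True, False
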
@@ -116,41 +113,14 @@ def err(txt):
warn(txt)
sys.exit(1)
-def exithandler(signum=None, frame=None):
+def exithandler(signum=None, _frame=None):
logging.fatal("Interrupted; exiting...")
if signum is None:
sys.exit(1)
else:
sys.exit(128 + signum)
-signal.signal(signal.SIGINT,exithandler)
-
-class RepomanHelpFormatter(optparse.IndentedHelpFormatter):
- """Repoman needs it's own HelpFormatter for now, because the default ones
- murder the help text."""
-
- def __init__(self, indent_increment=1, max_help_position=24, width=150, short_first=1):
- optparse.HelpFormatter.__init__(self, indent_increment, max_help_position, width, short_first)
-
- def format_description(self, description):
- return description
-
-class RepomanOptionParser(optparse.OptionParser):
- """Add the on_tail function, ruby has it, optionParser should too
- """
-
- def __init__(self, *args, **kwargs):
- optparse.OptionParser.__init__(self, *args, **kwargs)
- self.tail = ""
-
- def on_tail(self, description):
- self.tail += description
-
- def format_help(self, formatter=None):
- result = optparse.OptionParser.format_help(self, formatter)
- result += self.tail
- return result
-
+signal.signal(signal.SIGINT, exithandler)
def ParseArgs(argv, qahelp):
"""This function uses a customized optionParser to parse command line arguments for repoman
@@ -161,8 +131,7 @@ def ParseArgs(argv, qahelp):
(opts, args), just like a call to parser.parse_args()
"""
- if argv and isinstance(argv[0], bytes):
- argv = [portage._unicode_decode(x) for x in argv]
+ argv = portage._decode_argv(argv)
modes = {
'commit' : 'Run a scan then commit changes',
@@ -172,102 +141,113 @@ def ParseArgs(argv, qahelp):
'help' : 'Show this screen',
'manifest' : 'Generate a Manifest (fetches files if necessary)',
'manifest-check' : 'Check Manifests for missing or incorrect digests',
- 'scan' : 'Scan directory tree for QA issues'
+ 'scan' : 'Scan directory tree for QA issues'
+ }
+
+ output_choices = {
+ 'default' : 'The normal output format',
+ 'column' : 'Columnar output suitable for use with grep'
}
mode_keys = list(modes)
mode_keys.sort()
- parser = RepomanOptionParser(formatter=RepomanHelpFormatter(), usage="%prog [options] [mode]")
- parser.description = green(" ".join((os.path.basename(argv[0]), "1.2")))
- parser.description += "\nCopyright 1999-2007 Gentoo Foundation"
- parser.description += "\nDistributed under the terms of the GNU General Public License v2"
- parser.description += "\nmodes: " + " | ".join(map(green,mode_keys))
+ output_keys = sorted(output_choices)
- parser.add_option('-a', '--ask', dest='ask', action='store_true', default=False,
+ parser = ArgumentParser(usage="repoman [options] [mode]",
+ description="Modes: %s" % " | ".join(mode_keys),
+ epilog="For more help consult the man page.")
+
+ parser.add_argument('-a', '--ask', dest='ask', action='store_true', default=False,
help='Request a confirmation before commiting')
- parser.add_option('-m', '--commitmsg', dest='commitmsg',
+ parser.add_argument('-m', '--commitmsg', dest='commitmsg',
help='specify a commit message on the command line')
- parser.add_option('-M', '--commitmsgfile', dest='commitmsgfile',
+ parser.add_argument('-M', '--commitmsgfile', dest='commitmsgfile',
help='specify a path to a file that contains a commit message')
- parser.add_option('--digest',
- type='choice', choices=('y', 'n'), metavar='<y|n>',
+ parser.add_argument('--digest',
+ choices=('y', 'n'), metavar='<y|n>',
help='Automatically update Manifest digests for modified files')
- parser.add_option('-p', '--pretend', dest='pretend', default=False,
+ parser.add_argument('-p', '--pretend', dest='pretend', default=False,
action='store_true', help='don\'t commit or fix anything; just show what would be done')
-
- parser.add_option('-q', '--quiet', dest="quiet", action="count", default=0,
+
+ parser.add_argument('-q', '--quiet', dest="quiet", action="count", default=0,
help='do not print unnecessary messages')
- parser.add_option(
- '--echangelog', type='choice', choices=('y', 'n', 'force'), metavar="<y|n|force>",
+ parser.add_argument(
+ '--echangelog', choices=('y', 'n', 'force'), metavar="<y|n|force>",
help='for commit mode, call echangelog if ChangeLog is unmodified (or '
'regardless of modification if \'force\' is specified)')
- parser.add_option('-f', '--force', dest='force', default=False, action='store_true',
+ parser.add_argument('--experimental-inherit', choices=('y', 'n'),
+ metavar="<y|n>", default='n',
+ help='Enable experimental inherit.missing checks which may misbehave'
+ ' when the internal eclass database becomes outdated')
+
+ parser.add_argument('-f', '--force', dest='force', default=False, action='store_true',
help='Commit with QA violations')
- parser.add_option('--vcs', dest='vcs',
+ parser.add_argument('--vcs', dest='vcs',
help='Force using specific VCS instead of autodetection')
- parser.add_option('-v', '--verbose', dest="verbosity", action='count',
+ parser.add_argument('-v', '--verbose', dest="verbosity", action='count',
help='be very verbose in output', default=0)
- parser.add_option('-V', '--version', dest='version', action='store_true',
+ parser.add_argument('-V', '--version', dest='version', action='store_true',
help='show version info')
- parser.add_option('-x', '--xmlparse', dest='xml_parse', action='store_true',
+ parser.add_argument('-x', '--xmlparse', dest='xml_parse', action='store_true',
default=False, help='forces the metadata.xml parse check to be carried out')
- parser.add_option(
- '--if-modified', type='choice', choices=('y', 'n'), default='n',
+ parser.add_argument(
+ '--if-modified', choices=('y', 'n'), default='n',
metavar="<y|n>",
help='only check packages that have uncommitted modifications')
- parser.add_option('-i', '--ignore-arches', dest='ignore_arches', action='store_true',
+ parser.add_argument('-i', '--ignore-arches', dest='ignore_arches', action='store_true',
default=False, help='ignore arch-specific failures (where arch != host)')
- parser.add_option("--ignore-default-opts",
+ parser.add_argument("--ignore-default-opts",
action="store_true",
help="do not use the REPOMAN_DEFAULT_OPTS environment variable")
- parser.add_option('-I', '--ignore-masked', dest='ignore_masked', action='store_true',
+ parser.add_argument('-I', '--ignore-masked', dest='ignore_masked', action='store_true',
default=False, help='ignore masked packages (not allowed with commit mode)')
- parser.add_option('-d', '--include-dev', dest='include_dev', action='store_true',
+ parser.add_argument('--include-arches', dest='include_arches',
+ metavar='ARCHES', action='append',
+ help='A space separated list of arches used to '
+ 'filter the selection of profiles for dependency checks')
+
+ parser.add_argument('-d', '--include-dev', dest='include_dev', action='store_true',
default=False, help='include dev profiles in dependency checks')
- parser.add_option('--unmatched-removal', dest='unmatched_removal', action='store_true',
+ parser.add_argument('-e', '--include-exp-profiles', choices=('y', 'n'),
+ default=False, help='include exp profiles in dependency checks',
+ metavar='<y|n>')
+
+ parser.add_argument('--unmatched-removal', dest='unmatched_removal', action='store_true',
default=False, help='enable strict checking of package.mask and package.unmask files for unmatched removal atoms')
- parser.add_option('--without-mask', dest='without_mask', action='store_true',
+ parser.add_argument('--without-mask', dest='without_mask', action='store_true',
default=False, help='behave as if no package.mask entries exist (not allowed with commit mode)')
- parser.add_option('--mode', type='choice', dest='mode', choices=list(modes),
- help='specify which mode repoman will run in (default=full)')
-
- parser.on_tail("\n " + green("Modes".ljust(20) + " Description\n"))
+ parser.add_argument('--output-style', dest='output_style', choices=output_keys,
+ help='select output type', default='default')
- for k in mode_keys:
- parser.on_tail(" %s %s\n" % (k.ljust(20), modes[k]))
-
- parser.on_tail("\n " + green("QA keyword".ljust(20) + " Description\n"))
-
- sorted_qa = list(qahelp)
- sorted_qa.sort()
- for k in sorted_qa:
- parser.on_tail(" %s %s\n" % (k.ljust(20), qahelp[k]))
+ parser.add_argument('--mode', dest='mode', choices=mode_keys,
+ help='specify which mode repoman will run in (default=full)')
- opts, args = parser.parse_args(argv[1:])
+ opts, args = parser.parse_known_args(argv[1:])
if not opts.ignore_default_opts:
- default_opts = repoman_settings.get("REPOMAN_DEFAULT_OPTS", "").split()
+ default_opts = portage.util.shlex_split(
+ repoman_settings.get("REPOMAN_DEFAULT_OPTS", ""))
if default_opts:
- opts, args = parser.parse_args(default_opts + sys.argv[1:])
+ opts, args = parser.parse_known_args(default_opts + sys.argv[1:])
if opts.mode == 'help':
parser.print_help(short=False)
@@ -282,16 +262,10 @@ def ParseArgs(argv, qahelp):
if not opts.mode:
opts.mode = 'full'
-
+
if opts.mode == 'ci':
opts.mode = 'commit' # backwards compat shortcut
- if opts.mode == 'commit' and not (opts.force or opts.pretend):
- if opts.ignore_masked:
- parser.error('Commit mode and --ignore-masked are not compatible')
- if opts.without_mask:
- parser.error('Commit mode and --without-mask are not compatible')
-
# Use the verbosity and quiet options to fiddle with the loglevel appropriately
for val in range(opts.verbosity):
logger = logging.getLogger()
@@ -301,101 +275,99 @@ def ParseArgs(argv, qahelp):
logger = logging.getLogger()
logger.setLevel(logger.getEffectiveLevel() + 10)
+ if opts.mode == 'commit' and not (opts.force or opts.pretend):
+ if opts.ignore_masked:
+ opts.ignore_masked = False
+ logging.warn('Commit mode automatically disables --ignore-masked')
+ if opts.without_mask:
+ opts.without_mask = False
+ logging.warn('Commit mode automatically disables --without-mask')
+
return (opts, args)
-qahelp={
- "CVS/Entries.IO_error":"Attempting to commit, and an IO error was encountered access the Entries file",
- "desktop.invalid":"desktop-file-validate reports errors in a *.desktop file",
- "ebuild.invalidname":"Ebuild files with a non-parseable or syntactically incorrect name (or using 2.1 versioning extensions)",
- "ebuild.namenomatch":"Ebuild files that do not have the same name as their parent directory",
- "changelog.ebuildadded":"An ebuild was added but the ChangeLog was not modified",
- "changelog.missing":"Missing ChangeLog files",
- "ebuild.notadded":"Ebuilds that exist but have not been added to cvs",
- "ebuild.patches":"PATCHES variable should be a bash array to ensure white space safety",
- "changelog.notadded":"ChangeLogs that exist but have not been added to cvs",
- "dependency.unknown" : "Ebuild has a dependency that refers to an unknown package (which may be valid if it is a blocker for a renamed/removed package, or is an alternative choice provided by an overlay)",
- "file.executable":"Ebuilds, digests, metadata.xml, Manifest, and ChangeLog do not need the executable bit",
- "file.size":"Files in the files directory must be under 20 KiB",
- "file.size.fatal":"Files in the files directory must be under 60 KiB",
- "file.name":"File/dir name must be composed of only the following chars: %s " % allowed_filename_chars,
- "file.UTF8":"File is not UTF8 compliant",
- "inherit.deprecated":"Ebuild inherits a deprecated eclass",
- "inherit.missing":"Ebuild uses functions from an eclass but does not inherit it",
- "inherit.unused":"Ebuild inherits an eclass but does not use it",
- "java.eclassesnotused":"With virtual/jdk in DEPEND you must inherit a java eclass",
- "wxwidgets.eclassnotused":"Ebuild DEPENDs on x11-libs/wxGTK without inheriting wxwidgets.eclass",
- "KEYWORDS.dropped":"Ebuilds that appear to have dropped KEYWORDS for some arch",
- "KEYWORDS.missing":"Ebuilds that have a missing or empty KEYWORDS variable",
- "KEYWORDS.stable":"Ebuilds that have been added directly with stable KEYWORDS",
- "KEYWORDS.stupid":"Ebuilds that use KEYWORDS=-* instead of package.mask",
- "LICENSE.missing":"Ebuilds that have a missing or empty LICENSE variable",
- "LICENSE.virtual":"Virtuals that have a non-empty LICENSE variable",
- "DESCRIPTION.missing":"Ebuilds that have a missing or empty DESCRIPTION variable",
- "DESCRIPTION.toolong":"DESCRIPTION is over %d characters" % max_desc_len,
- "EAPI.definition":"EAPI definition does not conform to PMS section 7.3.1 (first non-comment, non-blank line)",
- "EAPI.deprecated":"Ebuilds that use features that are deprecated in the current EAPI",
- "EAPI.incompatible":"Ebuilds that use features that are only available with a different EAPI",
- "EAPI.unsupported":"Ebuilds that have an unsupported EAPI version (you must upgrade portage)",
- "SLOT.invalid":"Ebuilds that have a missing or invalid SLOT variable value",
- "HOMEPAGE.missing":"Ebuilds that have a missing or empty HOMEPAGE variable",
- "HOMEPAGE.virtual":"Virtuals that have a non-empty HOMEPAGE variable",
- "DEPEND.bad":"User-visible ebuilds with bad DEPEND settings (matched against *visible* ebuilds)",
- "RDEPEND.bad":"User-visible ebuilds with bad RDEPEND settings (matched against *visible* ebuilds)",
- "PDEPEND.bad":"User-visible ebuilds with bad PDEPEND settings (matched against *visible* ebuilds)",
- "DEPEND.badmasked":"Masked ebuilds with bad DEPEND settings (matched against *all* ebuilds)",
- "RDEPEND.badmasked":"Masked ebuilds with RDEPEND settings (matched against *all* ebuilds)",
- "PDEPEND.badmasked":"Masked ebuilds with PDEPEND settings (matched against *all* ebuilds)",
- "DEPEND.badindev":"User-visible ebuilds with bad DEPEND settings (matched against *visible* ebuilds) in developing arch",
- "RDEPEND.badindev":"User-visible ebuilds with bad RDEPEND settings (matched against *visible* ebuilds) in developing arch",
- "PDEPEND.badindev":"User-visible ebuilds with bad PDEPEND settings (matched against *visible* ebuilds) in developing arch",
- "DEPEND.badmaskedindev":"Masked ebuilds with bad DEPEND settings (matched against *all* ebuilds) in developing arch",
- "RDEPEND.badmaskedindev":"Masked ebuilds with RDEPEND settings (matched against *all* ebuilds) in developing arch",
- "PDEPEND.badmaskedindev":"Masked ebuilds with PDEPEND settings (matched against *all* ebuilds) in developing arch",
- "PDEPEND.suspect":"PDEPEND contains a package that usually only belongs in DEPEND.",
- "DEPEND.syntax":"Syntax error in DEPEND (usually an extra/missing space/parenthesis)",
- "RDEPEND.syntax":"Syntax error in RDEPEND (usually an extra/missing space/parenthesis)",
- "PDEPEND.syntax":"Syntax error in PDEPEND (usually an extra/missing space/parenthesis)",
- "DEPEND.badtilde":"DEPEND uses the ~ dep operator with a non-zero revision part, which is useless (the revision is ignored)",
- "RDEPEND.badtilde":"RDEPEND uses the ~ dep operator with a non-zero revision part, which is useless (the revision is ignored)",
- "PDEPEND.badtilde":"PDEPEND uses the ~ dep operator with a non-zero revision part, which is useless (the revision is ignored)",
- "LICENSE.syntax":"Syntax error in LICENSE (usually an extra/missing space/parenthesis)",
- "PROVIDE.syntax":"Syntax error in PROVIDE (usually an extra/missing space/parenthesis)",
- "PROPERTIES.syntax":"Syntax error in PROPERTIES (usually an extra/missing space/parenthesis)",
- "RESTRICT.syntax":"Syntax error in RESTRICT (usually an extra/missing space/parenthesis)",
- "REQUIRED_USE.syntax":"Syntax error in REQUIRED_USE (usually an extra/missing space/parenthesis)",
- "SRC_URI.syntax":"Syntax error in SRC_URI (usually an extra/missing space/parenthesis)",
- "SRC_URI.mirror":"A uri listed in profiles/thirdpartymirrors is found in SRC_URI",
- "ebuild.syntax":"Error generating cache entry for ebuild; typically caused by ebuild syntax error or digest verification failure",
- "ebuild.output":"A simple sourcing of the ebuild produces output; this breaks ebuild policy.",
- "ebuild.nesteddie":"Placing 'die' inside ( ) prints an error, but doesn't stop the ebuild.",
- "variable.invalidchar":"A variable contains an invalid character that is not part of the ASCII character set",
- "variable.readonly":"Assigning a readonly variable",
- "variable.usedwithhelpers":"Ebuild uses D, ROOT, ED, EROOT or EPREFIX with helpers",
- "LIVEVCS.stable":"This ebuild is a live checkout from a VCS but has stable keywords.",
- "LIVEVCS.unmasked":"This ebuild is a live checkout from a VCS but has keywords and is not masked in the global package.mask.",
- "IUSE.invalid":"This ebuild has a variable in IUSE that is not in the use.desc or its metadata.xml file",
- "IUSE.missing":"This ebuild has a USE conditional which references a flag that is not listed in IUSE",
- "IUSE.undefined":"This ebuild does not define IUSE (style guideline says to define IUSE even when empty)",
- "LICENSE.invalid":"This ebuild is listing a license that doesnt exist in portages license/ dir.",
- "KEYWORDS.invalid":"This ebuild contains KEYWORDS that are not listed in profiles/arch.list or for which no valid profile was found",
- "RDEPEND.implicit":"RDEPEND is unset in the ebuild which triggers implicit RDEPEND=$DEPEND assignment (prior to EAPI 4)",
- "RDEPEND.suspect":"RDEPEND contains a package that usually only belongs in DEPEND.",
- "RESTRICT.invalid":"This ebuild contains invalid RESTRICT values.",
- "digest.assumed":"Existing digest must be assumed correct (Package level only)",
- "digest.missing":"Some files listed in SRC_URI aren't referenced in the Manifest",
- "digest.unused":"Some files listed in the Manifest aren't referenced in SRC_URI",
- "ebuild.majorsyn":"This ebuild has a major syntax error that may cause the ebuild to fail partially or fully",
- "ebuild.minorsyn":"This ebuild has a minor syntax error that contravenes gentoo coding style",
- "ebuild.badheader":"This ebuild has a malformed header",
- "manifest.bad":"Manifest has missing or incorrect digests",
- "metadata.missing":"Missing metadata.xml files",
- "metadata.bad":"Bad metadata.xml files",
- "metadata.warning":"Warnings in metadata.xml files",
- "portage.internal":"The ebuild uses an internal Portage function",
- "virtual.oldstyle":"The ebuild PROVIDEs an old-style virtual (see GLEP 37)",
- "virtual.suspect":"Ebuild contains a package that usually should be pulled via virtual/, not directly.",
- "usage.obsolete":"The ebuild makes use of an obsolete construct",
- "upstream.workaround":"The ebuild works around an upstream bug, an upstream bug should be filed and tracked in bugs.gentoo.org"
+qahelp = {
+ "CVS/Entries.IO_error": "Attempting to commit, and an IO error was encountered access the Entries file",
+ "ebuild.invalidname": "Ebuild files with a non-parseable or syntactically incorrect name (or using 2.1 versioning extensions)",
+ "ebuild.namenomatch": "Ebuild files that do not have the same name as their parent directory",
+ "changelog.ebuildadded": "An ebuild was added but the ChangeLog was not modified",
+ "changelog.missing": "Missing ChangeLog files",
+ "ebuild.notadded": "Ebuilds that exist but have not been added to cvs",
+ "ebuild.patches": "PATCHES variable should be a bash array to ensure white space safety",
+ "changelog.notadded": "ChangeLogs that exist but have not been added to cvs",
+ "dependency.bad": "User-visible ebuilds with unsatisfied dependencies (matched against *visible* ebuilds)",
+ "dependency.badmasked": "Masked ebuilds with unsatisfied dependencies (matched against *all* ebuilds)",
+ "dependency.badindev": "User-visible ebuilds with unsatisfied dependencies (matched against *visible* ebuilds) in developing arch",
+ "dependency.badmaskedindev": "Masked ebuilds with unsatisfied dependencies (matched against *all* ebuilds) in developing arch",
+ "dependency.badtilde": "Uses the ~ dep operator with a non-zero revision part, which is useless (the revision is ignored)",
+ "dependency.syntax": "Syntax error in dependency string (usually an extra/missing space/parenthesis)",
+ "dependency.unknown": "Ebuild has a dependency that refers to an unknown package (which may be valid if it is a blocker for a renamed/removed package, or is an alternative choice provided by an overlay)",
+ "file.executable": "Ebuilds, digests, metadata.xml, Manifest, and ChangeLog do not need the executable bit",
+ "file.size": "Files in the files directory must be under 20 KiB",
+ "file.size.fatal": "Files in the files directory must be under 60 KiB",
+ "file.name": "File/dir name must be composed of only the following chars: %s " % allowed_filename_chars,
+ "file.UTF8": "File is not UTF8 compliant",
+ "inherit.deprecated": "Ebuild inherits a deprecated eclass",
+ "inherit.missing": "Ebuild uses functions from an eclass but does not inherit it",
+ "inherit.unused": "Ebuild inherits an eclass but does not use it",
+ "java.eclassesnotused": "With virtual/jdk in DEPEND you must inherit a java eclass",
+ "wxwidgets.eclassnotused": "Ebuild DEPENDs on x11-libs/wxGTK without inheriting wxwidgets.eclass",
+ "KEYWORDS.dropped": "Ebuilds that appear to have dropped KEYWORDS for some arch",
+ "KEYWORDS.missing": "Ebuilds that have a missing or empty KEYWORDS variable",
+ "KEYWORDS.stable": "Ebuilds that have been added directly with stable KEYWORDS",
+ "KEYWORDS.stupid": "Ebuilds that use KEYWORDS=-* instead of package.mask",
+ "LICENSE.missing": "Ebuilds that have a missing or empty LICENSE variable",
+ "LICENSE.virtual": "Virtuals that have a non-empty LICENSE variable",
+ "DESCRIPTION.missing": "Ebuilds that have a missing or empty DESCRIPTION variable",
+ "DESCRIPTION.toolong": "DESCRIPTION is over %d characters" % max_desc_len,
+ "EAPI.definition": "EAPI definition does not conform to PMS section 7.3.1 (first non-comment, non-blank line)",
+ "EAPI.deprecated": "Ebuilds that use features that are deprecated in the current EAPI",
+ "EAPI.incompatible": "Ebuilds that use features that are only available with a different EAPI",
+ "EAPI.unsupported": "Ebuilds that have an unsupported EAPI version (you must upgrade portage)",
+ "SLOT.invalid": "Ebuilds that have a missing or invalid SLOT variable value",
+ "HOMEPAGE.missing": "Ebuilds that have a missing or empty HOMEPAGE variable",
+ "HOMEPAGE.virtual": "Virtuals that have a non-empty HOMEPAGE variable",
+ "PDEPEND.suspect": "PDEPEND contains a package that usually only belongs in DEPEND.",
+ "LICENSE.syntax": "Syntax error in LICENSE (usually an extra/missing space/parenthesis)",
+ "PROVIDE.syntax": "Syntax error in PROVIDE (usually an extra/missing space/parenthesis)",
+ "PROPERTIES.syntax": "Syntax error in PROPERTIES (usually an extra/missing space/parenthesis)",
+ "RESTRICT.syntax": "Syntax error in RESTRICT (usually an extra/missing space/parenthesis)",
+ "REQUIRED_USE.syntax": "Syntax error in REQUIRED_USE (usually an extra/missing space/parenthesis)",
+ "SRC_URI.syntax": "Syntax error in SRC_URI (usually an extra/missing space/parenthesis)",
+ "SRC_URI.mirror": "A uri listed in profiles/thirdpartymirrors is found in SRC_URI",
+ "ebuild.syntax": "Error generating cache entry for ebuild; typically caused by ebuild syntax error or digest verification failure",
+ "ebuild.output": "A simple sourcing of the ebuild produces output; this breaks ebuild policy.",
+ "ebuild.nesteddie": "Placing 'die' inside ( ) prints an error, but doesn't stop the ebuild.",
+ "variable.invalidchar": "A variable contains an invalid character that is not part of the ASCII character set",
+ "variable.readonly": "Assigning a readonly variable",
+ "variable.usedwithhelpers": "Ebuild uses D, ROOT, ED, EROOT or EPREFIX with helpers",
+ "LIVEVCS.stable": "This ebuild is a live checkout from a VCS but has stable keywords.",
+ "LIVEVCS.unmasked": "This ebuild is a live checkout from a VCS but has keywords and is not masked in the global package.mask.",
+ "IUSE.invalid": "This ebuild has a variable in IUSE that is not in the use.desc or its metadata.xml file",
+ "IUSE.missing": "This ebuild has a USE conditional which references a flag that is not listed in IUSE",
+ "IUSE.rubydeprecated": "The ebuild has set a ruby interpreter in USE_RUBY, that is not available as a ruby target anymore",
+ "LICENSE.invalid": "This ebuild is listing a license that doesnt exist in portages license/ dir.",
+ "LICENSE.deprecated": "This ebuild is listing a deprecated license.",
+ "KEYWORDS.invalid": "This ebuild contains KEYWORDS that are not listed in profiles/arch.list or for which no valid profile was found",
+ "RDEPEND.implicit": "RDEPEND is unset in the ebuild which triggers implicit RDEPEND=$DEPEND assignment (prior to EAPI 4)",
+ "RDEPEND.suspect": "RDEPEND contains a package that usually only belongs in DEPEND.",
+ "RESTRICT.invalid": "This ebuild contains invalid RESTRICT values.",
+ "digest.assumed": "Existing digest must be assumed correct (Package level only)",
+ "digest.missing": "Some files listed in SRC_URI aren't referenced in the Manifest",
+ "digest.unused": "Some files listed in the Manifest aren't referenced in SRC_URI",
+ "ebuild.majorsyn": "This ebuild has a major syntax error that may cause the ebuild to fail partially or fully",
+ "ebuild.minorsyn": "This ebuild has a minor syntax error that contravenes gentoo coding style",
+ "ebuild.badheader": "This ebuild has a malformed header",
+ "manifest.bad": "Manifest has missing or incorrect digests",
+ "metadata.missing": "Missing metadata.xml files",
+ "metadata.bad": "Bad metadata.xml files",
+ "metadata.warning": "Warnings in metadata.xml files",
+ "portage.internal": "The ebuild uses an internal Portage function or variable",
+ "repo.eapi.banned": "The ebuild uses an EAPI which is banned by the repository's metadata/layout.conf settings",
+ "repo.eapi.deprecated": "The ebuild uses an EAPI which is deprecated by the repository's metadata/layout.conf settings",
+ "virtual.oldstyle": "The ebuild PROVIDEs an old-style virtual (see GLEP 37)",
+ "virtual.suspect": "Ebuild contains a package that usually should be pulled via virtual/, not directly.",
+ "usage.obsolete": "The ebuild makes use of an obsolete construct",
+ "upstream.workaround": "The ebuild works around an upstream bug, an upstream bug should be filed and tracked in bugs.gentoo.org"
}
qacats = list(qahelp)
@@ -409,19 +381,18 @@ qawarnings = set((
"digest.unused",
"ebuild.notadded",
"ebuild.nesteddie",
-"desktop.invalid",
-"DEPEND.badmasked","RDEPEND.badmasked","PDEPEND.badmasked",
-"DEPEND.badindev","RDEPEND.badindev","PDEPEND.badindev",
-"DEPEND.badmaskedindev","RDEPEND.badmaskedindev","PDEPEND.badmaskedindev",
-"DEPEND.badtilde", "RDEPEND.badtilde", "PDEPEND.badtilde",
+"dependency.badmasked",
+"dependency.badindev",
+"dependency.badmaskedindev",
+"dependency.badtilde",
"DESCRIPTION.toolong",
"EAPI.deprecated",
"HOMEPAGE.virtual",
+"LICENSE.deprecated",
"LICENSE.virtual",
"KEYWORDS.dropped",
"KEYWORDS.stupid",
"KEYWORDS.missing",
-"IUSE.undefined",
"PDEPEND.suspect",
"RDEPEND.implicit",
"RDEPEND.suspect",
@@ -437,23 +408,21 @@ qawarnings = set((
"wxwidgets.eclassnotused",
"metadata.warning",
"portage.internal",
+"repo.eapi.deprecated",
"usage.obsolete",
"upstream.workaround",
"LIVEVCS.stable",
"LIVEVCS.unmasked",
+"IUSE.rubydeprecated",
))
-if portage.const._ENABLE_INHERIT_CHECK:
- # This is experimental, so it's non-fatal.
- qawarnings.add("inherit.missing")
-
non_ascii_re = re.compile(r'[^\x00-\x7f]')
missingvars = ["KEYWORDS", "LICENSE", "DESCRIPTION", "HOMEPAGE"]
allvars = set(x for x in portage.auxdbkeys if not x.startswith("UNUSED_"))
allvars.update(Package.metadata_keys)
allvars = sorted(allvars)
-commitmessage=None
+commitmessage = None
for x in missingvars:
x += ".missing"
if x not in qacats:
@@ -462,19 +431,10 @@ for x in missingvars:
qawarnings.add(x)
valid_restrict = frozenset(["binchecks", "bindist",
- "fetch", "installsources", "mirror",
- "primaryuri", "strip", "test", "userpriv"])
-
-live_eclasses = frozenset([
- "bzr",
- "cvs",
- "darcs",
- "git",
- "git-2",
- "mercurial",
- "subversion",
- "tla",
-])
+ "fetch", "installsources", "mirror", "preserve-libs",
+ "primaryuri", "splitdebug", "strip", "test", "userpriv"])
+
+live_eclasses = portage.const.LIVE_ECLASSES
suspect_rdepend = frozenset([
"app-arch/cabextract",
@@ -520,14 +480,25 @@ suspect_virtual = {
"dev-util/pkgconf":"virtual/pkgconfig",
"dev-util/pkgconfig":"virtual/pkgconfig",
"dev-util/pkgconfig-openbsd":"virtual/pkgconfig",
+ "dev-libs/libusb":"virtual/libusb",
+ "dev-libs/libusbx":"virtual/libusb",
+ "dev-libs/libusb-compat":"virtual/libusb",
}
+ruby_deprecated = frozenset([
+ "ruby_targets_ree18",
+])
+
+metadata_xml_encoding = 'UTF-8'
+metadata_xml_declaration = '<?xml version="1.0" encoding="%s"?>' % \
+ (metadata_xml_encoding,)
+metadata_doctype_name = 'pkgmetadata'
metadata_dtd_uri = 'http://www.gentoo.org/dtd/metadata.dtd'
# force refetch if the local copy creation time is older than this
metadata_dtd_ctime_interval = 60 * 60 * 24 * 7 # 7 days
# file.executable
-no_exec = frozenset(["Manifest","ChangeLog","metadata.xml"])
+no_exec = frozenset(["Manifest", "ChangeLog", "metadata.xml"])
options, arguments = ParseArgs(sys.argv, qahelp)
@@ -535,6 +506,11 @@ if options.version:
print("Portage", portage.VERSION)
sys.exit(0)
+if options.experimental_inherit == 'y':
+ # This is experimental, so it's non-fatal.
+ qawarnings.add("inherit.missing")
+ repoman.checks._init(experimental_inherit=True)
+
# Set this to False when an extraordinary issue (generally
# something other than a QA issue) makes it impossible to
# commit (like if Manifest generation fails).
@@ -584,14 +560,29 @@ if options.mode == 'commit' and not options.pretend and not vcs:
logging.info("Not in a version controlled repository; enabling pretend mode.")
options.pretend = True
-# Ensure that PORTDIR_OVERLAY contains the repository corresponding to $PWD.
-repoman_settings['PORTDIR_OVERLAY'] = "%s %s" % \
- (repoman_settings.get('PORTDIR_OVERLAY', ''),
- portage._shell_quote(portdir_overlay))
-# We have to call the config constructor again so
-# that config.repositories is initialized correctly.
-repoman_settings = portage.config(config_root=config_root, local_config=False,
- env=dict(os.environ, PORTDIR_OVERLAY=repoman_settings['PORTDIR_OVERLAY']))
+# Ensure that current repository is in the list of enabled repositories.
+repodir = os.path.realpath(portdir_overlay)
+try:
+ repoman_settings.repositories.get_repo_for_location(repodir)
+except KeyError:
+ repo_name = portage.repository.config.RepoConfig._read_valid_repo_name(portdir_overlay)[0]
+ layout_conf_data = portage.repository.config.parse_layout_conf(portdir_overlay)[0]
+ if layout_conf_data['repo-name']:
+ repo_name = layout_conf_data['repo-name']
+ tmp_conf_file = io.StringIO(textwrap.dedent("""
+ [%s]
+ location = %s
+ """) % (repo_name, portdir_overlay))
+ # Ensure that the repository corresponding to $PWD overrides a
+ # repository of the same name referenced by the existing PORTDIR
+ # or PORTDIR_OVERLAY settings.
+ repoman_settings['PORTDIR_OVERLAY'] = "%s %s" % \
+ (repoman_settings.get('PORTDIR_OVERLAY', ''),
+ portage._shell_quote(portdir_overlay))
+ repositories = portage.repository.config.load_repository_config(repoman_settings, extra_files=[tmp_conf_file])
+ # We have to call the config constructor again so that attributes
+ # dependent on config.repositories are initialized correctly.
+ repoman_settings = portage.config(config_root=config_root, local_config=False, repositories=repositories)
root = repoman_settings['EROOT']
trees = {
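
When $PWD's repository is not among the configured ones, the new code fabricates a one-entry repos.conf in memory and passes it to load_repository_config() via extra_files. A minimal sketch of building that in-memory config; repo_name and location are hypothetical values for illustration:

    import io
    import textwrap

    repo_name = "my-overlay"                   # hypothetical
    location = "/var/lib/overlays/my-overlay"  # hypothetical

    tmp_conf_file = io.StringIO(textwrap.dedent("""
        [%s]
        location = %s
        """) % (repo_name, location))

    # io.StringIO is file-like, so a loader that accepts open files
    # (as load_repository_config() does through extra_files) can read
    # it without anything touching the filesystem.
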
@@ -601,10 +592,15 @@ portdb = trees[root]['porttree'].dbapi
# Constrain dependency resolution to the master(s)
# that are specified in layout.conf.
-repodir = os.path.realpath(portdir_overlay)
repo_config = repoman_settings.repositories.get_repo_for_location(repodir)
portdb.porttrees = list(repo_config.eclass_db.porttrees)
portdir = portdb.porttrees[0]
+commit_env = os.environ.copy()
+# list() is for iteration on a copy.
+for repo in list(repoman_settings.repositories):
+ # all paths are canonical
+ if repo.location not in repo_config.eclass_db.porttrees:
+ del repoman_settings.repositories[repo.name]
if repo_config.allow_provide_virtual:
qawarnings.add("virtual.oldstyle")
@@ -615,6 +611,15 @@ if repo_config.sign_commit:
# the commit arguments. If key_id is unspecified, then it must be
# configured by `git config user.signingkey key_id`.
vcs_local_opts.append("--gpg-sign")
+ if repoman_settings.get("PORTAGE_GPG_DIR"):
+ # Pass GNUPGHOME to git for bug #462362.
+ commit_env["GNUPGHOME"] = repoman_settings["PORTAGE_GPG_DIR"]
+
+ # Pass GPG_TTY to git for bug #477728.
+ try:
+ commit_env["GPG_TTY"] = os.ttyname(sys.stdin.fileno())
+ except OSError:
+ pass
# In order to disable manifest signatures, repos may set
# "sign-manifests = false" in metadata/layout.conf. This
@@ -623,6 +628,25 @@ if repo_config.sign_commit:
sign_manifests = "sign" in repoman_settings.features and \
repo_config.sign_manifest
+if repo_config.sign_manifest and repo_config.name == "gentoo" and \
+ options.mode in ("commit",) and not sign_manifests:
+ msg = ("The '%s' repository has manifest signatures enabled, "
+ "but FEATURES=sign is currently disabled. In order to avoid this "
+ "warning, enable FEATURES=sign in make.conf. Alternatively, "
+ "repositories can disable manifest signatures by setting "
+ "'sign-manifests = false' in metadata/layout.conf.") % \
+ (repo_config.name,)
+ for line in textwrap.wrap(msg, 60):
+ logging.warn(line)
+
+if sign_manifests and options.mode in ("commit",) and \
+ repoman_settings.get("PORTAGE_GPG_KEY") and \
+ re.match(r'^%s$' % GPG_KEY_ID_REGEX,
+ repoman_settings["PORTAGE_GPG_KEY"]) is None:
+ logging.error("PORTAGE_GPG_KEY value is invalid: %s" %
+ repoman_settings["PORTAGE_GPG_KEY"])
+ sys.exit(1)
+
manifest_hashes = repo_config.manifest_hashes
if manifest_hashes is None:
manifest_hashes = portage.const.MANIFEST2_HASH_DEFAULTS
@@ -652,19 +676,6 @@ if options.mode in ("commit", "fix", "manifest"):
logging.error(line)
sys.exit(1)
-if "commit" == options.mode and \
- repo_config.name == "gentoo" and \
- "RMD160" in manifest_hashes and \
- "RMD160" not in portage.checksum.hashorigin_map:
- msg = "Please install " \
- "pycrypto or enable python's ssl USE flag in order " \
- "to enable RMD160 hash support. See bug #198398 for " \
- "more information."
- prefix = bad(" * ")
- for line in textwrap.wrap(msg, 70):
- print(prefix + line)
- sys.exit(1)
-
if options.echangelog is None and repo_config.update_changelog:
options.echangelog = 'y'
@@ -689,18 +700,9 @@ logging.debug("vcs: %s" % (vcs,))
logging.debug("repo config: %s" % (repo_config,))
logging.debug("options: %s" % (options,))
-# Generate an appropriate PORTDIR_OVERLAY value for passing into the
-# profile-specific config constructor calls.
-env = os.environ.copy()
-env['PORTDIR'] = portdir
-env['PORTDIR_OVERLAY'] = ' '.join(portdb.porttrees[1:])
-
-logging.info('Setting paths:')
-logging.info('PORTDIR = "' + portdir + '"')
-logging.info('PORTDIR_OVERLAY = "%s"' % env['PORTDIR_OVERLAY'])
-
# It's confusing if these warnings are displayed without the user
# being told which profile they come from, so disable them.
+env = os.environ.copy()
env['FEATURES'] = env.get('FEATURES', '') + ' -unknown-features-warn'
categories = []
@@ -724,7 +726,7 @@ repolevel = len(reposplit)
# check if it's in $PORTDIR/$CATEGORY/$PN , otherwise bail if commiting.
# Reason for this is if they're trying to commit in just $FILESDIR/*, the Manifest needs updating.
# this check ensures that repoman knows where it is, and the manifest recommit is at least possible.
-if options.mode == 'commit' and repolevel not in [1,2,3]:
+if options.mode == 'commit' and repolevel not in [1, 2, 3]:
print(red("***")+" Commit attempts *must* be from within a vcs co, category, or package directory.")
print(red("***")+" Attempting to commit from a packages files directory will be blocked for instance.")
print(red("***")+" This is intended behaviour, to ensure the manifest is recommitted for a package.")
@@ -737,10 +739,76 @@ if repolevel == 1:
startdir = repodir
else:
startdir = normalize_path(mydir)
- startdir = os.path.join(repodir, *startdir.split(os.sep)[-2-repolevel+3:])
+ startdir = os.path.join(repodir, *startdir.split(os.sep)[-2 - repolevel + 3:])
def caterror(mycat):
- err(mycat+" is not an official category. Skipping QA checks in this directory.\nPlease ensure that you add "+catdir+" to "+repodir+"/profiles/categories\nif it is a new category.")
+ err(mycat + " is not an official category. Skipping QA checks in this directory.\nPlease ensure that you add " + catdir + " to " + repodir + "/profiles/categories\nif it is a new category.")
+
+def repoman_getstatusoutput(cmd):
+ """
+ Implements an interface similar to getstatusoutput(), but with
+ customized unicode handling (see bug #310789) and without the shell.
+ """
+ args = portage.util.shlex_split(cmd)
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(args[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ encoding = _encodings['fs']
+ args = [_unicode_encode(x,
+ encoding=encoding, errors='strict') for x in args]
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = portage._unicode_decode(proc.communicate()[0],
+ encoding=encoding, errors='strict')
+ if output and output[-1] == "\n":
+ # getstatusoutput strips one newline
+ output = output[:-1]
+ return (proc.wait(), output)
+
+class repoman_popen(portage.proxy.objectproxy.ObjectProxy):
+ """
+ Implements an interface similar to os.popen(), but with customized
+ unicode handling (see bug #310789) and without the shell.
+ """
+
+ __slots__ = ('_proc', '_stdout')
+
+ def __init__(self, cmd):
+ args = portage.util.shlex_split(cmd)
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(args[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ encoding = _encodings['fs']
+ args = [_unicode_encode(x,
+ encoding=encoding, errors='strict') for x in args]
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ object.__setattr__(self, '_proc', proc)
+ object.__setattr__(self, '_stdout',
+ codecs.getreader(encoding)(proc.stdout, 'strict'))
+
+ def _get_target(self):
+ return object.__getattribute__(self, '_stdout')
+
+ __enter__ = _get_target
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ proc = object.__getattribute__(self, '_proc')
+ proc.wait()
+ proc.stdout.close()
class ProfileDesc(object):
__slots__ = ('abs_path', 'arch', 'status', 'sub_path', 'tree_path',)
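
Both repoman_getstatusoutput() and repoman_popen() exist to replace shell-spawning helpers (subprocess_getstatusoutput, os.popen) with shell-free subprocess calls that decode output strictly (bug #310789). Stripped of the ObjectProxy scaffolding, the core pattern is a strict codecs reader over a pipe; a minimal sketch:

    import codecs
    import shlex
    import subprocess

    def popen_lines(cmd, encoding="utf-8"):
        # No shell: tokenize the command string ourselves.
        proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
        # A strict reader raises on undecodable bytes instead of
        # silently corrupting VCS output.
        reader = codecs.getreader(encoding)(proc.stdout, "strict")
        try:
            return reader.readlines()
        finally:
            proc.stdout.close()
            proc.wait()

The proxy version additionally supports the with-statement, which the VCS status calls in the later hunks rely on.
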
@@ -818,18 +886,18 @@ for path in portdb.porttrees:
continue
if len(arch) != 3:
err("wrong format: \"" + bad(x.strip()) + "\" in " + \
- desc_path + " line %d" % (i+1, ))
+ desc_path + " line %d" % (i + 1, ))
elif arch[0] not in kwlist:
err("invalid arch: \"" + bad(arch[0]) + "\" in " + \
- desc_path + " line %d" % (i+1, ))
+ desc_path + " line %d" % (i + 1, ))
elif arch[2] not in valid_profile_types:
err("invalid profile type: \"" + bad(arch[2]) + "\" in " + \
- desc_path + " line %d" % (i+1, ))
+ desc_path + " line %d" % (i + 1, ))
profile_desc = ProfileDesc(arch[0], arch[2], arch[1], path)
if not os.path.isdir(profile_desc.abs_path):
logging.error(
"Invalid %s profile (%s) for arch %s in %s line %d",
- arch[2], arch[1], arch[0], desc_path, i+1)
+ arch[2], arch[1], arch[0], desc_path, i + 1)
continue
if os.path.exists(
os.path.join(profile_desc.abs_path, 'deprecated')):
@@ -876,11 +944,16 @@ for x in repoman_settings.archlist():
if x[0] == "~":
continue
if x not in profiles:
- print(red("\""+x+"\" doesn't have a valid profile listed in profiles.desc."))
+ print(red("\"" + x + "\" doesn't have a valid profile listed in profiles.desc."))
print(red("You need to either \"cvs update\" your profiles dir or follow this"))
- print(red("up with the "+x+" team."))
+ print(red("up with the " + x + " team."))
print()
+liclist_deprecated = set()
+if "DEPRECATED" in repoman_settings._license_manager._license_groups:
+ liclist_deprecated.update(
+ repoman_settings._license_manager.expandLicenseTokens(["@DEPRECATED"]))
+
if not liclist:
logging.fatal("Couldn't find licenses?")
sys.exit(1)
@@ -893,34 +966,34 @@ if not uselist:
logging.fatal("Couldn't find use.desc?")
sys.exit(1)
-scanlist=[]
-if repolevel==2:
- #we are inside a category directory
- catdir=reposplit[-1]
+scanlist = []
+if repolevel == 2:
+ # we are inside a category directory
+ catdir = reposplit[-1]
if catdir not in categories:
caterror(catdir)
- mydirlist=os.listdir(startdir)
+ mydirlist = os.listdir(startdir)
for x in mydirlist:
if x == "CVS" or x.startswith("."):
continue
- if os.path.isdir(startdir+"/"+x):
- scanlist.append(catdir+"/"+x)
+ if os.path.isdir(startdir + "/" + x):
+ scanlist.append(catdir + "/" + x)
repo_subdir = catdir + os.sep
-elif repolevel==1:
+elif repolevel == 1:
for x in categories:
- if not os.path.isdir(startdir+"/"+x):
+ if not os.path.isdir(startdir + "/" + x):
continue
- for y in os.listdir(startdir+"/"+x):
+ for y in os.listdir(startdir + "/" + x):
if y == "CVS" or y.startswith("."):
continue
- if os.path.isdir(startdir+"/"+x+"/"+y):
- scanlist.append(x+"/"+y)
+ if os.path.isdir(startdir + "/" + x + "/" + y):
+ scanlist.append(x + "/" + y)
repo_subdir = ""
-elif repolevel==3:
+elif repolevel == 3:
catdir = reposplit[-2]
if catdir not in categories:
caterror(catdir)
- scanlist.append(catdir+"/"+reposplit[-1])
+ scanlist.append(catdir + "/" + reposplit[-1])
repo_subdir = scanlist[-1] + os.sep
else:
msg = 'Repoman is unable to determine PORTDIR or PORTDIR_OVERLAY' + \
@@ -952,7 +1025,7 @@ def vcs_files_to_cps(vcs_file_iter):
if category in categories:
for filename in vcs_file_iter:
f_split = filename.split(os.sep)
- # ['.', pn,...]
+ # ['.', pn, ...]
if len(f_split) > 2:
modified_cps.append(category + "/" + f_split[1])
@@ -960,7 +1033,7 @@ def vcs_files_to_cps(vcs_file_iter):
# repolevel == 1
for filename in vcs_file_iter:
f_split = filename.split(os.sep)
- # ['.', category, pn,...]
+ # ['.', category, pn, ...]
if len(f_split) > 3 and f_split[1] in categories:
modified_cps.append("/".join(f_split[1:3]))
@@ -968,12 +1041,12 @@ def vcs_files_to_cps(vcs_file_iter):
def git_supports_gpg_sign():
status, cmd_output = \
- subprocess_getstatusoutput("git --version")
+ repoman_getstatusoutput("git --version")
cmd_output = cmd_output.split()
if cmd_output:
version = re.match(r'^(\d+)\.(\d+)\.(\d+)', cmd_output[-1])
if version is not None:
- version = [int(x) for x in version.groups()[1:]]
+ version = [int(x) for x in version.groups()]
if version[0] > 1 or \
(version[0] == 1 and version[1] > 7) or \
(version[0] == 1 and version[1] == 7 and version[2] >= 9):
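
The one-character change above fixes a real bug: version.groups()[1:] discarded the major component, so the (major, minor, patch) comparison that follows was shifted by one. A self-contained sketch of the corrected check against git's 1.7.9 GPG-signing threshold, using Python's tuple comparison in place of the chained conditionals:

    import re

    def supports_gpg_sign(version_output):
        # e.g. "git version 1.8.5" -> (1, 8, 5) >= (1, 7, 9)
        m = re.match(r'^(\d+)\.(\d+)\.(\d+)', version_output.split()[-1])
        if m is None:
            return False
        return tuple(int(x) for x in m.groups()) >= (1, 7, 9)

    assert supports_gpg_sign("git version 1.7.9")
    assert not supports_gpg_sign("git version 1.7.8")
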
@@ -1002,47 +1075,16 @@ def dev_keywords(profiles):
dev_keywords = dev_keywords(profiles)
-stats={}
-fails={}
-
-# provided by the desktop-file-utils package
-desktop_file_validate = find_binary("desktop-file-validate")
-desktop_pattern = re.compile(r'.*\.desktop$')
+stats = {}
+fails = {}
for x in qacats:
- stats[x]=0
- fails[x]=[]
+ stats[x] = 0
+ fails[x] = []
xmllint_capable = False
metadata_dtd = os.path.join(repoman_settings["DISTDIR"], 'metadata.dtd')
-def parsedate(s):
- """Parse a RFC 822 date and time string.
- This is required for python3 compatibility, since the
- rfc822.parsedate() function is not available."""
-
- s_split = []
- for x in s.upper().split():
- for y in x.split(','):
- if y:
- s_split.append(y)
-
- if len(s_split) != 6:
- return None
-
- # %a, %d %b %Y %H:%M:%S %Z
- a, d, b, Y, H_M_S, Z = s_split
-
- # Convert month to integer, since strptime %w is locale-dependent.
- month_map = {'JAN':1, 'FEB':2, 'MAR':3, 'APR':4, 'MAY':5, 'JUN':6,
- 'JUL':7, 'AUG':8, 'SEP':9, 'OCT':10, 'NOV':11, 'DEC':12}
- m = month_map.get(b)
- if m is None:
- return None
- m = str(m).rjust(2, '0')
-
- return time.strptime(':'.join((Y, m, d, H_M_S)), '%Y:%m:%d:%H:%M:%S')
-
def fetch_metadata_dtd():
"""
Fetch metadata.dtd if it doesn't exist or the ctime is older than
@@ -1071,45 +1113,40 @@ def fetch_metadata_dtd():
print(green("***") + " the local copy of metadata.dtd " + \
"needs to be refetched, doing that now")
print()
+ parsed_url = urlparse(metadata_dtd_uri)
+ setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
+ fcmd = repoman_settings.get(setting)
+ if not fcmd:
+ fcmd = repoman_settings.get('FETCHCOMMAND')
+ if not fcmd:
+ logging.error("FETCHCOMMAND is unset")
+ return False
+
+ destdir = repoman_settings["DISTDIR"]
+ fd, metadata_dtd_tmp = tempfile.mkstemp(
+ prefix='metadata.dtd.', dir=destdir)
+ os.close(fd)
+
try:
- url_f = urllib_request_urlopen(metadata_dtd_uri)
- msg_info = url_f.info()
- last_modified = msg_info.get('last-modified')
- if last_modified is not None:
- last_modified = parsedate(last_modified)
- if last_modified is not None:
- last_modified = calendar.timegm(last_modified)
-
- metadata_dtd_tmp = "%s.%s" % (metadata_dtd, os.getpid())
- try:
- local_f = open(metadata_dtd_tmp, mode='wb')
- local_f.write(url_f.read())
- local_f.close()
- if last_modified is not None:
- try:
- os.utime(metadata_dtd_tmp,
- (int(last_modified), int(last_modified)))
- except OSError:
- # This fails on some odd non-unix-like filesystems.
- # We don't really need the mtime to be preserved
- # anyway here (currently we use ctime to trigger
- # fetch), so just ignore it.
- pass
- os.rename(metadata_dtd_tmp, metadata_dtd)
- finally:
- try:
- os.unlink(metadata_dtd_tmp)
- except OSError:
- pass
+ if not portage.getbinpkg.file_get(metadata_dtd_uri,
+ destdir, fcmd=fcmd,
+ filename=os.path.basename(metadata_dtd_tmp)):
+ logging.error("failed to fetch metadata.dtd from '%s'" %
+ metadata_dtd_uri)
+ return False
- url_f.close()
+ try:
+ portage.util.apply_secpass_permissions(metadata_dtd_tmp,
+ gid=portage.data.portage_gid, mode=0o664, mask=0o2)
+ except portage.exception.PortageException:
+ pass
- except EnvironmentError as e:
- print()
- print(red("!!!")+" attempting to fetch '%s', caught" % metadata_dtd_uri)
- print(red("!!!")+" exception '%s' though." % (e,))
- print(red("!!!")+" fetching new metadata.dtd failed, aborting")
- return False
+ os.rename(metadata_dtd_tmp, metadata_dtd)
+ finally:
+ try:
+ os.unlink(metadata_dtd_tmp)
+ except OSError:
+ pass
return True
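
The rewritten fetcher defers to the user's configured fetch command, preferring a scheme-specific variable (FETCHCOMMAND_HTTP, FETCHCOMMAND_FTP, ...) over the generic FETCHCOMMAND, and downloads into a tempfile inside the destination directory so the final os.rename() is an atomic same-filesystem replace. A sketch of that shape; fetch is a hypothetical callable standing in for portage.getbinpkg.file_get:

    import os
    import tempfile
    try:
        from urllib.parse import urlparse
    except ImportError:  # Python 2, as handled in the imports above
        from urlparse import urlparse

    def fetch_to(uri, dest_path, settings, fetch):
        scheme_key = 'FETCHCOMMAND_' + urlparse(uri).scheme.upper()
        fcmd = settings.get(scheme_key) or settings.get('FETCHCOMMAND')
        if not fcmd:
            return False
        destdir = os.path.dirname(dest_path)
        fd, tmp = tempfile.mkstemp(prefix='metadata.dtd.', dir=destdir)
        os.close(fd)
        try:
            if not fetch(uri, destdir, fcmd=fcmd,
                    filename=os.path.basename(tmp)):
                return False
            # Same directory => rename cannot cross filesystems and
            # atomically replaces any existing file.
            os.rename(tmp, dest_path)
        finally:
            try:
                os.unlink(tmp)  # no-op (ENOENT) after a successful rename
            except OSError:
                pass
        return True
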
@@ -1117,14 +1154,14 @@ if options.mode == "manifest":
pass
elif not find_binary('xmllint'):
print(red("!!! xmllint not found. Can't check metadata.xml.\n"))
- if options.xml_parse or repolevel==3:
+ if options.xml_parse or repolevel == 3:
print(red("!!!")+" sorry, xmllint is needed. failing\n")
sys.exit(1)
else:
if not fetch_metadata_dtd():
sys.exit(1)
- #this can be problematic if xmllint changes their output
- xmllint_capable=True
+ # this can be problematic if xmllint changes their output
+ xmllint_capable = True
if options.mode == 'commit' and vcs:
utilities.detect_vcs_conflicts(options, vcs)
@@ -1151,45 +1188,46 @@ if vcs == "cvs":
myremoved = cvstree.findremoved(mycvstree, recursive=1, basedir="./")
elif vcs == "svn":
- with os.popen("svn status") as f:
+ with repoman_popen("svn status") as f:
svnstatus = f.readlines()
- mychanged = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem and elem[:1] in "MR" ]
- mynew = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("A") ]
+ mychanged = ["./" + elem.split()[-1:][0] for elem in svnstatus if elem and elem[:1] in "MR"]
+ mynew = ["./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("A")]
if options.if_modified == "y":
- myremoved = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("D")]
+ myremoved = ["./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("D")]
elif vcs == "git":
- with os.popen("git diff-index --name-only "
+ with repoman_popen("git diff-index --name-only "
"--relative --diff-filter=M HEAD") as f:
mychanged = f.readlines()
mychanged = ["./" + elem[:-1] for elem in mychanged]
- with os.popen("git diff-index --name-only "
+ with repoman_popen("git diff-index --name-only "
"--relative --diff-filter=A HEAD") as f:
mynew = f.readlines()
mynew = ["./" + elem[:-1] for elem in mynew]
if options.if_modified == "y":
- with os.popen("git diff-index --name-only "
+ with repoman_popen("git diff-index --name-only "
"--relative --diff-filter=D HEAD") as f:
myremoved = f.readlines()
myremoved = ["./" + elem[:-1] for elem in myremoved]
elif vcs == "bzr":
- with os.popen("bzr status -S .") as f:
+ with repoman_popen("bzr status -S .") as f:
bzrstatus = f.readlines()
- mychanged = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and elem[1:2] == "M" ]
- mynew = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and ( elem[1:2] == "NK" or elem[0:1] == "R" ) ]
+ mychanged = ["./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and elem[1:2] == "M"]
+ mynew = ["./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and (elem[1:2] == "NK" or elem[0:1] == "R")]
if options.if_modified == "y":
- myremoved = [ "./" + elem.split()[-3:-2][0].split('/')[-1:][0] for elem in bzrstatus if elem and ( elem[1:2] == "K" or elem[0:1] == "R" ) ]
+ myremoved = ["./" + elem.split()[-3:-2][0].split('/')[-1:][0] for elem in bzrstatus if elem and (elem[1:2] == "K" or elem[0:1] == "R")]
elif vcs == "hg":
- with os.popen("hg status --no-status --modified .") as f:
+ with repoman_popen("hg status --no-status --modified .") as f:
mychanged = f.readlines()
mychanged = ["./" + elem.rstrip() for elem in mychanged]
- mynew = os.popen("hg status --no-status --added .").readlines()
+ with repoman_popen("hg status --no-status --added .") as f:
+ mynew = f.readlines()
mynew = ["./" + elem.rstrip() for elem in mynew]
if options.if_modified == "y":
- with os.popen("hg status --no-status --removed .") as f:
+ with repoman_popen("hg status --no-status --removed .") as f:
myremoved = f.readlines()
myremoved = ["./" + elem.rstrip() for elem in myremoved]
@@ -1211,10 +1249,15 @@ dofail = 0
# NOTE: match-all caches are not shared due to potential
# differences between profiles in _get_implicit_iuse.
-arch_caches={}
+arch_caches = {}
arch_xmatch_caches = {}
shared_xmatch_caches = {"cp-list":{}}
+include_arches = None
+if options.include_arches:
+ include_arches = set()
+ include_arches.update(*[x.split() for x in options.include_arches])
+
# Disable the "ebuild.notadded" check when not in commit mode and
# running `svn status` in every package dir will be too expensive.
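
Since --include-arches is declared with action='append', each occurrence yields one string that may itself contain several space-separated arches; update(*[...]) flattens them all into a single set. In isolation, with hypothetical values:

    include_arches = set()
    values = ["amd64 x86", "arm"]  # e.g. from two --include-arches uses
    include_arches.update(*[x.split() for x in values])
    assert include_arches == {"amd64", "x86", "arm"}
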
@@ -1222,12 +1265,37 @@ check_ebuild_notadded = not \
(vcs == "svn" and repolevel < 3 and options.mode != "commit")
# Build a regex from thirdpartymirrors for the SRC_URI.mirror check.
-thirdpartymirrors = []
-for v in repoman_settings.thirdpartymirrors().values():
+thirdpartymirrors = {}
+for k, v in repoman_settings.thirdpartymirrors().items():
for v in v:
if not v.endswith("/"):
v += "/"
- thirdpartymirrors.append(v)
+ thirdpartymirrors[v] = k
+
+class _XMLParser(xml.etree.ElementTree.XMLParser):
+
+ def __init__(self, data, **kwargs):
+ xml.etree.ElementTree.XMLParser.__init__(self, **kwargs)
+ self._portage_data = data
+ if hasattr(self, 'parser'):
+ self._base_XmlDeclHandler = self.parser.XmlDeclHandler
+ self.parser.XmlDeclHandler = self._portage_XmlDeclHandler
+ self._base_StartDoctypeDeclHandler = \
+ self.parser.StartDoctypeDeclHandler
+ self.parser.StartDoctypeDeclHandler = \
+ self._portage_StartDoctypeDeclHandler
+
+ def _portage_XmlDeclHandler(self, version, encoding, standalone):
+ if self._base_XmlDeclHandler is not None:
+ self._base_XmlDeclHandler(version, encoding, standalone)
+ self._portage_data["XML_DECLARATION"] = (version, encoding, standalone)
+
+ def _portage_StartDoctypeDeclHandler(self, doctypeName, systemId, publicId,
+ has_internal_subset):
+ if self._base_StartDoctypeDeclHandler is not None:
+ self._base_StartDoctypeDeclHandler(doctypeName, systemId, publicId,
+ has_internal_subset)
+ self._portage_data["DOCTYPE"] = (doctypeName, systemId, publicId)
class _MetadataTreeBuilder(xml.etree.ElementTree.TreeBuilder):
"""
@@ -1252,13 +1320,13 @@ if options.if_modified == "y":
chain(mychanged, mynew, myremoved)))
for x in effective_scanlist:
- #ebuilds and digests added to cvs respectively.
+ # ebuilds and digests added to cvs respectively.
logging.info("checking package %s" % x)
# save memory by discarding xmatch caches from previous package(s)
arch_xmatch_caches.clear()
- eadded=[]
- catdir,pkgdir=x.split("/")
- checkdir=repodir+"/"+x
+ eadded = []
+ catdir, pkgdir = x.split("/")
+ checkdir = repodir + "/" + x
checkdir_relative = ""
if repolevel < 3:
checkdir_relative = os.path.join(pkgdir, checkdir_relative)
@@ -1340,15 +1408,15 @@ for x in effective_scanlist:
if options.mode == 'manifest-check':
continue
- checkdirlist=os.listdir(checkdir)
- ebuildlist=[]
+ checkdirlist = os.listdir(checkdir)
+ ebuildlist = []
pkgs = {}
allvalid = True
for y in checkdirlist:
if (y in no_exec or y.endswith(".ebuild")) and \
- stat.S_IMODE(os.stat(os.path.join(checkdir, y)).st_mode) & 0o111:
- stats["file.executable"] += 1
- fails["file.executable"].append(os.path.join(checkdir, y))
+ stat.S_IMODE(os.stat(os.path.join(checkdir, y)).st_mode) & 0o111:
+ stats["file.executable"] += 1
+ fails["file.executable"].append(os.path.join(checkdir, y))
if y.endswith(".ebuild"):
pf = y[:-7]
ebuildlist.append(pf)
@@ -1389,19 +1457,19 @@ for x in effective_scanlist:
ebuildlist = [pkg.pf for pkg in ebuildlist]
for y in checkdirlist:
- m = disallowed_filename_chars_re.search(y.strip(os.sep))
- if m is not None:
+ index = repo_config.find_invalid_path_char(y)
+ if index != -1:
y_relative = os.path.join(checkdir_relative, y)
if vcs is not None and not vcs_new_changed(y_relative):
# If the file isn't in the VCS new or changed set, then
# assume that it's an irrelevant temporary file (Manifest
# entries are not generated for file names containing
# prohibited characters). See bug #406877.
- m = None
- if m is not None:
+ index = -1
+ if index != -1:
stats["file.name"] += 1
fails["file.name"].append("%s/%s: char '%s'" % \
- (checkdir, y, m.group(0)))
+ (checkdir, y, y[index]))
if not (y in ("ChangeLog", "metadata.xml") or y.endswith(".ebuild")):
continue
@@ -1412,7 +1480,7 @@ for x in effective_scanlist:
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'])
for l in f:
- line +=1
+ line += 1
except UnicodeDecodeError as ue:
stats["file.UTF8"] += 1
s = ue.object[:ue.start]
@@ -1427,10 +1495,10 @@ for x in effective_scanlist:
if vcs in ("git", "hg") and check_ebuild_notadded:
if vcs == "git":
- myf = os.popen("git ls-files --others %s" % \
+ myf = repoman_popen("git ls-files --others %s" % \
(portage._shell_quote(checkdir_relative),))
if vcs == "hg":
- myf = os.popen("hg status --no-status --unknown %s" % \
+ myf = repoman_popen("hg status --no-status --unknown %s" % \
(portage._shell_quote(checkdir_relative),))
for l in myf:
if l[:-1][-7:] == ".ebuild":
@@ -1442,21 +1510,23 @@ for x in effective_scanlist:
if vcs in ("cvs", "svn", "bzr") and check_ebuild_notadded:
try:
if vcs == "cvs":
- myf=open(checkdir+"/CVS/Entries","r")
+ myf = open(checkdir + "/CVS/Entries", "r")
if vcs == "svn":
- myf = os.popen("svn status --depth=files --verbose " + checkdir)
+ myf = repoman_popen("svn status --depth=files --verbose " +
+ portage._shell_quote(checkdir))
if vcs == "bzr":
- myf = os.popen("bzr ls -v --kind=file " + checkdir)
+ myf = repoman_popen("bzr ls -v --kind=file " +
+ portage._shell_quote(checkdir))
myl = myf.readlines()
myf.close()
for l in myl:
if vcs == "cvs":
- if l[0]!="/":
+ if l[0] != "/":
continue
- splitl=l[1:].split("/")
+ splitl = l[1:].split("/")
if not len(splitl):
continue
- if splitl[0][-7:]==".ebuild":
+ if splitl[0][-7:] == ".ebuild":
eadded.append(splitl[0][:-7])
if vcs == "svn":
if l[:1] == "?":
@@ -1474,8 +1544,9 @@ for x in effective_scanlist:
if l[-7:] == ".ebuild":
eadded.append(os.path.basename(l[:-7]))
if vcs == "svn":
- myf = os.popen("svn status " + checkdir)
- myl=myf.readlines()
+ myf = repoman_popen("svn status " +
+ portage._shell_quote(checkdir))
+ myl = myf.readlines()
myf.close()
for l in myl:
if l[0] == "A":
@@ -1485,7 +1556,7 @@ for x in effective_scanlist:
except IOError:
if vcs == "cvs":
stats["CVS/Entries.IO_error"] += 1
- fails["CVS/Entries.IO_error"].append(checkdir+"/CVS/Entries")
+ fails["CVS/Entries.IO_error"].append(checkdir + "/CVS/Entries")
else:
raise
continue
@@ -1493,7 +1564,7 @@ for x in effective_scanlist:
mf = repoman_settings.repositories.get_repo_for_location(
os.path.dirname(os.path.dirname(checkdir)))
mf = mf.load_manifest(checkdir, repoman_settings["DISTDIR"])
- mydigests=mf.getTypeDigests("DIST")
+ mydigests = mf.getTypeDigests("DIST")
fetchlist_dict = portage.FetchlistDict(checkdir, repoman_settings, portdb)
myfiles_all = []
@@ -1509,7 +1580,7 @@ for x in effective_scanlist:
# This will be reported as an "ebuild.syntax" error.
pass
else:
- stats["SRC_URI.syntax"] = stats["SRC_URI.syntax"] + 1
+ stats["SRC_URI.syntax"] += 1
fails["SRC_URI.syntax"].append(
"%s.ebuild SRC_URI: %s" % (mykey, e))
del fetchlist_dict
@@ -1523,15 +1594,15 @@ for x in effective_scanlist:
for entry in mydigests:
if entry not in myfiles_all:
stats["digest.unused"] += 1
- fails["digest.unused"].append(checkdir+"::"+entry)
+ fails["digest.unused"].append(checkdir + "::" + entry)
for entry in myfiles_all:
if entry not in mydigests:
stats["digest.missing"] += 1
- fails["digest.missing"].append(checkdir+"::"+entry)
+ fails["digest.missing"].append(checkdir + "::" + entry)
del myfiles_all
- if os.path.exists(checkdir+"/files"):
- filesdirlist=os.listdir(checkdir+"/files")
+ if os.path.exists(checkdir + "/files"):
+ filesdirlist = os.listdir(checkdir + "/files")
# recurse through files directory
 			# use filesdirlist as a stack; append directories as needed so people can't hide more than 20k files in a subdirectory.
@@ -1551,77 +1622,110 @@ for x in effective_scanlist:
# !!! VCS "portability" alert! Need some function isVcsDir() or alike !!!
if y == "CVS" or y == ".svn":
continue
- for z in os.listdir(checkdir+"/files/"+y):
+ for z in os.listdir(checkdir + "/files/" + y):
if z == "CVS" or z == ".svn":
continue
- filesdirlist.append(y+"/"+z)
+ filesdirlist.append(y + "/" + z)
 		# Current policy is no files over 20 KiB; these are the checks. File size between
# 20 KiB and 60 KiB causes a warning, while file size over 60 KiB causes an error.
elif mystat.st_size > 61440:
stats["file.size.fatal"] += 1
- fails["file.size.fatal"].append("("+ str(mystat.st_size//1024) + " KiB) "+x+"/files/"+y)
+ fails["file.size.fatal"].append("(" + str(mystat.st_size//1024) + " KiB) " + x + "/files/" + y)
elif mystat.st_size > 20480:
stats["file.size"] += 1
- fails["file.size"].append("("+ str(mystat.st_size//1024) + " KiB) "+x+"/files/"+y)
+ fails["file.size"].append("(" + str(mystat.st_size//1024) + " KiB) " + x + "/files/" + y)
- m = disallowed_filename_chars_re.search(
- os.path.basename(y.rstrip(os.sep)))
- if m is not None:
+ index = repo_config.find_invalid_path_char(y)
+ if index != -1:
y_relative = os.path.join(checkdir_relative, "files", y)
if vcs is not None and not vcs_new_changed(y_relative):
# If the file isn't in the VCS new or changed set, then
# assume that it's an irrelevant temporary file (Manifest
# entries are not generated for file names containing
# prohibited characters). See bug #406877.
- m = None
- if m is not None:
+ index = -1
+ if index != -1:
stats["file.name"] += 1
fails["file.name"].append("%s/files/%s: char '%s'" % \
- (checkdir, y, m.group(0)))
-
- if desktop_file_validate and desktop_pattern.match(y):
- cmd_output = validate_desktop_entry(full_path)
- if cmd_output:
- # Note: in the future we may want to grab the
- # warnings in addition to the errors. We're
- # just doing errors now since we don't want
- # to generate too much noise at first.
- error_re = re.compile(r'.*\s*error:\s*(.*)')
- for line in cmd_output:
- error_match = error_re.match(line)
- if error_match is None:
- continue
- stats["desktop.invalid"] += 1
- fails["desktop.invalid"].append(
- relative_path + ': %s' % error_match.group(1))
-
+ (checkdir, y, y[index]))
del mydigests
if check_changelog and "ChangeLog" not in checkdirlist:
- stats["changelog.missing"]+=1
- fails["changelog.missing"].append(x+"/ChangeLog")
-
+ stats["changelog.missing"] += 1
+ fails["changelog.missing"].append(x + "/ChangeLog")
+
musedict = {}
- #metadata.xml file check
+ # metadata.xml file check
if "metadata.xml" not in checkdirlist:
- stats["metadata.missing"]+=1
- fails["metadata.missing"].append(x+"/metadata.xml")
- #metadata.xml parse check
+ stats["metadata.missing"] += 1
+ fails["metadata.missing"].append(x + "/metadata.xml")
+ # metadata.xml parse check
else:
metadata_bad = False
+ xml_info = {}
+ xml_parser = _XMLParser(xml_info, target=_MetadataTreeBuilder())
# read metadata.xml into memory
try:
_metadata_xml = xml.etree.ElementTree.parse(
- os.path.join(checkdir, "metadata.xml"),
- parser=xml.etree.ElementTree.XMLParser(
- target=_MetadataTreeBuilder()))
+ _unicode_encode(os.path.join(checkdir, "metadata.xml"),
+ encoding=_encodings['fs'], errors='strict'),
+ parser=xml_parser)
except (ExpatError, SyntaxError, EnvironmentError) as e:
metadata_bad = True
stats["metadata.bad"] += 1
fails["metadata.bad"].append("%s/metadata.xml: %s" % (x, e))
del e
else:
+ if not hasattr(xml_parser, 'parser') or \
+ sys.hexversion < 0x2070000 or \
+ (sys.hexversion > 0x3000000 and sys.hexversion < 0x3020000):
+ # doctype is not parsed with python 2.6 or 3.1
+ pass
+ else:
+ if "XML_DECLARATION" not in xml_info:
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append("%s/metadata.xml: "
+ "xml declaration is missing on first line, "
+ "should be '%s'" % (x, metadata_xml_declaration))
+ else:
+ xml_version, xml_encoding, xml_standalone = \
+ xml_info["XML_DECLARATION"]
+ if xml_encoding is None or \
+ xml_encoding.upper() != metadata_xml_encoding:
+ stats["metadata.bad"] += 1
+ if xml_encoding is None:
+ encoding_problem = "but it is undefined"
+ else:
+ encoding_problem = "not '%s'" % xml_encoding
+ fails["metadata.bad"].append("%s/metadata.xml: "
+ "xml declaration encoding should be '%s', %s" %
+ (x, metadata_xml_encoding, encoding_problem))
+
+ if "DOCTYPE" not in xml_info:
+ metadata_bad = True
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append("%s/metadata.xml: %s" % (x,
+ "DOCTYPE is missing"))
+ else:
+ doctype_name, doctype_system, doctype_pubid = \
+ xml_info["DOCTYPE"]
+ if doctype_system != metadata_dtd_uri:
+ stats["metadata.bad"] += 1
+ if doctype_system is None:
+ system_problem = "but it is undefined"
+ else:
+ system_problem = "not '%s'" % doctype_system
+ fails["metadata.bad"].append("%s/metadata.xml: "
+ "DOCTYPE: SYSTEM should refer to '%s', %s" %
+ (x, metadata_dtd_uri, system_problem))
+
+ if doctype_name != metadata_doctype_name:
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append("%s/metadata.xml: "
+ "DOCTYPE: name should be '%s', not '%s'" %
+ (x, metadata_doctype_name, doctype_name))
+
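For orientation, a metadata.xml that satisfies both the declaration and DOCTYPE checks starts like this, assuming the usual Gentoo values of metadata_xml_declaration, metadata_doctype_name and metadata_dtd_uri defined earlier in the script:

    <?xml version="1.0" encoding="UTF-8"?>
    <!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
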
# load USE flags from metadata.xml
try:
musedict = utilities.parse_metadata_use(_metadata_xml)
@@ -1629,6 +1733,22 @@ for x in effective_scanlist:
metadata_bad = True
stats["metadata.bad"] += 1
fails["metadata.bad"].append("%s/metadata.xml: %s" % (x, e))
+ else:
+ for atom in chain(*musedict.values()):
+ if atom is None:
+ continue
+ try:
+ atom = Atom(atom)
+ except InvalidAtom as e:
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append(
+ "%s/metadata.xml: Invalid atom: %s" % (x, e))
+ else:
+ if atom.cp != x:
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append(
+ ("%s/metadata.xml: Atom contains "
+ "unexpected cat/pn: %s") % (x, atom))
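A minimal sketch of the per-atom validation added above, runnable where portage is importable (package names hypothetical):

    from portage.dep import Atom
    from portage.exception import InvalidAtom

    x = "dev-libs/foo"  # the package directory being scanned
    for token in (">=dev-libs/foo-1.0", "dev-libs/bar"):
        try:
            atom = Atom(token)
        except InvalidAtom as e:
            print("invalid:", e)
        else:
            if atom.cp != x:
                # fires for dev-libs/bar, whose cat/pn differs from x
                print("unexpected cat/pn:", atom)
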
# Run other metadata.xml checkers
try:
@@ -1639,19 +1759,20 @@ for x in effective_scanlist:
fails["metadata.bad"].append("%s/metadata.xml: %s" % (x, e))
del e
- #Only carry out if in package directory or check forced
+ # Only carry out if in package directory or check forced
if xmllint_capable and not metadata_bad:
 		# xmllint can produce garbage output even on success, so only dump
 		# the output when it fails.
- st, out = subprocess_getstatusoutput(
- "xmllint --nonet --noout --dtdvalid '%s' '%s'" % \
- (metadata_dtd, os.path.join(checkdir, "metadata.xml")))
+ st, out = repoman_getstatusoutput(
+ "xmllint --nonet --noout --dtdvalid %s %s" % \
+ (portage._shell_quote(metadata_dtd),
+ portage._shell_quote(os.path.join(checkdir, "metadata.xml"))))
if st != os.EX_OK:
print(red("!!!") + " metadata.xml is invalid:")
for z in out.splitlines():
- print(red("!!! ")+z)
- stats["metadata.bad"]+=1
- fails["metadata.bad"].append(x+"/metadata.xml")
+ print(red("!!! ") + z)
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append(x + "/metadata.xml")
del metadata_bad
muselist = frozenset(musedict)
@@ -1677,20 +1798,20 @@ for x in effective_scanlist:
fails['changelog.ebuildadded'].append(relative_path)
if vcs in ("cvs", "svn", "bzr") and check_ebuild_notadded and y not in eadded:
- #ebuild not added to vcs
- stats["ebuild.notadded"]=stats["ebuild.notadded"]+1
- fails["ebuild.notadded"].append(x+"/"+y+".ebuild")
- myesplit=portage.pkgsplit(y)
+ # ebuild not added to vcs
+ stats["ebuild.notadded"] += 1
+ fails["ebuild.notadded"].append(x + "/" + y + ".ebuild")
+ myesplit = portage.pkgsplit(y)
if myesplit is None or myesplit[0] != x.split("/")[-1] \
or pv_toolong_re.search(myesplit[1]) \
or pv_toolong_re.search(myesplit[2]):
- stats["ebuild.invalidname"]=stats["ebuild.invalidname"]+1
- fails["ebuild.invalidname"].append(x+"/"+y+".ebuild")
+ stats["ebuild.invalidname"] += 1
+ fails["ebuild.invalidname"].append(x + "/" + y + ".ebuild")
continue
- elif myesplit[0]!=pkgdir:
- print(pkgdir,myesplit[0])
- stats["ebuild.namenomatch"]=stats["ebuild.namenomatch"]+1
- fails["ebuild.namenomatch"].append(x+"/"+y+".ebuild")
+ elif myesplit[0] != pkgdir:
+ print(pkgdir, myesplit[0])
+ stats["ebuild.namenomatch"] += 1
+ fails["ebuild.namenomatch"].append(x + "/" + y + ".ebuild")
continue
pkg = pkgs[y]
@@ -1699,15 +1820,25 @@ for x in effective_scanlist:
allvalid = False
for k, msgs in pkg.invalid.items():
for msg in msgs:
- stats[k] = stats[k] + 1
- fails[k].append("%s %s" % (relative_path, msg))
+ stats[k] += 1
+ fails[k].append("%s: %s" % (relative_path, msg))
continue
- myaux = pkg.metadata
+ myaux = pkg._metadata
eapi = myaux["EAPI"]
inherited = pkg.inherited
live_ebuild = live_eclasses.intersection(inherited)
+ if repo_config.eapi_is_banned(eapi):
+ stats["repo.eapi.banned"] += 1
+ fails["repo.eapi.banned"].append(
+ "%s: %s" % (relative_path, eapi))
+
+ elif repo_config.eapi_is_deprecated(eapi):
+ stats["repo.eapi.deprecated"] += 1
+ fails["repo.eapi.deprecated"].append(
+ "%s: %s" % (relative_path, eapi))
+
for k, v in myaux.items():
if not isinstance(v, basestring):
continue
@@ -1724,20 +1855,21 @@ for x in effective_scanlist:
for uri in portage.dep.use_reduce( \
myaux["SRC_URI"], matchall=True, is_src_uri=True, eapi=eapi, flat=True):
contains_mirror = False
- for mirror in thirdpartymirrors:
+ for mirror, mirror_alias in thirdpartymirrors.items():
if uri.startswith(mirror):
contains_mirror = True
break
if not contains_mirror:
continue
+ new_uri = "mirror://%s/%s" % (mirror_alias, uri[len(mirror):])
stats["SRC_URI.mirror"] += 1
fails["SRC_URI.mirror"].append(
- "%s: '%s' found in thirdpartymirrors" % \
- (relative_path, mirror))
+ "%s: '%s' found in thirdpartymirrors, use '%s'" % \
+ (relative_path, mirror, new_uri))
if myaux.get("PROVIDE"):
- stats["virtual.oldstyle"]+=1
+ stats["virtual.oldstyle"] += 1
fails["virtual.oldstyle"].append(relative_path)
for pos, missing_var in enumerate(missingvars):
@@ -1747,15 +1879,15 @@ for x in effective_scanlist:
continue
if live_ebuild and missing_var == "KEYWORDS":
continue
- myqakey=missingvars[pos]+".missing"
- stats[myqakey]=stats[myqakey]+1
- fails[myqakey].append(x+"/"+y+".ebuild")
+ myqakey = missingvars[pos] + ".missing"
+ stats[myqakey] += 1
+ fails[myqakey].append(x + "/" + y + ".ebuild")
if catdir == "virtual":
for var in ("HOMEPAGE", "LICENSE"):
if myaux.get(var):
myqakey = var + ".virtual"
- stats[myqakey] = stats[myqakey] + 1
+ stats[myqakey] += 1
fails[myqakey].append(relative_path)
# 14 is the length of DESCRIPTION=""
@@ -1772,7 +1904,7 @@ for x in effective_scanlist:
not keyword.startswith("-"):
stable_keywords.append(keyword)
if stable_keywords:
- if ebuild_path in new_ebuilds:
+ if ebuild_path in new_ebuilds and catdir != "virtual":
stable_keywords.sort()
stats["KEYWORDS.stable"] += 1
fails["KEYWORDS.stable"].append(
@@ -1782,10 +1914,10 @@ for x in effective_scanlist:
ebuild_archs = set(kw.lstrip("~") for kw in keywords \
if not kw.startswith("-"))
- previous_keywords = slot_keywords.get(myaux["SLOT"])
+ previous_keywords = slot_keywords.get(pkg.slot)
if previous_keywords is None:
- slot_keywords[myaux["SLOT"]] = set()
- elif ebuild_archs and not live_ebuild:
+ slot_keywords[pkg.slot] = set()
+ elif ebuild_archs and "*" not in ebuild_archs and not live_ebuild:
dropped_keywords = previous_keywords.difference(ebuild_archs)
if dropped_keywords:
stats["KEYWORDS.dropped"] += 1
@@ -1793,7 +1925,7 @@ for x in effective_scanlist:
relative_path + ": %s" % \
" ".join(sorted(dropped_keywords)))
- slot_keywords[myaux["SLOT"]].update(ebuild_archs)
+ slot_keywords[pkg.slot].update(ebuild_archs)
# KEYWORDS="-*" is a stupid replacement for package.mask and screws general KEYWORDS semantics
if "-*" in keywords:
@@ -1805,7 +1937,7 @@ for x in effective_scanlist:
haskeyword = True
if not haskeyword:
stats["KEYWORDS.stupid"] += 1
- fails["KEYWORDS.stupid"].append(x+"/"+y+".ebuild")
+ fails["KEYWORDS.stupid"].append(x + "/" + y + ".ebuild")
"""
Ebuilds that inherit a "Live" eclass (darcs,subversion,git,cvs,etc..) should
@@ -1833,37 +1965,53 @@ for x in effective_scanlist:
arches = [[repoman_settings["ARCH"], repoman_settings["ARCH"],
repoman_settings["ACCEPT_KEYWORDS"].split()]]
else:
- arches=[]
- for keyword in myaux["KEYWORDS"].split():
- if (keyword[0]=="-"):
+ arches = set()
+ for keyword in keywords:
+ if keyword[0] == "-":
continue
- elif (keyword[0]=="~"):
- arches.append([keyword, keyword[1:], [keyword[1:], keyword]])
+ elif keyword[0] == "~":
+ arch = keyword[1:]
+ if arch == "*":
+ for expanded_arch in profiles:
+ if expanded_arch == "**":
+ continue
+ arches.add((keyword, expanded_arch,
+ (expanded_arch, "~" + expanded_arch)))
+ else:
+ arches.add((keyword, arch, (arch, keyword)))
else:
- arches.append([keyword, keyword, [keyword]])
+ if keyword == "*":
+ for expanded_arch in profiles:
+ if expanded_arch == "**":
+ continue
+ arches.add((keyword, expanded_arch,
+ (expanded_arch,)))
+ else:
+ arches.add((keyword, keyword, (keyword,)))
if not arches:
# Use an empty profile for checking dependencies of
# packages that have empty KEYWORDS.
- arches.append(['**', '**', ['**']])
+ arches.add(('**', '**', ('**',)))
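To see what this expansion yields, a trimmed-down sketch in which profiles is stubbed to its keys only:

    profiles = {"amd64": [], "x86": [], "**": []}  # arch -> profile list (stub)
    arches = set()
    for keyword in ("~*",):
        for expanded_arch in profiles:
            if expanded_arch == "**":
                continue
            arches.add((keyword, expanded_arch,
                (expanded_arch, "~" + expanded_arch)))
    print(sorted(arches))
    # [('~*', 'amd64', ('amd64', '~amd64')), ('~*', 'x86', ('x86', '~x86'))]
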
unknown_pkgs = set()
baddepsyntax = False
badlicsyntax = False
badprovsyntax = False
- catpkg = catdir+"/"+y
+ catpkg = catdir + "/" + y
inherited_java_eclass = "java-pkg-2" in inherited or \
"java-pkg-opt-2" in inherited
inherited_wxwidgets_eclass = "wxwidgets" in inherited
operator_tokens = set(["||", "(", ")"])
type_list, badsyntax = [], []
- for mytype in ("DEPEND", "RDEPEND", "PDEPEND",
- "LICENSE", "PROPERTIES", "PROVIDE"):
+ for mytype in Package._dep_keys + ("LICENSE", "PROPERTIES", "PROVIDE"):
mydepstr = myaux[mytype]
+ buildtime = mytype in Package._buildtime_keys
+ runtime = mytype in Package._runtime_keys
token_class = None
- if mytype in ("DEPEND", "RDEPEND", "PDEPEND"):
- token_class=portage.dep.Atom
+ if mytype.endswith("DEPEND"):
+ token_class = portage.dep.Atom
try:
atoms = portage.dep.use_reduce(mydepstr, matchall=1, flat=True, \
@@ -1872,8 +2020,8 @@ for x in effective_scanlist:
atoms = None
badsyntax.append(str(e))
- if atoms and mytype in ("DEPEND", "RDEPEND", "PDEPEND"):
- if mytype in ("RDEPEND", "PDEPEND") and \
+ if atoms and mytype.endswith("DEPEND"):
+ if runtime and \
"test?" in mydepstr.split():
stats[mytype + '.suspect'] += 1
fails[mytype + '.suspect'].append(relative_path + \
@@ -1902,21 +2050,21 @@ for x in effective_scanlist:
": %s: consider using '%s' instead of '%s'" %
(mytype, suspect_virtual[atom.cp], atom))
- if mytype == "DEPEND" and \
+ if buildtime and \
not is_blocker and \
not inherited_java_eclass and \
atom.cp == "virtual/jdk":
stats['java.eclassesnotused'] += 1
fails['java.eclassesnotused'].append(relative_path)
- elif mytype == "DEPEND" and \
+ elif buildtime and \
not is_blocker and \
not inherited_wxwidgets_eclass and \
atom.cp == "x11-libs/wxGTK":
stats['wxwidgets.eclassnotused'] += 1
fails['wxwidgets.eclassnotused'].append(
- relative_path + ": DEPENDs on x11-libs/wxGTK"
- " without inheriting wxwidgets.eclass")
- elif mytype in ("PDEPEND", "RDEPEND"):
+ (relative_path + ": %ss on x11-libs/wxGTK"
+ " without inheriting wxwidgets.eclass") % mytype)
+ elif runtime:
if not is_blocker and \
atom.cp in suspect_rdepend:
stats[mytype + '.suspect'] += 1
@@ -1925,21 +2073,26 @@ for x in effective_scanlist:
if atom.operator == "~" and \
portage.versions.catpkgsplit(atom.cpv)[3] != "r0":
- stats[mytype + '.badtilde'] += 1
- fails[mytype + '.badtilde'].append(
+ qacat = 'dependency.badtilde'
+ stats[qacat] += 1
+ fails[qacat].append(
(relative_path + ": %s uses the ~ operator"
" with a non-zero revision:" + \
" '%s'") % (mytype, atom))
type_list.extend([mytype] * (len(badsyntax) - len(type_list)))
- for m,b in zip(type_list, badsyntax):
- stats[m+".syntax"] += 1
- fails[m+".syntax"].append(catpkg+".ebuild "+m+": "+b)
+ for m, b in zip(type_list, badsyntax):
+ if m.endswith("DEPEND"):
+ qacat = "dependency.syntax"
+ else:
+ qacat = m + ".syntax"
+ stats[qacat] += 1
+ fails[qacat].append("%s: %s: %s" % (relative_path, m, b))
badlicsyntax = len([z for z in type_list if z == "LICENSE"])
badprovsyntax = len([z for z in type_list if z == "PROVIDE"])
- baddepsyntax = len(type_list) != badlicsyntax + badprovsyntax
+ baddepsyntax = len(type_list) != badlicsyntax + badprovsyntax
badlicsyntax = badlicsyntax > 0
badprovsyntax = badprovsyntax > 0
@@ -1955,7 +2108,7 @@ for x in effective_scanlist:
myuse.append(flag_name)
# uselist checks - metadata
- for mypos in range(len(myuse)-1,-1,-1):
+ for mypos in range(len(myuse)-1, -1, -1):
if myuse[mypos] and (myuse[mypos] in muselist):
del myuse[mypos]
@@ -1968,8 +2121,17 @@ for x in effective_scanlist:
" '%s'") % (eapi, myflag))
for mypos in range(len(myuse)):
- stats["IUSE.invalid"]=stats["IUSE.invalid"]+1
- fails["IUSE.invalid"].append(x+"/"+y+".ebuild: %s" % myuse[mypos])
+ stats["IUSE.invalid"] += 1
+ fails["IUSE.invalid"].append(x + "/" + y + ".ebuild: %s" % myuse[mypos])
+
+ # Check for outdated RUBY targets
+ if "ruby-ng" in inherited or "ruby-fakegem" in inherited or "ruby" in inherited:
+ ruby_intersection = pkg.iuse.all.intersection(ruby_deprecated)
+ if ruby_intersection:
+ for myruby in ruby_intersection:
+ stats["IUSE.rubydeprecated"] += 1
+ fails["IUSE.rubydeprecated"].append(
+ (relative_path + ": Deprecated ruby target: %s") % myruby)
# license checks
if not badlicsyntax:
@@ -1982,10 +2144,13 @@ for x in effective_scanlist:
# Need to check for "||" manually as no portage
# function will remove it without removing values.
if lic not in liclist and lic != "||":
- stats["LICENSE.invalid"]=stats["LICENSE.invalid"]+1
- fails["LICENSE.invalid"].append(x+"/"+y+".ebuild: %s" % lic)
+ stats["LICENSE.invalid"] += 1
+ fails["LICENSE.invalid"].append(x + "/" + y + ".ebuild: %s" % lic)
+ elif lic in liclist_deprecated:
+ stats["LICENSE.deprecated"] += 1
+ fails["LICENSE.deprecated"].append("%s: %s" % (relative_path, lic))
- #keyword checks
+ # keyword checks
myuse = myaux["KEYWORDS"].split()
for mykey in myuse:
if mykey not in ("-*", "*", "~*"):
@@ -1996,17 +2161,17 @@ for x in effective_scanlist:
myskey = myskey[1:]
if myskey not in kwlist:
stats["KEYWORDS.invalid"] += 1
- fails["KEYWORDS.invalid"].append(x+"/"+y+".ebuild: %s" % mykey)
+ fails["KEYWORDS.invalid"].append(x + "/" + y + ".ebuild: %s" % mykey)
elif myskey not in profiles:
stats["KEYWORDS.invalid"] += 1
- fails["KEYWORDS.invalid"].append(x+"/"+y+".ebuild: %s (profile invalid)" % mykey)
+ fails["KEYWORDS.invalid"].append(x + "/" + y + ".ebuild: %s (profile invalid)" % mykey)
- #restrict checks
+ # restrict checks
myrestrict = None
try:
myrestrict = portage.dep.use_reduce(myaux["RESTRICT"], matchall=1, flat=True)
except portage.exception.InvalidDependString as e:
- stats["RESTRICT.syntax"] = stats["RESTRICT.syntax"] + 1
+ stats["RESTRICT.syntax"] += 1
fails["RESTRICT.syntax"].append(
"%s: RESTRICT: %s" % (relative_path, e))
del e
@@ -2016,8 +2181,8 @@ for x in effective_scanlist:
if mybadrestrict:
stats["RESTRICT.invalid"] += len(mybadrestrict)
for mybad in mybadrestrict:
- fails["RESTRICT.invalid"].append(x+"/"+y+".ebuild: %s" % mybad)
- #REQUIRED_USE check
+ fails["RESTRICT.invalid"].append(x + "/" + y + ".ebuild: %s" % mybad)
+ # REQUIRED_USE check
required_use = myaux["REQUIRED_USE"]
if required_use:
if not eapi_has_required_use(eapi):
@@ -2027,9 +2192,9 @@ for x in effective_scanlist:
" not supported with EAPI='%s'" % (eapi,))
try:
portage.dep.check_required_use(required_use, (),
- pkg.iuse.is_valid_flag)
+ pkg.iuse.is_valid_flag, eapi=eapi)
except portage.exception.InvalidDependString as e:
- stats["REQUIRED_USE.syntax"] = stats["REQUIRED_USE.syntax"] + 1
+ stats["REQUIRED_USE.syntax"] += 1
fails["REQUIRED_USE.syntax"].append(
"%s: REQUIRED_USE: %s" % (relative_path, e))
del e
@@ -2062,127 +2227,154 @@ for x in effective_scanlist:
# user is intent on forcing the commit anyway.
continue
- for keyword,arch,groups in arches:
-
+ relevant_profiles = []
+ for keyword, arch, groups in arches:
if arch not in profiles:
# A missing profile will create an error further down
# during the KEYWORDS verification.
continue
-
- for prof in profiles[arch]:
- if prof.status not in ("stable", "dev") or \
- prof.status == "dev" and not options.include_dev:
+ if include_arches is not None:
+ if arch not in include_arches:
continue
- dep_settings = arch_caches.get(prof.sub_path)
- if dep_settings is None:
- dep_settings = portage.config(
- config_profile_path=prof.abs_path,
- config_incrementals=repoman_incrementals,
- config_root=config_root,
- local_config=False,
- _unmatched_removal=options.unmatched_removal,
- env=env)
- dep_settings.categories = repoman_settings.categories
- if options.without_mask:
- dep_settings._mask_manager_obj = \
- copy.deepcopy(dep_settings._mask_manager)
- dep_settings._mask_manager._pmaskdict.clear()
- arch_caches[prof.sub_path] = dep_settings
-
- xmatch_cache_key = (prof.sub_path, tuple(groups))
- xcache = arch_xmatch_caches.get(xmatch_cache_key)
- if xcache is None:
- portdb.melt()
- portdb.freeze()
- xcache = portdb.xcache
- xcache.update(shared_xmatch_caches)
- arch_xmatch_caches[xmatch_cache_key] = xcache
-
- trees[root]["porttree"].settings = dep_settings
- portdb.settings = dep_settings
- portdb.xcache = xcache
- # for package.use.mask support inside dep_check
- dep_settings.setcpv(pkg)
- dep_settings["ACCEPT_KEYWORDS"] = " ".join(groups)
- # just in case, prevent config.reset() from nuking these.
- dep_settings.backup_changes("ACCEPT_KEYWORDS")
-
- if not baddepsyntax:
- ismasked = not ebuild_archs or \
- pkg.cpv not in portdb.xmatch("match-visible", pkg.cp)
- if ismasked:
- if not have_pmasked:
- have_pmasked = bool(dep_settings._getMaskAtom(
- pkg.cpv, pkg.metadata))
- if options.ignore_masked:
- continue
- #we are testing deps for a masked package; give it some lee-way
- suffix="masked"
- matchmode = "minimum-all"
- else:
- suffix=""
- matchmode = "minimum-visible"
-
- if not have_dev_keywords:
- have_dev_keywords = \
- bool(dev_keywords.intersection(keywords))
-
- if prof.status == "dev":
- suffix=suffix+"indev"
-
- for mytype,mypos in [["DEPEND",len(missingvars)],["RDEPEND",len(missingvars)+1],["PDEPEND",len(missingvars)+2]]:
-
- mykey=mytype+".bad"+suffix
- myvalue = myaux[mytype]
- if not myvalue:
- continue
-
- success, atoms = portage.dep_check(myvalue, portdb,
- dep_settings, use="all", mode=matchmode,
- trees=trees)
-
- if success:
- if atoms:
-
- # Don't bother with dependency.unknown for
- # cases in which *DEPEND.bad is triggered.
- for atom in atoms:
- # dep_check returns all blockers and they
- # aren't counted for *DEPEND.bad, so we
- # ignore them here.
- if not atom.blocker:
- unknown_pkgs.discard(
- (mytype, atom.unevaluated_atom))
-
- if not prof.sub_path:
- # old-style virtuals currently aren't
- # resolvable with empty profile, since
- # 'virtuals' mappings are unavailable
- # (it would be expensive to search
- # for PROVIDE in all ebuilds)
- atoms = [atom for atom in atoms if not \
- (atom.cp.startswith('virtual/') and \
- not portdb.cp_list(atom.cp))]
-
- #we have some unsolvable deps
- #remove ! deps, which always show up as unsatisfiable
- atoms = [str(atom.unevaluated_atom) \
- for atom in atoms if not atom.blocker]
-
- #if we emptied out our list, continue:
- if not atoms:
- continue
- stats[mykey]=stats[mykey]+1
- fails[mykey].append("%s: %s(%s) %s" % \
- (relative_path, keyword,
- prof, repr(atoms)))
- else:
- stats[mykey]=stats[mykey]+1
- fails[mykey].append("%s: %s(%s) %s" % \
- (relative_path, keyword,
+ relevant_profiles.extend((keyword, groups, prof)
+ for prof in profiles[arch])
+
+ def sort_key(item):
+ return item[2].sub_path
+
+ relevant_profiles.sort(key=sort_key)
+
+ for keyword, groups, prof in relevant_profiles:
+
+ if not (prof.status == "stable" or \
+ (prof.status == "dev" and options.include_dev) or \
+ (prof.status == "exp" and options.include_exp_profiles == 'y')):
+ continue
+
+ dep_settings = arch_caches.get(prof.sub_path)
+ if dep_settings is None:
+ dep_settings = portage.config(
+ config_profile_path=prof.abs_path,
+ config_incrementals=repoman_incrementals,
+ config_root=config_root,
+ local_config=False,
+ _unmatched_removal=options.unmatched_removal,
+ env=env, repositories=repoman_settings.repositories)
+ dep_settings.categories = repoman_settings.categories
+ if options.without_mask:
+ dep_settings._mask_manager_obj = \
+ copy.deepcopy(dep_settings._mask_manager)
+ dep_settings._mask_manager._pmaskdict.clear()
+ arch_caches[prof.sub_path] = dep_settings
+
+ xmatch_cache_key = (prof.sub_path, tuple(groups))
+ xcache = arch_xmatch_caches.get(xmatch_cache_key)
+ if xcache is None:
+ portdb.melt()
+ portdb.freeze()
+ xcache = portdb.xcache
+ xcache.update(shared_xmatch_caches)
+ arch_xmatch_caches[xmatch_cache_key] = xcache
+
+ trees[root]["porttree"].settings = dep_settings
+ portdb.settings = dep_settings
+ portdb.xcache = xcache
+
+ dep_settings["ACCEPT_KEYWORDS"] = " ".join(groups)
+ # just in case, prevent config.reset() from nuking these.
+ dep_settings.backup_changes("ACCEPT_KEYWORDS")
+
+ # This attribute is used in dbapi._match_use() to apply
+ # use.stable.{mask,force} settings based on the stable
+ # status of the parent package. This is required in order
+ # for USE deps of unstable packages to be resolved correctly,
+ # since otherwise use.stable.{mask,force} settings of
+ # dependencies may conflict (see bug #456342).
+ dep_settings._parent_stable = dep_settings._isStable(pkg)
+
+ # Handle package.use*.{force,mask) calculation, for use
+ # in dep_check.
+ dep_settings.useforce = dep_settings._use_manager.getUseForce(
+ pkg, stable=dep_settings._parent_stable)
+ dep_settings.usemask = dep_settings._use_manager.getUseMask(
+ pkg, stable=dep_settings._parent_stable)
+
+ if not baddepsyntax:
+ ismasked = not ebuild_archs or \
+ pkg.cpv not in portdb.xmatch("match-visible", pkg.cp)
+ if ismasked:
+ if not have_pmasked:
+ have_pmasked = bool(dep_settings._getMaskAtom(
+ pkg.cpv, pkg._metadata))
+ if options.ignore_masked:
+ continue
+					# we are testing deps for a masked package; give it some leeway
+ suffix = "masked"
+ matchmode = "minimum-all"
+ else:
+ suffix = ""
+ matchmode = "minimum-visible"
+
+ if not have_dev_keywords:
+ have_dev_keywords = \
+ bool(dev_keywords.intersection(keywords))
+
+ if prof.status == "dev":
+ suffix = suffix + "indev"
+
+ for mytype in Package._dep_keys:
+
+ mykey = "dependency.bad" + suffix
+ myvalue = myaux[mytype]
+ if not myvalue:
+ continue
+
+ success, atoms = portage.dep_check(myvalue, portdb,
+ dep_settings, use="all", mode=matchmode,
+ trees=trees)
+
+ if success:
+ if atoms:
+
+ # Don't bother with dependency.unknown for
+ # cases in which *DEPEND.bad is triggered.
+ for atom in atoms:
+ # dep_check returns all blockers and they
+ # aren't counted for *DEPEND.bad, so we
+ # ignore them here.
+ if not atom.blocker:
+ unknown_pkgs.discard(
+ (mytype, atom.unevaluated_atom))
+
+ if not prof.sub_path:
+ # old-style virtuals currently aren't
+ # resolvable with empty profile, since
+ # 'virtuals' mappings are unavailable
+ # (it would be expensive to search
+ # for PROVIDE in all ebuilds)
+ atoms = [atom for atom in atoms if not \
+ (atom.cp.startswith('virtual/') and \
+ not portdb.cp_list(atom.cp))]
+
+ # we have some unsolvable deps
+ # remove ! deps, which always show up as unsatisfiable
+ atoms = [str(atom.unevaluated_atom) \
+ for atom in atoms if not atom.blocker]
+
+ # if we emptied out our list, continue:
+ if not atoms:
+ continue
+ stats[mykey] += 1
+ fails[mykey].append("%s: %s: %s(%s) %s" % \
+ (relative_path, mytype, keyword,
prof, repr(atoms)))
+ else:
+ stats[mykey] += 1
+ fails[mykey].append("%s: %s: %s(%s) %s" % \
+ (relative_path, mytype, keyword,
+ prof, repr(atoms)))
if not baddepsyntax and unknown_pkgs:
type_map = {}
@@ -2208,11 +2400,11 @@ if options.if_modified == "y" and len(effective_scanlist) < 1:
if options.mode == "manifest":
sys.exit(dofail)
-#dofail will be set to 1 if we have failed in at least one non-warning category
-dofail=0
-#dowarn will be set to 1 if we tripped any warnings
-dowarn=0
-#dofull will be set if we should print a "repoman full" informational message
+# dofail will be set to 1 if we have failed in at least one non-warning category
+dofail = 0
+# dowarn will be set to 1 if we tripped any warnings
+dowarn = 0
+# dofull will be set if we should print a "repoman full" informational message
dofull = options.mode != 'full'
for x in qacats:
@@ -2240,29 +2432,20 @@ console_writer.style_listener = style_file.new_styles
f = formatter.AbstractFormatter(console_writer)
-utilities.format_qa_output(f, stats, fails, dofull, dofail, options, qawarnings)
+format_outputs = {
+ 'column': utilities.format_qa_output_column,
+ 'default': utilities.format_qa_output
+}
+
+format_output = format_outputs.get(options.output_style,
+ format_outputs['default'])
+format_output(f, stats, fails, dofull, dofail, options, qawarnings)
style_file.flush()
del console_writer, f, style_file
qa_output = qa_output.getvalue()
qa_output = qa_output.splitlines(True)
-def grouplist(mylist,seperator="/"):
- """(list,seperator="/") -- Takes a list of elements; groups them into
- same initial element categories. Returns a dict of {base:[sublist]}
- From: ["blah/foo","spork/spatula","blah/weee/splat"]
- To: {"blah":["foo","weee/splat"], "spork":["spatula"]}"""
- mygroups={}
- for x in mylist:
- xs=x.split(seperator)
- if xs[0]==".":
- xs=xs[1:]
- if xs[0] not in mygroups:
- mygroups[xs[0]]=[seperator.join(xs[1:])]
- else:
- mygroups[xs[0]]+=[seperator.join(xs[1:])]
- return mygroups
-
suggest_ignore_masked = False
suggest_include_dev = False
@@ -2311,65 +2494,65 @@ else:
myunadded = []
if vcs == "cvs":
try:
- myvcstree=portage.cvstree.getentries("./",recursive=1)
- myunadded=portage.cvstree.findunadded(myvcstree,recursive=1,basedir="./")
+ myvcstree = portage.cvstree.getentries("./", recursive=1)
+ myunadded = portage.cvstree.findunadded(myvcstree, recursive=1, basedir="./")
except SystemExit as e:
raise # TODO propagate this
except:
err("Error retrieving CVS tree; exiting.")
if vcs == "svn":
try:
- with os.popen("svn status --no-ignore") as f:
+ with repoman_popen("svn status --no-ignore") as f:
svnstatus = f.readlines()
- myunadded = [ "./"+elem.rstrip().split()[1] for elem in svnstatus if elem.startswith("?") or elem.startswith("I") ]
+ myunadded = ["./" + elem.rstrip().split()[1] for elem in svnstatus if elem.startswith("?") or elem.startswith("I")]
except SystemExit as e:
raise # TODO propagate this
except:
err("Error retrieving SVN info; exiting.")
if vcs == "git":
# get list of files not under version control or missing
- myf = os.popen("git ls-files --others")
- myunadded = [ "./" + elem[:-1] for elem in myf ]
+ myf = repoman_popen("git ls-files --others")
+ myunadded = ["./" + elem[:-1] for elem in myf]
myf.close()
if vcs == "bzr":
try:
- with os.popen("bzr status -S .") as f:
+ with repoman_popen("bzr status -S .") as f:
bzrstatus = f.readlines()
- myunadded = [ "./"+elem.rstrip().split()[1].split('/')[-1:][0] for elem in bzrstatus if elem.startswith("?") or elem[0:2] == " D" ]
+ myunadded = ["./" + elem.rstrip().split()[1].split('/')[-1:][0] for elem in bzrstatus if elem.startswith("?") or elem[0:2] == " D"]
except SystemExit as e:
raise # TODO propagate this
except:
err("Error retrieving bzr info; exiting.")
if vcs == "hg":
- with os.popen("hg status --no-status --unknown .") as f:
+ with repoman_popen("hg status --no-status --unknown .") as f:
myunadded = f.readlines()
myunadded = ["./" + elem.rstrip() for elem in myunadded]
-
+
# Mercurial doesn't handle manually deleted files as removed from
 	# the repository, so the user needs to remove them before committing,
# using "hg remove [FILES]"
- with os.popen("hg status --no-status --deleted .") as f:
+ with repoman_popen("hg status --no-status --deleted .") as f:
mydeleted = f.readlines()
mydeleted = ["./" + elem.rstrip() for elem in mydeleted]
- myautoadd=[]
+ myautoadd = []
if myunadded:
- for x in range(len(myunadded)-1,-1,-1):
- xs=myunadded[x].split("/")
- if xs[-1]=="files":
+ for x in range(len(myunadded)-1, -1, -1):
+ xs = myunadded[x].split("/")
+ if xs[-1] == "files":
print("!!! files dir is not added! Please correct this.")
sys.exit(-1)
- elif xs[-1]=="Manifest":
+ elif xs[-1] == "Manifest":
# It's a manifest... auto add
- myautoadd+=[myunadded[x]]
+ myautoadd += [myunadded[x]]
del myunadded[x]
if myunadded:
print(red("!!! The following files are in your local tree but are not added to the master"))
print(red("!!! tree. Please remove them from the local tree or add them to the master tree."))
for x in myunadded:
- print(" ",x)
+ print(" ", x)
print()
print()
sys.exit(1)
@@ -2378,7 +2561,7 @@ else:
print(red("!!! The following files are removed manually from your local tree but are not"))
print(red("!!! removed from the repository. Please remove them, using \"hg remove [FILES]\"."))
for x in mydeleted:
- print(" ",x)
+ print(" ", x)
print()
print()
sys.exit(1)
@@ -2387,60 +2570,59 @@ else:
mycvstree = cvstree.getentries("./", recursive=1)
mychanged = cvstree.findchanged(mycvstree, recursive=1, basedir="./")
mynew = cvstree.findnew(mycvstree, recursive=1, basedir="./")
- myremoved=portage.cvstree.findremoved(mycvstree,recursive=1,basedir="./")
+ myremoved = portage.cvstree.findremoved(mycvstree, recursive=1, basedir="./")
bin_blob_pattern = re.compile("^-kb$")
no_expansion = set(portage.cvstree.findoption(mycvstree, bin_blob_pattern,
recursive=1, basedir="./"))
-
if vcs == "svn":
- with os.popen("svn status") as f:
+ with repoman_popen("svn status") as f:
svnstatus = f.readlines()
- mychanged = [ "./" + elem.split()[-1:][0] for elem in svnstatus if (elem[:1] in "MR" or elem[1:2] in "M")]
- mynew = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("A")]
- myremoved = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("D")]
+ mychanged = ["./" + elem.split()[-1:][0] for elem in svnstatus if (elem[:1] in "MR" or elem[1:2] in "M")]
+ mynew = ["./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("A")]
+ myremoved = ["./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("D")]
# Subversion expands keywords specified in svn:keywords properties.
- with os.popen("svn propget -R svn:keywords") as f:
+ with repoman_popen("svn propget -R svn:keywords") as f:
props = f.readlines()
expansion = dict(("./" + prop.split(" - ")[0], prop.split(" - ")[1].split()) \
for prop in props if " - " in prop)
elif vcs == "git":
- with os.popen("git diff-index --name-only "
+ with repoman_popen("git diff-index --name-only "
"--relative --diff-filter=M HEAD") as f:
mychanged = f.readlines()
mychanged = ["./" + elem[:-1] for elem in mychanged]
- with os.popen("git diff-index --name-only "
+ with repoman_popen("git diff-index --name-only "
"--relative --diff-filter=A HEAD") as f:
mynew = f.readlines()
mynew = ["./" + elem[:-1] for elem in mynew]
- with os.popen("git diff-index --name-only "
+ with repoman_popen("git diff-index --name-only "
"--relative --diff-filter=D HEAD") as f:
myremoved = f.readlines()
myremoved = ["./" + elem[:-1] for elem in myremoved]
if vcs == "bzr":
- with os.popen("bzr status -S .") as f:
+ with repoman_popen("bzr status -S .") as f:
bzrstatus = f.readlines()
- mychanged = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and elem[1:2] == "M" ]
- mynew = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and ( elem[1:2] in "NK" or elem[0:1] == "R" ) ]
- myremoved = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem.startswith("-") ]
- myremoved = [ "./" + elem.split()[-3:-2][0].split('/')[-1:][0] for elem in bzrstatus if elem and ( elem[1:2] == "K" or elem[0:1] == "R" ) ]
+ mychanged = ["./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and elem[1:2] == "M"]
+ mynew = ["./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and (elem[1:2] in "NK" or elem[0:1] == "R")]
+ myremoved = ["./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem.startswith("-")]
+ myremoved = ["./" + elem.split()[-3:-2][0].split('/')[-1:][0] for elem in bzrstatus if elem and (elem[1:2] == "K" or elem[0:1] == "R")]
# Bazaar expands nothing.
if vcs == "hg":
- with os.popen("hg status --no-status --modified .") as f:
+ with repoman_popen("hg status --no-status --modified .") as f:
mychanged = f.readlines()
mychanged = ["./" + elem.rstrip() for elem in mychanged]
- with os.popen("hg status --no-status --added .") as f:
+ with repoman_popen("hg status --no-status --added .") as f:
mynew = f.readlines()
mynew = ["./" + elem.rstrip() for elem in mynew]
- with os.popen("hg status --no-status --removed .") as f:
+ with repoman_popen("hg status --no-status --removed .") as f:
myremoved = f.readlines()
myremoved = ["./" + elem.rstrip() for elem in myremoved]
@@ -2499,21 +2681,54 @@ else:
commitmessage = commitmessage.rstrip()
changelog_msg = commitmessage
portage_version = getattr(portage, "VERSION", None)
+ gpg_key = repoman_settings.get("PORTAGE_GPG_KEY", "")
+ dco_sob = repoman_settings.get("DCO_SIGNED_OFF_BY", "")
if portage_version is None:
sys.stderr.write("Failed to insert portage version in message!\n")
sys.stderr.flush()
portage_version = "Unknown"
- unameout = platform.system() + " "
- if platform.system() in ["Darwin", "SunOS"]:
- unameout += platform.processor()
- else:
- unameout += platform.machine()
- commitmessage += "\n\n(Portage version: %s/%s/%s" % \
- (portage_version, vcs, unameout)
+
+ report_options = []
if options.force:
- commitmessage += ", RepoMan options: --force"
- commitmessage += ")"
+ report_options.append("--force")
+ if options.ignore_arches:
+ report_options.append("--ignore-arches")
+ if include_arches is not None:
+ report_options.append("--include-arches=\"%s\"" %
+ " ".join(sorted(include_arches)))
+
+ if vcs == "git":
+ # Use new footer only for git (see bug #438364).
+ commit_footer = "\n\nPackage-Manager: portage-%s" % portage_version
+ if report_options:
+ commit_footer += "\nRepoMan-Options: " + " ".join(report_options)
+ if sign_manifests:
+ commit_footer += "\nManifest-Sign-Key: %s" % (gpg_key, )
+ if dco_sob:
+ commit_footer += "\nSigned-off-by: %s" % (dco_sob, )
+ else:
+ unameout = platform.system() + " "
+ if platform.system() in ["Darwin", "SunOS"]:
+ unameout += platform.processor()
+ else:
+ unameout += platform.machine()
+ commit_footer = "\n\n"
+ if dco_sob:
+ commit_footer += "Signed-off-by: %s\n" % (dco_sob, )
+ commit_footer += "(Portage version: %s/%s/%s" % \
+ (portage_version, vcs, unameout)
+ if report_options:
+ commit_footer += ", RepoMan options: " + " ".join(report_options)
+ if sign_manifests:
+ commit_footer += ", signed Manifest commit with key %s" % \
+ (gpg_key, )
+ else:
+ commit_footer += ", unsigned Manifest commit"
+ commit_footer += ")"
+
+ commitmessage += commit_footer
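
With the git branch taken, the appended footer comes out along these lines (version, options, key and sign-off are all hypothetical):

    Package-Manager: portage-2.2.8
    RepoMan-Options: --force --include-arches="amd64 x86"
    Manifest-Sign-Key: 0x1234ABCD
    Signed-off-by: Jane Dev <jane@example.org>
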
+ broken_changelog_manifests = []
if options.echangelog in ('y', 'force'):
logging.info("checking for unmodified ChangeLog files")
committer_name = utilities.get_committer_name(env=repoman_settings)
@@ -2569,6 +2784,8 @@ else:
# regenerate Manifest for modified ChangeLog (bug #420735)
repoman_settings["O"] = checkdir
digestgen(mysettings=repoman_settings, myportdb=portdb)
+ else:
+ broken_changelog_manifests.append(x)
if myautoadd:
print(">>> Auto-Adding missing Manifest/ChangeLog file(s)...")
@@ -2578,15 +2795,17 @@ else:
portage.writemsg_stdout("(%s)\n" % " ".join(add_cmd),
noiselevel=-1)
else:
- if not (sys.hexversion >= 0x3000000 and sys.hexversion < 0x3020000):
- # Python 3.1 produces the following TypeError if raw bytes are
- # passed to subprocess.call():
- # File "/usr/lib/python3.1/subprocess.py", line 646, in __init__
- # errread, errwrite)
- # File "/usr/lib/python3.1/subprocess.py", line 1157, in _execute_child
- # raise child_exception
- # TypeError: expected an object with the buffer interface
- add_cmd = [_unicode_encode(arg) for arg in add_cmd]
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(add_cmd[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = find_binary(add_cmd[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(add_cmd[0])
+ add_cmd[0] = fullname
+
+ add_cmd = [_unicode_encode(arg) for arg in add_cmd]
retcode = subprocess.call(add_cmd)
if retcode != os.EX_OK:
logging.error(
@@ -2631,7 +2850,7 @@ else:
elif vcs == "svn":
if myfile not in expansion:
continue
-
+
# Subversion keywords are case-insensitive in svn:keywords properties, but case-sensitive in contents of files.
enabled_keywords = []
for k in expansion[myfile]:
@@ -2641,7 +2860,8 @@ else:
headerstring = "'\$(%s).*\$'" % "|".join(enabled_keywords)
- myout = subprocess_getstatusoutput("egrep -q "+headerstring+" "+myfile)
+ myout = repoman_getstatusoutput("egrep -q " + headerstring + " " +
+ portage._shell_quote(myfile))
if myout[0] == 0:
myheaders.append(myfile)
@@ -2688,7 +2908,7 @@ else:
if options.pretend:
print("(%s)" % (" ".join(commit_cmd),))
else:
- retval = spawn(commit_cmd, env=os.environ)
+ retval = spawn(commit_cmd, env=commit_env)
if retval != os.EX_OK:
writemsg_level(("!!! Exiting on %s (shell) " + \
"error code: %s\n") % (vcs, retval),
@@ -2729,14 +2949,38 @@ else:
gpgvars[k] = v
gpgcmd = portage.util.varexpand(gpgcmd, mydict=gpgvars)
if options.pretend:
- print("("+gpgcmd+")")
+ print("(" + gpgcmd + ")")
else:
- rValue = os.system(gpgcmd)
+ # Encode unicode manually for bug #310789.
+ gpgcmd = portage.util.shlex_split(gpgcmd)
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(gpgcmd[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = find_binary(gpgcmd[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(gpgcmd[0])
+ gpgcmd[0] = fullname
+
+ gpgcmd = [_unicode_encode(arg,
+ encoding=_encodings['fs'], errors='strict') for arg in gpgcmd]
+ rValue = subprocess.call(gpgcmd)
if rValue == os.EX_OK:
- os.rename(filename+".asc", filename)
+ os.rename(filename + ".asc", filename)
else:
raise portage.exception.PortageException("!!! gpg exited with '" + str(rValue) + "' status")
+ def need_signature(filename):
+ try:
+ with open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ return b"BEGIN PGP SIGNED MESSAGE" not in f.readline()
+ except IOError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ return False
+ raise
+
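need_signature keys off the OpenPGP cleartext header, so a Manifest whose first line already reads as below is skipped, while a missing file (ENOENT/ESTALE) simply reports False instead of raising:

    -----BEGIN PGP SIGNED MESSAGE-----
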
# When files are removed and re-added, the cvs server will put /Attic/
# inside the $Header path. This code detects the problem and corrects it
# so that the Manifest will generate correctly. See bug #169500.
@@ -2771,6 +3015,11 @@ else:
repoman_settings["O"] = os.path.join(repodir, x)
digestgen(mysettings=repoman_settings, myportdb=portdb)
+ elif broken_changelog_manifests:
+ for x in broken_changelog_manifests:
+ repoman_settings["O"] = os.path.join(repodir, x)
+ digestgen(mysettings=repoman_settings, myportdb=portdb)
+
signed = False
if sign_manifests:
signed = True
@@ -2779,7 +3028,7 @@ else:
chain(myupdates, myremoved, mymanifests))):
repoman_settings["O"] = os.path.join(repodir, x)
manifest_path = os.path.join(repoman_settings["O"], "Manifest")
- if not os.path.exists(manifest_path):
+ if not need_signature(manifest_path):
continue
gpgsign(manifest_path)
except portage.exception.PortageException as e:
@@ -2809,7 +3058,6 @@ else:
sys.exit(retval)
if True:
-
myfiles = mymanifests[:]
# If there are no header (SVN/CVS keywords) changes in
# the files, this Manifest commit must include the
@@ -2821,14 +3069,7 @@ else:
fd, commitmessagefile = tempfile.mkstemp(".repoman.msg")
mymsg = os.fdopen(fd, "wb")
- # strip the closing parenthesis
- mymsg.write(_unicode_encode(commitmessage[:-1]))
- if signed:
- mymsg.write(_unicode_encode(
- ", signed Manifest commit with key %s)" % \
- repoman_settings["PORTAGE_GPG_KEY"]))
- else:
- mymsg.write(b", unsigned Manifest commit)")
+ mymsg.write(_unicode_encode(commitmessage))
mymsg.close()
commit_cmd = []
@@ -2851,9 +3092,8 @@ else:
if options.pretend:
print("(%s)" % (" ".join(commit_cmd),))
else:
- retval = spawn(commit_cmd, env=os.environ)
+ retval = spawn(commit_cmd, env=commit_env)
if retval != os.EX_OK:
-
if repo_config.sign_commit and vcs == 'git' and \
not git_supports_gpg_sign():
# Inform user that newer git is needed (bug #403323).
@@ -2877,4 +3117,3 @@ else:
print("repoman was too scared by not seeing any familiar version control file that he forgot to commit anything")
print(green("RepoMan sez:"), "\"If everyone were like you, I'd be out of business!\"\n")
sys.exit(0)
-
diff --git a/bin/save-ebuild-env.sh b/bin/save-ebuild-env.sh
index 47a2acae5..98cff839e 100644
--- a/bin/save-ebuild-env.sh
+++ b/bin/save-ebuild-env.sh
@@ -1,8 +1,8 @@
#!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# @FUNCTION: save_ebuild_env
+# @FUNCTION: __save_ebuild_env
# @DESCRIPTION:
# echo the current environment to stdout, filtering out redundant info.
#
@@ -10,11 +10,12 @@
 # be excluded from the output. These functions are not needed for installation
# or removal of the packages, and can therefore be safely excluded.
#
-save_ebuild_env() {
+__save_ebuild_env() {
(
if has --exclude-init-phases $* ; then
unset S _E_DOCDESTTREE_ _E_EXEDESTTREE_ \
- PORTAGE_DOCOMPRESS PORTAGE_DOCOMPRESS_SKIP
+ PORTAGE_DOCOMPRESS_SIZE_LIMIT PORTAGE_DOCOMPRESS \
+ PORTAGE_DOCOMPRESS_SKIP
if [[ -n $PYTHONPATH &&
${PYTHONPATH%%:*} -ef $PORTAGE_PYM_PATH ]] ; then
if [[ $PYTHONPATH == *:* ]] ; then
@@ -42,35 +43,51 @@ save_ebuild_env() {
for x in pkg_setup pkg_nofetch src_unpack src_prepare src_configure \
src_compile src_test src_install pkg_preinst pkg_postinst \
pkg_prerm pkg_postrm ; do
- unset -f default_$x _eapi{0,1,2,3,4}_$x
+ unset -f default_$x __eapi{0,1,2,3,4}_$x
done
unset x
- unset -f assert assert_sigpipe_ok dump_trace die diefunc \
- quiet_mode vecho elog_base eqawarn elog \
- esyslog einfo einfon ewarn eerror ebegin _eend eend KV_major \
- KV_minor KV_micro KV_to_int get_KV unset_colors set_colors has \
- has_phase_defined_up_to \
- hasv hasq qa_source qa_call \
- addread addwrite adddeny addpredict _sb_append_var \
+ unset -f assert __assert_sigpipe_ok \
+ __dump_trace die \
+ __quiet_mode __vecho __elog_base eqawarn elog \
+ einfo einfon ewarn eerror ebegin __eend eend KV_major \
+ KV_minor KV_micro KV_to_int get_KV __1 __1 has \
+ __has_phase_defined_up_to \
+ hasv hasq __qa_source __qa_call \
+ addread addwrite adddeny addpredict __sb_append_var \
use usev useq has_version portageq \
best_version use_with use_enable register_die_hook \
- keepdir unpack strip_duplicate_slashes econf einstall \
- dyn_setup dyn_unpack dyn_clean into insinto exeinto docinto \
+ unpack __strip_duplicate_slashes econf einstall \
+ __dyn_setup __dyn_unpack __dyn_clean \
+ into insinto exeinto docinto \
insopts diropts exeopts libopts docompress \
- abort_handler abort_prepare abort_configure abort_compile \
- abort_test abort_install dyn_prepare dyn_configure \
- dyn_compile dyn_test dyn_install \
- dyn_preinst dyn_pretend dyn_help debug-print debug-print-function \
- debug-print-section helpers_die inherit EXPORT_FUNCTIONS \
- nonfatal register_success_hook remove_path_entry \
- save_ebuild_env filter_readonly_variables preprocess_ebuild_env \
- set_unless_changed unset_unless_changed source_all_bashrcs \
- ebuild_main ebuild_phase ebuild_phase_with_hooks \
- _ebuild_arg_to_phase _ebuild_phase_funcs default \
- _hasg _hasgq _unpack_tar \
+ __abort_handler __abort_prepare __abort_configure __abort_compile \
+ __abort_test __abort_install __dyn_prepare __dyn_configure \
+ __dyn_compile __dyn_test __dyn_install \
+ __dyn_pretend __dyn_help \
+ debug-print debug-print-function \
+ debug-print-section __helpers_die inherit EXPORT_FUNCTIONS \
+ nonfatal register_success_hook \
+ __hasg __hasgq \
+ __save_ebuild_env __set_colors __filter_readonly_variables \
+ __preprocess_ebuild_env \
+ __repo_attr __source_all_bashrcs \
+ __ebuild_main __ebuild_phase __ebuild_phase_with_hooks \
+ __ebuild_arg_to_phase __ebuild_phase_funcs default \
+ __unpack_tar __unset_colors \
${QA_INTERCEPTORS}
+ ___eapi_has_usex && unset -f usex
+ ___eapi_has_master_repositories && unset -f master_repositories
+ ___eapi_has_repository_path && unset -f repository_path
+ ___eapi_has_available_eclasses && unset -f available_eclasses
+ ___eapi_has_eclass_path && unset -f eclass_path
+ ___eapi_has_license_path && unset -f license_path
+ ___eapi_has_package_manager_build_user && unset -f package_manager_build_user
+ ___eapi_has_package_manager_build_group && unset -f package_manager_build_group
+
+ unset -f $(compgen -A function ___eapi_)
+
# portage config variables and variables set directly by portage
unset ACCEPT_LICENSE BAD BRACKET BUILD_PREFIX COLS \
DISTCC_DIR DISTDIR DOC_SYMLINKS_DIR \
diff --git a/bin/xattr-helper.py b/bin/xattr-helper.py
new file mode 100755
index 000000000..ea83a5e7c
--- /dev/null
+++ b/bin/xattr-helper.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python -b
+# Copyright 2012-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Dump and restore extended attributes.
+
+We use a format like the one used by getfattr --dump. This is meant for shell
+helpers to save/restore. If you're looking for a python/portage API, see
+portage.util.movefile._copyxattr instead.
+
+https://en.wikipedia.org/wiki/Extended_file_attributes
+"""
+
+import array
+import os
+import re
+import sys
+
+from portage.util._argparse import ArgumentParser
+
+if hasattr(os, "getxattr"):
+
+ class xattr(object):
+ get = os.getxattr
+ set = os.setxattr
+ list = os.listxattr
+
+else:
+ import xattr
+
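The shim above gives the rest of the script one xattr.get/set/list interface whether the os module provides native support (Python >= 3.3 on Linux) or the external xattr package is used. Usage is then uniform; the path and attribute below are hypothetical and need a filesystem with user xattr support:

    xattr.set("./example.txt", b"user.comment", b"hello")
    print(xattr.get("./example.txt", b"user.comment"))  # b'hello'
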
+
+_UNQUOTE_RE = re.compile(br'\\[0-7]{3}')
+_FS_ENCODING = sys.getfilesystemencoding()
+
+
+if sys.hexversion < 0x3000000:
+
+ def octal_quote_byte(b):
+ return b'\\%03o' % ord(b)
+
+ def unicode_encode(s):
+ if isinstance(s, unicode):
+ s = s.encode(_FS_ENCODING)
+ return s
+else:
+
+ def octal_quote_byte(b):
+ return ('\\%03o' % ord(b)).encode('ascii')
+
+ def unicode_encode(s):
+ if isinstance(s, str):
+ s = s.encode(_FS_ENCODING)
+ return s
+
+
+def quote(s, quote_chars):
+ """Convert all |quote_chars| in |s| to escape sequences
+
+ This is normally used to escape any embedded quotation marks.
+ """
+ quote_re = re.compile(b'[' + quote_chars + b']')
+ result = []
+ pos = 0
+ s_len = len(s)
+
+ while pos < s_len:
+ m = quote_re.search(s, pos=pos)
+ if m is None:
+ result.append(s[pos:])
+ pos = s_len
+ else:
+ start = m.start()
+ result.append(s[pos:start])
+ result.append(octal_quote_byte(s[start:start+1]))
+ pos = start + 1
+
+ return b''.join(result)
+
+
+def unquote(s):
+ """Process all escape sequences in |s|"""
+ result = []
+ pos = 0
+ s_len = len(s)
+
+ while pos < s_len:
+ m = _UNQUOTE_RE.search(s, pos=pos)
+ if m is None:
+ result.append(s[pos:])
+ pos = s_len
+ else:
+ start = m.start()
+ result.append(s[pos:start])
+ pos = start + 4
+ a = array.array('B')
+ a.append(int(s[start + 1:pos], 8))
+ try:
+ # Python >= 3.2
+ result.append(a.tobytes())
+ except AttributeError:
+ result.append(a.tostring())
+
+ return b''.join(result)
+
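A quick round trip through the two helpers above (illustrative):

    s = quote(b'a\nb', b'\n\r\\\\')
    print(s)  # b'a\\012b'
    assert unquote(s) == b'a\nb'
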
+
+def dump_xattrs(pathnames, file_out):
+ """Dump the xattr data for |pathnames| to |file_out|"""
+ # NOTE: Always quote backslashes, in order to ensure that they are
+ # not interpreted as quotes when they are processed by unquote.
+ quote_chars = b'\n\r\\\\'
+
+ for pathname in pathnames:
+ attrs = xattr.list(pathname)
+ if not attrs:
+ continue
+
+ file_out.write(b'# file: %s\n' % quote(pathname, quote_chars))
+ for attr in attrs:
+ attr = unicode_encode(attr)
+ value = xattr.get(pathname, attr)
+ file_out.write(b'%s="%s"\n' % (
+ quote(attr, b'=' + quote_chars),
+ quote(value, b'\0"' + quote_chars)))
+
+
+def restore_xattrs(file_in):
+ """Read |file_in| and restore xattrs content from it
+
+ This expects textual data in the format written by dump_xattrs.
+ """
+ pathname = None
+ for i, line in enumerate(file_in):
+ if line.startswith(b'# file: '):
+ pathname = unquote(line.rstrip(b'\n')[8:])
+ else:
+ parts = line.split(b'=', 1)
+ if len(parts) == 2:
+ if pathname is None:
+ raise ValueError('line %d: missing pathname' % (i + 1,))
+ attr = unquote(parts[0])
+ # strip trailing newline and quotes
+ value = unquote(parts[1].rstrip(b'\n')[1:-1])
+ xattr.set(pathname, attr, value)
+ elif line.strip():
+ raise ValueError('line %d: malformed entry' % (i + 1,))
+
+
+def main(argv):
+
+ parser = ArgumentParser(description=__doc__)
+ parser.add_argument('paths', nargs='*', default=[])
+
+ actions = parser.add_argument_group('Actions')
+ actions.add_argument('--dump',
+ action='store_true',
+ help='Dump the values of all extended '
+ 'attributes associated with null-separated'
+ ' paths read from stdin.')
+ actions.add_argument('--restore',
+ action='store_true',
+ help='Restore extended attributes using'
+ ' a dump read from stdin.')
+
+ options = parser.parse_args(argv)
+
+ if sys.hexversion >= 0x3000000:
+ file_in = sys.stdin.buffer.raw
+ else:
+ file_in = sys.stdin
+ if not options.paths:
+ options.paths += [x for x in file_in.read().split(b'\0') if x]
+
+ if options.dump:
+ if sys.hexversion >= 0x3000000:
+ file_out = sys.stdout.buffer
+ else:
+ file_out = sys.stdout
+ dump_xattrs(options.paths, file_out)
+
+ elif options.restore:
+ restore_xattrs(file_in)
+
+ else:
+ parser.error('missing action!')
+
+ return os.EX_OK
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/bin/xpak-helper.py b/bin/xpak-helper.py
index ef74920db..c4391cde7 100755
--- a/bin/xpak-helper.py
+++ b/bin/xpak-helper.py
@@ -1,11 +1,12 @@
-#!/usr/bin/python
-# Copyright 2009-2011 Gentoo Foundation
+#!/usr/bin/python -b
+# Copyright 2009-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-import optparse
import sys
import portage
+portage._internal_caller = True
from portage import os
+from portage.util._argparse import ArgumentParser
def command_recompose(args):
@@ -45,8 +46,8 @@ def main(argv):
usage = "usage: %s COMMAND [args]" % \
os.path.basename(argv[0])
- parser = optparse.OptionParser(description=description, usage=usage)
- options, args = parser.parse_args(argv[1:])
+ parser = ArgumentParser(description=description, usage=usage)
+ options, args = parser.parse_known_args(argv[1:])
if not args:
parser.error("missing command argument")
diff --git a/cnf/dispatch-conf.conf b/cnf/dispatch-conf.conf
index 7eea44c8e..125b7cc1f 100644
--- a/cnf/dispatch-conf.conf
+++ b/cnf/dispatch-conf.conf
@@ -6,6 +6,7 @@
archive-dir=${EPREFIX}/etc/config-archive
# Use rcs for storing files in the archive directory?
+# NOTE: You should install dev-vcs/rcs before enabling this option.
# WARNING: When configured to use rcs, read and execute permissions of
# archived files may be inherited from the first check in of a working
# file, as documented in the ci(1) man page. This means that even if
diff --git a/cnf/make.conf b/cnf/make.conf.example
index ad2a5b775..6603b42c8 100644
--- a/cnf/make.conf
+++ b/cnf/make.conf.example
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Contains local system settings for Portage system
@@ -78,6 +78,18 @@
#
#ACCEPT_KEYWORDS="~arch"
+# ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+# It may contain both license and group names, where group names are
+# prefixed with the '@' symbol. License groups are defined in the
+# license_groups file (see portage(5) man page). In addition to license
+# and group names, the * and -* wildcard tokens are also supported.
+#
+# Accept any license except those in the EULA license group (default).
+#ACCEPT_LICENSE="* -@EULA"
+#
+# Only accept licenses in the FREE license group (i.e. Free Software).
+#ACCEPT_LICENSE="-* @FREE"
+
# Portage Directories
# ===================
#
@@ -92,7 +104,7 @@
#
# PORTDIR is the location of the portage tree. This is the repository
# for all profile information as well as all ebuilds. If you change
-# this, you must update your /etc/make.profile symlink accordingly.
+# this, you must update your /etc/portage/make.profile symlink accordingly.
# ***Warning***
# Data stored inside PORTDIR is in peril of being overwritten or deleted by
# the emerge --sync command. The default value of PORTAGE_RSYNC_OPTS
@@ -289,8 +301,7 @@
# logging related variables:
# PORTAGE_ELOG_CLASSES: selects messages to be logged, possible values are:
# info, warn, error, log, qa, *
-# Warning: commenting this will disable elog
-PORTAGE_ELOG_CLASSES="log warn error"
+#PORTAGE_ELOG_CLASSES="log warn error"
# PORTAGE_ELOG_SYSTEM: selects the module(s) to process the log messages. Modules
# included in portage are (empty means logging is disabled):
@@ -312,7 +323,7 @@ PORTAGE_ELOG_CLASSES="log warn error"
# separated list of loglevels to override PORTAGE_ELOG_CLASSES
# for this module (e.g.
# PORTAGE_ELOG_SYSTEM="mail:warn,error syslog:* save")
-#PORTAGE_ELOG_SYSTEM="save_summary echo"
+#PORTAGE_ELOG_SYSTEM="save_summary:log,warn,error,qa echo"
# PORTAGE_ELOG_COMMAND: only used with the "custom" logging module. Specifies a command
# to process log messages. Two variables are expanded:
diff --git a/cnf/make.conf.alpha.diff b/cnf/make.conf.example.alpha.diff
index f0a4e3852..5306999d0 100644
--- a/cnf/make.conf.alpha.diff
+++ b/cnf/make.conf.example.alpha.diff
@@ -1,6 +1,6 @@
---- make.conf 2006-03-19 18:40:11.000000000 +0100
-+++ make.conf.alpha 2006-03-19 18:26:21.000000000 +0100
-@@ -23,6 +23,17 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,17 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -18,9 +18,9 @@
# Host and optimization settings
# ==============================
#
-@@ -33,10 +44,18 @@
- # package (and in some cases the libraries it uses) at default optimizations
- # before reporting errors to developers.
+@@ -39,10 +50,18 @@
+ # -frecord-gcc-switches, since otherwise the check could result in false
+ # positive results.
#
-# Please refer to the GCC manual for a list of possible values.
+# -mcpu=<cpu-type> means optimize code for the particular type of CPU. In
@@ -39,12 +39,12 @@
# If you set a CFLAGS above, then this line will set your default C++ flags to
# the same settings.
#CXXFLAGS="${CFLAGS}"
-@@ -61,7 +80,7 @@
+@@ -76,7 +95,7 @@
# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
#
-#ACCEPT_KEYWORDS="~arch"
+#ACCEPT_KEYWORDS="~alpha"
- # Portage Directories
- # ===================
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.conf.amd64-fbsd.diff b/cnf/make.conf.example.amd64-fbsd.diff
index aa07d0657..1277b6dba 100644
--- a/cnf/make.conf.amd64-fbsd.diff
+++ b/cnf/make.conf.example.amd64-fbsd.diff
@@ -1,6 +1,6 @@
---- make.conf 2006-03-19 18:40:11.000000000 +0100
-+++ make.conf.amd64-fbsd 2006-03-19 18:26:21.000000000 +0100
-@@ -23,6 +23,11 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,11 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -12,9 +12,9 @@
# Host and optimization settings
# ==============================
#
-@@ -33,10 +38,35 @@
- # package (and in some cases the libraries it uses) at default optimizations
- # before reporting errors to developers.
+@@ -39,10 +44,35 @@
+ # -frecord-gcc-switches, since otherwise the check could result in false
+ # positive results.
#
-# Please refer to the GCC manual for a list of possible values.
+# -mcpu=<cpu-type> means optimize code for the particular type of CPU without
@@ -50,12 +50,12 @@
# If you set a CFLAGS above, then this line will set your default C++ flags to
# the same settings.
#CXXFLAGS="${CFLAGS}"
-@@ -61,7 +91,7 @@
+@@ -76,7 +106,7 @@
# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
#
-#ACCEPT_KEYWORDS="~arch"
+#ACCEPT_KEYWORDS="~amd64-fbsd"
- # Portage Directories
- # ===================
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.conf.amd64.diff b/cnf/make.conf.example.amd64.diff
index b4a93fe2a..dfa4b426f 100644
--- a/cnf/make.conf.amd64.diff
+++ b/cnf/make.conf.example.amd64.diff
@@ -1,6 +1,6 @@
---- make.conf 2006-03-19 18:40:11.000000000 +0100
-+++ make.conf.amd64 2006-03-19 18:26:21.000000000 +0100
-@@ -23,6 +23,11 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,11 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -12,9 +12,9 @@
# Host and optimization settings
# ==============================
#
-@@ -33,10 +38,35 @@
- # package (and in some cases the libraries it uses) at default optimizations
- # before reporting errors to developers.
+@@ -39,10 +44,35 @@
+ # -frecord-gcc-switches, since otherwise the check could result in false
+ # positive results.
#
-# Please refer to the GCC manual for a list of possible values.
+# -mcpu=<cpu-type> means optimize code for the particular type of CPU without
@@ -50,12 +50,12 @@
# If you set a CFLAGS above, then this line will set your default C++ flags to
# the same settings.
#CXXFLAGS="${CFLAGS}"
-@@ -61,7 +91,7 @@
+@@ -76,7 +106,7 @@
# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
#
-#ACCEPT_KEYWORDS="~arch"
+#ACCEPT_KEYWORDS="~amd64"
- # Portage Directories
- # ===================
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.conf.arm.diff b/cnf/make.conf.example.arm.diff
index e6924ad50..bb9356339 100644
--- a/cnf/make.conf.arm.diff
+++ b/cnf/make.conf.example.arm.diff
@@ -1,6 +1,6 @@
---- make.conf 2006-03-19 18:40:11.000000000 +0100
-+++ make.conf.arm 2006-03-19 18:26:21.000000000 +0100
-@@ -23,6 +23,19 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,19 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -20,9 +20,9 @@
# Host and optimization settings
# ==============================
#
-@@ -33,10 +46,22 @@
- # package (and in some cases the libraries it uses) at default optimizations
- # before reporting errors to developers.
+@@ -39,10 +52,22 @@
+ # -frecord-gcc-switches, since otherwise the check could result in false
+ # positive results.
#
-# Please refer to the GCC manual for a list of possible values.
-#
diff --git a/cnf/make.conf.hppa.diff b/cnf/make.conf.example.hppa.diff
index a1fa5ef7b..2d17b3794 100644
--- a/cnf/make.conf.hppa.diff
+++ b/cnf/make.conf.example.hppa.diff
@@ -1,6 +1,6 @@
---- make.conf 2006-03-19 18:40:11.000000000 +0100
-+++ make.conf.hppa 2006-03-24 18:36:24.000000000 +0100
-@@ -23,6 +23,18 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,18 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -19,9 +19,9 @@
# Host and optimization settings
# ==============================
#
-@@ -33,14 +45,38 @@
- # package (and in some cases the libraries it uses) at default optimizations
- # before reporting errors to developers.
+@@ -39,14 +51,38 @@
+ # -frecord-gcc-switches, since otherwise the check could result in false
+ # positive results.
#
-# Please refer to the GCC manual for a list of possible values.
+# -march=<cpu-type> means to take full advantage of the ABI and instructions
@@ -35,10 +35,9 @@
+#
+# Architectures types supported in gcc-3.2 and higher: 1.0, 1.1 and 2.0
+# Note that 64bit userspace is not yet implemented.
- #
--#CFLAGS="-O2 -pipe"
++#
+# Decent examples:
- #
++#
+#
+# Use this one if you have a hppa1.1
+#CFLAGS="-march=1.1 -O2 -pipe -mschedule=7100LC"
@@ -47,8 +46,9 @@
+# Note that -march=2.0 was unstable on some stations.
+# -march=1.0 will create problems too.
+#CFLAGS="-O2 -pipe -mschedule=8000"
-+#
-+#
+ #
+-#CFLAGS="-O2 -pipe"
+ #
# If you set a CFLAGS above, then this line will set your default C++ flags to
# the same settings.
#CXXFLAGS="${CFLAGS}"
@@ -60,12 +60,12 @@
# If you set a CFLAGS above, then this line will set your default FORTRAN 77
# flags to the same settings.
#FFLAGS="${CFLAGS}"
-@@ -61,7 +97,7 @@
+@@ -76,7 +112,7 @@
# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
#
-#ACCEPT_KEYWORDS="~arch"
+#ACCEPT_KEYWORDS="~hppa"
- # Portage Directories
- # ===================
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.conf.ia64.diff b/cnf/make.conf.example.ia64.diff
index 3b144d78f..68a0cb01c 100644
--- a/cnf/make.conf.ia64.diff
+++ b/cnf/make.conf.example.ia64.diff
@@ -1,5 +1,5 @@
---- make.conf
-+++ make.conf.ia64
+--- make.conf.example
++++ make.conf.example
@@ -22,6 +22,13 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -14,12 +14,12 @@
# Host and optimization settings
# ==============================
#
-@@ -75,7 +82,7 @@
+@@ -76,7 +83,7 @@
# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
#
-#ACCEPT_KEYWORDS="~arch"
+#ACCEPT_KEYWORDS="~ia64"
- # Portage Directories
- # ===================
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.conf.m68k.diff b/cnf/make.conf.example.m68k.diff
index ac3d0748f..f96746142 100644
--- a/cnf/make.conf.m68k.diff
+++ b/cnf/make.conf.example.m68k.diff
@@ -1,6 +1,6 @@
---- make.conf
-+++ make.conf.m68k
-@@ -23,6 +23,13 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,13 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -14,7 +14,7 @@
# Host and optimization settings
# ==============================
#
-@@ -35,7 +42,7 @@
+@@ -41,7 +48,7 @@
#
# Please refer to the GCC manual for a list of possible values.
#
@@ -23,12 +23,12 @@
#
# If you set a CFLAGS above, then this line will set your default C++ flags to
# the same settings.
-@@ -61,7 +68,7 @@
+@@ -76,7 +83,7 @@
# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
#
-#ACCEPT_KEYWORDS="~arch"
+#ACCEPT_KEYWORDS="~m68k"
- # Portage Directories
- # ===================
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.conf.mips.diff b/cnf/make.conf.example.mips.diff
index 1ee10ec7b..7d3d83de5 100644
--- a/cnf/make.conf.mips.diff
+++ b/cnf/make.conf.example.mips.diff
@@ -1,6 +1,6 @@
---- make.conf 2006-03-19 18:40:11.000000000 +0100
-+++ make.conf.mips 2006-03-19 18:26:21.000000000 +0100
-@@ -23,6 +23,13 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,13 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -14,9 +14,9 @@
# Host and optimization settings
# ==============================
#
-@@ -33,10 +40,15 @@
- # package (and in some cases the libraries it uses) at default optimizations
- # before reporting errors to developers.
+@@ -39,10 +46,15 @@
+ # -frecord-gcc-switches, since otherwise the check could result in false
+ # positive results.
#
-# Please refer to the GCC manual for a list of possible values.
+# -mcpu=<cpu-type> for MIPS systems selects the type of processor you want
@@ -32,12 +32,12 @@
# If you set a CFLAGS above, then this line will set your default C++ flags to
# the same settings.
#CXXFLAGS="${CFLAGS}"
-@@ -61,7 +73,7 @@
+@@ -76,7 +88,7 @@
# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
#
-#ACCEPT_KEYWORDS="~arch"
+#ACCEPT_KEYWORDS="~mips"
- # Portage Directories
- # ===================
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.conf.ppc.diff b/cnf/make.conf.example.ppc.diff
index 76a97d3c2..b34de8e66 100644
--- a/cnf/make.conf.ppc.diff
+++ b/cnf/make.conf.example.ppc.diff
@@ -1,6 +1,6 @@
---- make.conf 2006-03-19 18:40:11.000000000 +0100
-+++ make.conf.ppc 2006-03-19 18:26:21.000000000 +0100
-@@ -23,6 +23,13 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,13 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -14,9 +14,9 @@
# Host and optimization settings
# ==============================
#
-@@ -33,10 +40,56 @@
- # package (and in some cases the libraries it uses) at default optimizations
- # before reporting errors to developers.
+@@ -39,10 +46,56 @@
+ # -frecord-gcc-switches, since otherwise the check could result in false
+ # positive results.
#
-# Please refer to the GCC manual for a list of possible values.
+# -mcpu=<cpu-type> for PowerPC systems selects the type of processor you want
@@ -44,13 +44,13 @@
+#
+# Long term testing has shown that -O3 opts can be unreliable on G4's but work
+# on G3 series processors or earlier.
-+#
-+# The following is the suggested CFLAGS for a generic G4 cpu
-+#
-+#CFLAGS="-O2 -pipe -mcpu=G4 -maltivec -mabi=altivec -fno-strict-aliasing"
#
-#CFLAGS="-O2 -pipe"
++# The following is the suggested CFLAGS for a generic G4 cpu
#
++#CFLAGS="-O2 -pipe -mcpu=G4 -maltivec -mabi=altivec -fno-strict-aliasing"
++#
++#
+# All non G4 PPC boxen should choose this next option. It will work fine for
+# all G3 and pre machines. (note it will not hurt pre G3 machines either to
+# use this mcpu option as it is the default for gcc 3.2.x anyway)
@@ -73,7 +73,7 @@
# If you set a CFLAGS above, then this line will set your default C++ flags to
# the same settings.
#CXXFLAGS="${CFLAGS}"
-@@ -61,7 +114,10 @@
+@@ -76,7 +129,10 @@
# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
#
@@ -83,5 +83,5 @@
+#
+#ACCEPT_KEYWORDS="~ppc"
- # Portage Directories
- # ===================
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.conf.ppc64.diff b/cnf/make.conf.example.ppc64.diff
index 2aaec01f0..961508e8f 100644
--- a/cnf/make.conf.ppc64.diff
+++ b/cnf/make.conf.example.ppc64.diff
@@ -1,6 +1,6 @@
---- make.conf 2006-03-19 18:40:11.000000000 +0100
-+++ make.conf.ppc64 2006-03-19 18:26:21.000000000 +0100
-@@ -23,6 +23,13 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,13 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -14,9 +14,9 @@
# Host and optimization settings
# ==============================
#
-@@ -33,9 +40,38 @@
- # package (and in some cases the libraries it uses) at default optimizations
- # before reporting errors to developers.
+@@ -39,9 +46,38 @@
+ # -frecord-gcc-switches, since otherwise the check could result in false
+ # positive results.
#
-# Please refer to the GCC manual for a list of possible values.
+# -mtune=<cpu-type> for PowerPC64 systems instructs the gcc compiler that
@@ -33,15 +33,15 @@
+# 970 (aka G5), and power5.
+#
+# RS64 processors should specify power3.
-+#
+ #
+-#CFLAGS="-O2 -pipe"
+# Additional options of interest:
+#
+# -maltivec enables optional altivec support and should be used
+# only for 970 processors. It also requires that you have
+# the altivec option compiled into your kernel to take full advantage of this
+# feature. Note: you should also include -mabi=altivec flag if using this option.
- #
--#CFLAGS="-O2 -pipe"
++#
+# -O3 for the most part seems ok but should be used with caution as
+# for instance app-editors/vim has problems if it is used. -O2 is a
+# good selection.
@@ -55,7 +55,7 @@
#
# If you set a CFLAGS above, then this line will set your default C++ flags to
# the same settings.
-@@ -61,7 +97,10 @@
+@@ -76,7 +112,10 @@
# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
#
@@ -65,5 +65,5 @@
+#
+#ACCEPT_KEYWORDS="ppc64"
- # Portage Directories
- # ===================
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.conf.s390.diff b/cnf/make.conf.example.s390.diff
index 2acb085ba..c78076236 100644
--- a/cnf/make.conf.s390.diff
+++ b/cnf/make.conf.example.s390.diff
@@ -1,5 +1,5 @@
---- make.conf
-+++ make.conf.s390
+--- make.conf.example
++++ make.conf.example
@@ -22,6 +22,13 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -14,12 +14,12 @@
# Host and optimization settings
# ==============================
#
-@@ -75,7 +82,7 @@
+@@ -76,7 +83,7 @@
# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
#
-#ACCEPT_KEYWORDS="~arch"
+#ACCEPT_KEYWORDS="~s390"
- # Portage Directories
- # ===================
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.conf.sh.diff b/cnf/make.conf.example.sh.diff
index f2784e613..9699a708b 100644
--- a/cnf/make.conf.sh.diff
+++ b/cnf/make.conf.example.sh.diff
@@ -1,6 +1,6 @@
---- make.conf 2006-03-19 18:40:11.000000000 +0100
-+++ make.conf.sh 2006-03-19 18:26:21.000000000 +0100
-@@ -23,6 +23,19 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,19 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -20,20 +20,19 @@
# Host and optimization settings
# ==============================
#
-@@ -33,10 +46,15 @@
- # package (and in some cases the libraries it uses) at default optimizations
- # before reporting errors to developers.
+@@ -39,10 +52,15 @@
+ # -frecord-gcc-switches, since otherwise the check could result in false
+ # positive results.
#
-# Please refer to the GCC manual for a list of possible values.
--#
--#CFLAGS="-O2 -pipe"
+# -m# optimize code for the particular type of CPU. The number should match
+# your CHOST so if you are using "sh4-unknown-linux-gnu", you should have
+# -m4 below.
#
+-#CFLAGS="-O2 -pipe"
+# For a full listing of supported CPU models, please refer to the GCC website:
+# http://gcc.gnu.org/onlinedocs/gcc-3.3/gcc/SH-Options.html
-+#
+ #
+#CFLAGS="-m4 -O2 -pipe"
+
# If you set a CFLAGS above, then this line will set your default C++ flags to
diff --git a/cnf/make.conf.sparc-fbsd.diff b/cnf/make.conf.example.sparc-fbsd.diff
index f3d3bca8d..25e6f46c2 100644
--- a/cnf/make.conf.sparc-fbsd.diff
+++ b/cnf/make.conf.example.sparc-fbsd.diff
@@ -1,6 +1,6 @@
---- make.conf 2006-10-16 17:06:32 +0100
-+++ make.conf.sparc-fbsd 2006-10-16 17:09:22 +0100
-@@ -23,6 +23,13 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,13 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -14,12 +14,12 @@
# Host and optimization settings
# ==============================
#
-@@ -61,7 +68,7 @@
+@@ -76,7 +83,7 @@
# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
#
-#ACCEPT_KEYWORDS="~arch"
+ACCEPT_KEYWORDS="~sparc-fbsd"
- # Portage Directories
- # ===================
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.conf.sparc.diff b/cnf/make.conf.example.sparc.diff
index e016a0b1e..c68a95aa9 100644
--- a/cnf/make.conf.sparc.diff
+++ b/cnf/make.conf.example.sparc.diff
@@ -1,6 +1,6 @@
---- make.conf 2006-03-19 18:40:11.000000000 +0100
-+++ make.conf.sparc 2006-03-19 18:26:21.000000000 +0100
-@@ -23,6 +23,15 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,15 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -16,9 +16,9 @@
# Host and optimization settings
# ==============================
#
-@@ -33,10 +42,34 @@
- # package (and in some cases the libraries it uses) at default optimizations
- # before reporting errors to developers.
+@@ -39,10 +48,34 @@
+ # -frecord-gcc-switches, since otherwise the check could result in false
+ # positive results.
#
-# Please refer to the GCC manual for a list of possible values.
-#
@@ -55,12 +55,12 @@
# If you set a CFLAGS above, then this line will set your default C++ flags to
# the same settings.
#CXXFLAGS="${CFLAGS}"
-@@ -61,7 +94,7 @@
+@@ -76,7 +109,7 @@
# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
#
-#ACCEPT_KEYWORDS="~arch"
+#ACCEPT_KEYWORDS="~sparc"
- # Portage Directories
- # ===================
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.conf.x86-fbsd.diff b/cnf/make.conf.example.x86-fbsd.diff
index 9fec4f177..d5e02feb0 100644
--- a/cnf/make.conf.x86-fbsd.diff
+++ b/cnf/make.conf.example.x86-fbsd.diff
@@ -1,6 +1,6 @@
---- make.conf 2006-03-19 18:40:11.000000000 +0100
-+++ make.conf.x86-fbsd 2006-03-19 18:26:21.000000000 +0100
-@@ -23,6 +23,16 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,16 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -17,9 +17,9 @@
# Host and optimization settings
# ==============================
#
-@@ -33,10 +43,34 @@
- # package (and in some cases the libraries it uses) at default optimizations
- # before reporting errors to developers.
+@@ -39,10 +49,34 @@
+ # -frecord-gcc-switches, since otherwise the check could result in false
+ # positive results.
#
-# Please refer to the GCC manual for a list of possible values.
+# -mtune=<cpu-type> means optimize code for the particular type of CPU without
@@ -54,12 +54,12 @@
# If you set a CFLAGS above, then this line will set your default C++ flags to
# the same settings.
#CXXFLAGS="${CFLAGS}"
-@@ -61,7 +95,7 @@
+@@ -76,7 +110,7 @@
# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
#
-#ACCEPT_KEYWORDS="~arch"
+ACCEPT_KEYWORDS="~x86-fbsd"
- # Portage Directories
- # ===================
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.conf.x86.diff b/cnf/make.conf.example.x86.diff
index f90845111..3247126d7 100644
--- a/cnf/make.conf.x86.diff
+++ b/cnf/make.conf.example.x86.diff
@@ -1,6 +1,6 @@
---- make.conf 2007-01-10 03:22:07.410548112 +0100
-+++ make.conf.x86 2007-01-10 03:22:13.206910362 +0100
-@@ -23,6 +23,15 @@
+--- make.conf.example
++++ make.conf.example
+@@ -22,6 +22,15 @@
# Example:
#USE="X gtk gnome -alsa"
@@ -16,9 +16,9 @@
# Host and optimization settings
# ==============================
#
-@@ -33,10 +42,65 @@
- # package (and in some cases the libraries it uses) at default optimizations
- # before reporting errors to developers.
+@@ -39,10 +48,65 @@
+ # -frecord-gcc-switches, since otherwise the check could result in false
+ # positive results.
#
-# Please refer to the GCC manual for a list of possible values.
-#
@@ -85,12 +85,12 @@
# If you set a CFLAGS above, then this line will set your default C++ flags to
# the same settings.
#CXXFLAGS="${CFLAGS}"
-@@ -61,7 +125,7 @@
+@@ -76,7 +140,7 @@
# DO NOT PUT ANYTHING BUT YOUR SPECIFIC ~ARCHITECTURE IN THE LIST.
# IF YOU ARE UNSURE OF YOUR ARCH, OR THE IMPLICATIONS, DO NOT MODIFY THIS.
#
-#ACCEPT_KEYWORDS="~arch"
+#ACCEPT_KEYWORDS="~x86"
- # Portage Directories
- # ===================
+ # ACCEPT_LICENSE is used to mask packages based on licensing restrictions.
+ # It may contain both license and group names, where group names are
diff --git a/cnf/make.globals b/cnf/make.globals
index ada91f8f0..013c5560e 100644
--- a/cnf/make.globals
+++ b/cnf/make.globals
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# System-wide defaults for the Portage system
@@ -19,24 +19,21 @@ LDFLAGS=""
FFLAGS=""
FCFLAGS=""
-# Default rsync mirror
-SYNC="rsync://rsync.gentoo.org/gentoo-portage"
-
# Default distfiles mirrors. This rotation has multiple hosts and is reliable.
# Approved by the mirror-admin team.
GENTOO_MIRRORS="http://distfiles.gentoo.org"
ACCEPT_LICENSE="* -@EULA"
ACCEPT_PROPERTIES="*"
+ACCEPT_RESTRICT="*"
-# Repository Paths
-PORTDIR=/usr/portage
-DISTDIR=${PORTDIR}/distfiles
-PKGDIR=${PORTDIR}/packages
-RPMDIR=${PORTDIR}/rpm
+# Miscellaneous paths
+DISTDIR="/usr/portage/distfiles"
+PKGDIR="/usr/portage/packages"
+RPMDIR="/usr/portage/rpm"
# Temporary build directory
-PORTAGE_TMPDIR=/var/tmp
+PORTAGE_TMPDIR="/var/tmp"
# Fetching command (3 tries, passive ftp for firewall compatibility)
FETCHCOMMAND="wget -t 3 -T 60 --passive-ftp -O \"\${DISTDIR}/\${FILE}\" \"\${URI}\""
@@ -45,17 +42,19 @@ RESUMECOMMAND="wget -c -t 3 -T 60 --passive-ftp -O \"\${DISTDIR}/\${FILE}\" \"\$
FETCHCOMMAND_RSYNC="rsync -avP \"\${URI}\" \"\${DISTDIR}/\${FILE}\""
RESUMECOMMAND_RSYNC="rsync -avP \"\${URI}\" \"\${DISTDIR}/\${FILE}\""
-FETCHCOMMAND_SSH="bash -c \"x=\\\${2#ssh://} ; host=\\\${x%%/*} ; port=\\\${host##*:} ; host=\\\${host%:*} ; [[ \\\${host} = \\\${port} ]] && port=22 ; exec rsync --rsh=\\\"ssh -p\\\${port}\\\" -avP \\\"\\\${host}:/\\\${x#*/}\\\" \\\"\\\$1\\\"\" rsync \"\${DISTDIR}/\${FILE}\" \"\${URI}\""
+# NOTE: rsync will evaluate quotes embedded inside PORTAGE_SSH_OPTS
+FETCHCOMMAND_SSH="bash -c \"x=\\\${2#ssh://} ; host=\\\${x%%/*} ; port=\\\${host##*:} ; host=\\\${host%:*} ; [[ \\\${host} = \\\${port} ]] && port=22 ; exec rsync --rsh=\\\"ssh -p\\\${port} \\\${3}\\\" -avP \\\"\\\${host}:/\\\${x#*/}\\\" \\\"\\\$1\\\"\" rsync \"\${DISTDIR}/\${FILE}\" \"\${URI}\" \"\${PORTAGE_SSH_OPTS}\""
RESUMECOMMAND_SSH=${FETCHCOMMAND_SSH}
-FETCHCOMMAND_SFTP="bash -c \"x=\\\${2#sftp://} ; host=\\\${x%%/*} ; port=\\\${host##*:} ; host=\\\${host%:*} ; [[ \\\${host} = \\\${port} ]] && port=22 ; exec sftp -P \\\${port} \\\"\\\${host}:/\\\${x#*/}\\\" \\\"\\\$1\\\"\" sftp \"\${DISTDIR}/\${FILE}\" \"\${URI}\""
+# NOTE: bash eval is used to evaluate quotes embedded inside PORTAGE_SSH_OPTS
+FETCHCOMMAND_SFTP="bash -c \"x=\\\${2#sftp://} ; host=\\\${x%%/*} ; port=\\\${host##*:} ; host=\\\${host%:*} ; [[ \\\${host} = \\\${port} ]] && port=22 ; eval \\\"declare -a ssh_opts=(\\\${3})\\\" ; exec sftp -P \\\${port} \\\"\\\${ssh_opts[@]}\\\" \\\"\\\${host}:/\\\${x#*/}\\\" \\\"\\\$1\\\"\" sftp \"\${DISTDIR}/\${FILE}\" \"\${URI}\" \"\${PORTAGE_SSH_OPTS}\""
# Default user options
FEATURES="assume-digests binpkg-logs
config-protect-if-modified distlocks ebuild-locks
- fixlafiles news parallel-fetch parse-eapi-ebuild-head protect-owned
+ fixlafiles merge-sync news parallel-fetch preserve-libs protect-owned
sandbox sfperms strict unknown-features-warn unmerge-logs
- unmerge-orphans userfetch"
+ unmerge-orphans userfetch userpriv usersandbox usersync"
# Ignore file collisions in /lib/modules since files inside this directory
# are never unmerged, and therefore collisions must be ignored in order for
@@ -63,16 +62,9 @@ FEATURES="assume-digests binpkg-logs
# Ignore file collisions for unowned *.pyo and *.pyc files, this helps during
# transition from compiling python modules in live file system to compiling
# them in src_install() function.
-COLLISION_IGNORE="/lib/modules/* *.py[co]"
+COLLISION_IGNORE="/lib/modules/* *.py[co] *\$py.class"
UNINSTALL_IGNORE="/lib/modules/*"
-# Enable preserve-libs for testing with portage versions that support it.
-# This setting is commented out for portage versions that don't support it.
-FEATURES="${FEATURES} preserve-libs"
-
-# Default chunksize for binhost comms
-PORTAGE_BINHOST_CHUNKSIZE="3000"
-
# By default wait 5 secs before cleaning a package
CLEAN_DELAY="5"
@@ -100,7 +92,7 @@ PORTAGE_RSYNC_RETRIES="-1"
# Number of seconds rsync will wait before timing out.
#RSYNC_TIMEOUT="180"
-PORTAGE_RSYNC_OPTS="--recursive --links --safe-links --perms --times --compress --force --whole-file --delete --stats --human-readable --timeout=180 --exclude=/distfiles --exclude=/local --exclude=/packages"
+PORTAGE_RSYNC_OPTS="--recursive --links --safe-links --perms --times --omit-dir-times --compress --force --whole-file --delete --stats --human-readable --timeout=180 --exclude=/distfiles --exclude=/local --exclude=/packages"
# The number of days after the last `emerge --sync` that a warning
# message should be produced.
@@ -121,7 +113,7 @@ PORTAGE_WORKDIR_MODE="0700"
# Some defaults for elog
PORTAGE_ELOG_CLASSES="log warn error"
-PORTAGE_ELOG_SYSTEM="save_summary echo"
+PORTAGE_ELOG_SYSTEM="save_summary:log,warn,error,qa echo"
PORTAGE_ELOG_MAILURI="root"
PORTAGE_ELOG_MAILSUBJECT="[portage] ebuild log for \${PACKAGE} on \${HOST}"
@@ -130,6 +122,10 @@ PORTAGE_ELOG_MAILFROM="portage@localhost"
# Signing command used by repoman
PORTAGE_GPG_SIGNING_COMMAND="gpg --sign --digest-algo SHA256 --clearsign --yes --default-key \"\${PORTAGE_GPG_KEY}\" --homedir \"\${PORTAGE_GPG_DIR}\" \"\${FILE}\""
+# Security labels are special, see bug #461868.
+# system.nfs4_acl attributes are irrelevant, see bug #475496.
+PORTAGE_XATTR_EXCLUDE="security.* system.nfs4_acl"
+
# *****************************
# ** DO NOT EDIT THIS FILE **
# ***************************************************
diff --git a/cnf/metadata.dtd b/cnf/metadata.dtd
index d97642a72..ff2649cad 100644
--- a/cnf/metadata.dtd
+++ b/cnf/metadata.dtd
@@ -5,7 +5,7 @@
<!ATTLIST catmetadata pkgname CDATA "">
<!-- Metadata for a package -->
-<!ELEMENT pkgmetadata ( (herd|maintainer|longdescription|use|upstream)* )>
+<!ELEMENT pkgmetadata ( (herd|maintainer|natural-name|longdescription|use|upstream)* )>
<!ATTLIST pkgmetadata pkgname CDATA "">
<!-- One tag for each herd this package is assigned to. -->
@@ -14,6 +14,9 @@
<!-- One tag for each maintainer of a package, multiple allowed-->
<!ELEMENT maintainer ( email, (description| name)* )>
+ <!-- Natural name for package, example: LibreOffice (for app-office/libreoffice) -->
+ <!ELEMENT natural-name (#PCDATA) >
+
<!-- A long description of the package in freetext-->
<!ELEMENT longdescription (#PCDATA|pkg|cat)* >
@@ -61,7 +64,7 @@
<!ELEMENT bugs-to (#PCDATA)>
<!-- specify a type of package identification tracker -->
<!ELEMENT remote-id (#PCDATA)>
- <!ATTLIST remote-id type (freshmeat|sourceforge|sourceforge-jp|cpan|vim|google-code|ctan|pypi|rubyforge|cran) #REQUIRED>
+ <!ATTLIST remote-id type (bitbucket|cpan|cpan-module|cpe|cran|ctan|freecode|freshmeat|github|gitorious|google-code|launchpad|pear|pecl|pypi|rubyforge|rubygems|sourceforge|sourceforge-jp|vim) #REQUIRED>
<!-- category/package information for cross-linking in descriptions
and useflag descriptions -->
diff --git a/cnf/repos.conf b/cnf/repos.conf
new file mode 100644
index 000000000..8c657daae
--- /dev/null
+++ b/cnf/repos.conf
@@ -0,0 +1,7 @@
+[DEFAULT]
+main-repo = gentoo
+
+[gentoo]
+location = /usr/portage
+sync-type = rsync
+sync-uri = rsync://rsync.gentoo.org/gentoo-portage
diff --git a/cnf/sets/portage.conf b/cnf/sets/portage.conf
index c5c787bb5..b73afb19f 100644
--- a/cnf/sets/portage.conf
+++ b/cnf/sets/portage.conf
@@ -51,7 +51,7 @@ class = portage.sets.libs.PreservedLibraryConsumerSet
[live-rebuild]
class = portage.sets.dbapi.VariableSet
variable = INHERITED
-includes = bzr cvs darcs git git-2 mercurial subversion tla
+includes = bzr cvs darcs git git-2 git-r3 mercurial subversion tla
# Installed packages that own files inside /lib/modules.
[module-rebuild]
diff --git a/doc/config/sets.docbook b/doc/config/sets.docbook
index f7eea7766..8f7441250 100644
--- a/doc/config/sets.docbook
+++ b/doc/config/sets.docbook
@@ -17,9 +17,8 @@
<varname>system</varname> or <varname>security</varname>.
<!-- TODO: Add reference to currently non-existing documentation about
set usage and default sets -->
- After that it will read repository specific configurations from
- <envar>PORTDIR</envar> and <envar>PORTDIR_OVERLAY</envar> that might
- include definitions of sets included in the repository.
+ After that it will read configurations located in repositories
+ configured in <filename>repos.conf</filename>.
Finally a system-specific set configuration may reside in
<filename>/etc/portage</filename> to either define additional sets or
alter the default and repository sets.
diff --git a/doc/package/ebuild.docbook b/doc/package/ebuild.docbook
index ba146ca99..c3b6caca3 100644
--- a/doc/package/ebuild.docbook
+++ b/doc/package/ebuild.docbook
@@ -11,5 +11,8 @@
&package_ebuild_eapi_4;
&package_ebuild_eapi_4_python;
&package_ebuild_eapi_4_slot_abi;
+&package_ebuild_eapi_5;
+&package_ebuild_eapi_5_progress;
+&package_ebuild_eapi_5_hdepend;
</section>
</chapter>
diff --git a/doc/package/ebuild/eapi/4-python.docbook b/doc/package/ebuild/eapi/4-python.docbook
index ec5fd83c4..a61ac0503 100644
--- a/doc/package/ebuild/eapi/4-python.docbook
+++ b/doc/package/ebuild/eapi/4-python.docbook
@@ -19,7 +19,6 @@
<listitem><para>docompress</para></listitem>
<listitem><para>exeopts</para></listitem>
<listitem><para>insopts</para></listitem>
- <listitem><para>keepdir</para></listitem>
<listitem><para>libopts</para></listitem>
<listitem><para>use</para></listitem>
<listitem><para>use_enable</para></listitem>
@@ -97,7 +96,7 @@
<section id='package-ebuild-eapi-4-python-repo-level-config'>
<title>Extended Repository-Level Configuration</title>
<para>
- Repository-level configuration in ${repository}/profiles is supported for the following files:
+ Repository-level configuration in ${repository_path}/profiles is supported for the following files:
<itemizedlist>
<listitem><para>make.defaults</para></listitem>
<listitem><para>package.use</para></listitem>
@@ -107,8 +106,11 @@
<listitem><para>use.mask</para></listitem>
</itemizedlist>
</para>
+ </section>
+ <section id='package-ebuild-eapi-4-python-directories'>
+ <title>Directories Allowed for Profile-Level and Repository-Level Configuration</title>
<para>
- By default, the following files in ${repository}/profiles can be also directories:
+ The following files can be directories:
<itemizedlist>
<listitem><para>package.mask</para></listitem>
<listitem><para>package.use</para></listitem>
@@ -119,4 +121,40 @@
</itemizedlist>
</para>
</section>
+ <section id='package-ebuild-eapi-4-python-use-aliases'>
+ <title>USE Flag Aliases</title>
+ <para>
+ USE flag aliases are supported to allow satisfying dependencies of packages from other repositories that require differently named USE flags. USE flag aliases are defined in the ${repository_path}/profiles/use.aliases and ${repository_path}/profiles/package.use.aliases files.
+ </para>
+ <table><title>use.aliases Example</title>
+ <tgroup cols='1' align='left'>
+ <tbody>
+ <row>
+ <entry>real_flag1 alias1 alias2</entry>
+ </row>
+ <row>
+ <entry>real_flag2 alias3 alias4</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ <table><title>package.use.aliases Example</title>
+ <tgroup cols='1' align='left'>
+ <tbody>
+ <row>
+ <entry>category/package1 real_flag1 alias1 alias2</entry>
+ </row>
+ <row>
+ <entry>category/package1 real_flag2 alias3 alias4</entry>
+ </row>
+ <row>
+ <entry>=category/package2-1* real_flag3 alias5 alias6</entry>
+ </row>
+ <row>
+ <entry>=category/package2-2* real_flag4 alias5 alias6</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </section>
</section>
diff --git a/doc/package/ebuild/eapi/4-slot-abi.docbook b/doc/package/ebuild/eapi/4-slot-abi.docbook
index 696d0bf74..08e2cef75 100644
--- a/doc/package/ebuild/eapi/4-slot-abi.docbook
+++ b/doc/package/ebuild/eapi/4-slot-abi.docbook
@@ -28,12 +28,12 @@ Refer to the
</para>
</section>
<section id='package-ebuild-eapi-4-slot-abi-metadata-dependency-atom-slot-abi-equal-operator'>
-<title>Dependency Atom SLOT/ABI := Operator</title>
+<title>Dependency Atom slot/sub-slot := Operator</title>
<para>
-Dependency atom syntax now supports SLOT/ABI := operators which allow the
-specific SLOT/ABI that a package is built against to be recorded, so that it's
+Dependency atom syntax now supports slot/sub-slot := operators which allow the
+specific slot/sub-slot that a package is built against to be recorded, so that it's
possible to automatically determine when a package needs to be rebuilt due to
-having a dependency upgraded to a different SLOT/ABI.
+having a dependency upgraded to a different slot/sub-slot.
</para>
<para>
For example, if a package is built
@@ -50,13 +50,13 @@ not contain a sub-slot part, the sub-slot is considered to be implicitly equal
to "4.8".
</para>
<para>
-When dependencies are rewritten as described above, the SLOT/ABI recorded in
+When dependencies are rewritten as described above, the slot/sub-slot recorded in
the atom is always equal to that of the highest matched version that is
installed at build time.
</para>
</section>
<section id='package-ebuild-eapi-4-slot-abi-metadata-dependency-atom-slot-abi-asterisk-operator'>
-<title>Dependency Atom SLOT/ABI :* Operator</title>
+<title>Dependency Atom slot/sub-slot :* Operator</title>
<para>
The new :* operator is used to express dependencies that can change versions
at runtime without requiring reverse dependencies to be rebuilt. For example,
diff --git a/doc/package/ebuild/eapi/5-hdepend.docbook b/doc/package/ebuild/eapi/5-hdepend.docbook
new file mode 100644
index 000000000..0f568bcdd
--- /dev/null
+++ b/doc/package/ebuild/eapi/5-hdepend.docbook
@@ -0,0 +1,32 @@
+<section id='package-ebuild-eapi-5-hdepend'>
+<title>EAPI 5-hdepend</title>
+<section id='package-ebuild-eapi-5-hdepend-metadata'>
+<title>Metadata</title>
+<section id='package-ebuild-eapi-5-hdepend-metadata-dependencies'>
+<title>Dependencies</title>
+<section id='package-ebuild-eapi-5-hdepend-metadata-dependencies-hdepend'>
+<title>HDEPEND</title>
+<para>
+The HDEPEND variable is used to represent build-time host dependencies. For
+build-time target dependencies, use DEPEND (if the host is the target then both
+HDEPEND and DEPEND will be installed on it). For EAPIs that support HDEPEND,
+the emerge --root-deps option will have no effect, since it is not needed
+when build-time dependencies are correctly specified with HDEPEND and DEPEND.
+If ebuilds using EAPIs which <emphasis>do not</emphasis> support HDEPEND are
+built in the same emerge run as those using EAPIs which <emphasis>do</emphasis>
+support HDEPEND, the emerge --root-deps option will only apply to the former.
+</para>
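+<para>
+A minimal sketch of how an ebuild might use this (package names are
+illustrative only): build tools that must run on the host go into
+HDEPEND, while libraries needed on the target go into DEPEND.
+</para>
+<programlisting>
+EAPI="5-hdepend"
+
+# Runs on the build host during compilation:
+HDEPEND="dev-util/pkgconfig"
+# Must be present on the target being built:
+DEPEND="dev-libs/libfoo"
+RDEPEND="${DEPEND}"
+</programlisting>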
+</section>
+<section id='package-ebuild-eapi-5-hdepend-metadata-dependencies-targetroot'>
+<title>Special "targetroot" USE flag</title>
+<para>
+The special "targetroot" USE flag will be automatically enabled for packages
+that are built for installation into a target ROOT, and will otherwise be
+automatically disabled. This flag may be used to control conditional
+dependencies, and ebuilds that use this flag need to add it to IUSE unless it
+happens to be included in the profile's IUSE_IMPLICIT variable.
+</para>
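+<para>
+For instance (an illustrative sketch), a dependency that is only needed
+when building for a target ROOT could be expressed as:
+</para>
+<programlisting>
+IUSE="targetroot"
+DEPEND="targetroot? ( dev-lang/perl )"
+</programlisting>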
+</section>
+</section>
+</section>
+</section>
diff --git a/doc/package/ebuild/eapi/5-progress.docbook b/doc/package/ebuild/eapi/5-progress.docbook
new file mode 100644
index 000000000..6493d7e81
--- /dev/null
+++ b/doc/package/ebuild/eapi/5-progress.docbook
@@ -0,0 +1,247 @@
+<section id='package-ebuild-eapi-5-progress'>
+ <title>EAPI 5-progress</title>
+ <para>
+ Also see the <ulink url="http://people.apache.org/~Arfrever/EAPI_5-progress_Specification">official EAPI 5-progress Specification</ulink>.
+ </para>
+ <section id='package-ebuild-eapi-5-progress-helpers'>
+ <title>Helpers</title>
+ <section id='package-ebuild-eapi-5-progress-helpers-master-repositories'>
+ <title>master_repositories</title>
+ <para>
+ The new master_repositories function prints a space-separated list of master repositories for the specified repository.
+ </para>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-helpers-repository-path'>
+ <title>repository_path</title>
+ <para>
+ The new repository_path function prints the path to the specified repository.
+ </para>
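+ <para>
+ For example, "repository_path gentoo" would print the configured location of the gentoo repository, such as /usr/portage.
+ </para>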
+ </section>
+ <section id='package-ebuild-eapi-5-progress-helpers-available-eclasses'>
+ <title>available_eclasses</title>
+ <para>
+ The new available_eclasses function prints a space-separated list of available eclasses for the current repository.
+ </para>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-helpers-eclass-path'>
+ <title>eclass_path</title>
+ <para>
+ The new eclass_path function prints the path to the specified eclass for the current repository.
+ </para>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-helpers-license-path'>
+ <title>license_path</title>
+ <para>
+ The new license_path function prints the path to the specified license for the current repository.
+ </para>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-helpers-package-manager-build-user'>
+ <title>package_manager_build_user</title>
+ <para>
+ The new package_manager_build_user function prints the name of the user used by the package manager in build phases.
+ </para>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-helpers-package-manager-build-group'>
+ <title>package_manager_build_group</title>
+ <para>
+ The new package_manager_build_group function prints the name of the group used by the package manager in build phases.
+ </para>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-helpers-dohtml-extended-default-list-of-extensions'>
+ <title>Extended default list of extensions in dohtml</title>
+ <para>
+ dohtml by default additionally installs files with .ico, .svg, .xhtml and .xml extensions.
+ </para>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-helpers-unpack-case-insensitive'>
+ <title>Case-insensitive matching of extensions in unpack</title>
+ <para>
+ unpack matches extensions case-insensitively.
+ </para>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-helpers-banned-in-global-scope'>
+ <title>Helpers Banned in Global Scope</title>
+ <para>
+ <itemizedlist>
+ <listitem><para>diropts</para></listitem>
+ <listitem><para>docompress</para></listitem>
+ <listitem><para>exeopts</para></listitem>
+ <listitem><para>insopts</para></listitem>
+ <listitem><para>libopts</para></listitem>
+ <listitem><para>use</para></listitem>
+ <listitem><para>use_enable</para></listitem>
+ <listitem><para>use_with</para></listitem>
+ <listitem><para>useq</para></listitem>
+ <listitem><para>usev</para></listitem>
+ <listitem><para>usex</para></listitem>
+ </itemizedlist>
+ </para>
+ </section>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-metadata'>
+ <title>Metadata</title>
+ <section id='package-ebuild-eapi-5-progress-metadata-package-names-allow-period-characters'>
+ <title>Support for Period Characters in Package Names</title>
+ <para>
+ The "." character is allowed in package names.
+ </para>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-metadata-use-flags-allow-period-characters'>
+ <title>Support for Period Characters in USE Flags</title>
+ <para>
+ The "." character is allowed in USE flags.
+ </para>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-metadata-repository-dependencies'>
+ <title>Repository Dependencies</title>
+ <para>
+ Repository dependencies are supported in atoms in DEPEND, PDEPEND and RDEPEND, and in atoms passed to the best_version and has_version functions.
+ A repository dependency is specified by two colons followed by the repository name.
+ </para>
+ <table><title>Repository Dependency Examples</title>
+ <tgroup cols='1' align='left'>
+ <colspec colname='atom'/>
+ <thead>
+ <row>
+ <entry>Atom</entry>
+ </row>
+ </thead>
+ <tbody>
+ <row>
+ <entry>dev-lang/python::progress</entry>
+ </row>
+ <row>
+ <entry>&gt;=dev-lang/python-3.2::progress</entry>
+ </row>
+ <row>
+ <entry>dev-lang/python:3.2::progress</entry>
+ </row>
+ <row>
+ <entry>dev-lang/python::progress[xml]</entry>
+ </row>
+ <row>
+ <entry>dev-lang/python:3.2::progress[xml]</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-metadata-automatic-unpack-dependencies'>
+ <title>Automatic Unpack Dependencies</title>
+ <para>
+ Dependencies on packages required to unpack archives specified in SRC_URI are automatically appended to DEPEND. These dependencies are calculated from the filename extensions of the archives specified in SRC_URI. The dependencies corresponding to given filename extensions (for ebuilds using a given EAPI) are configured in ${repository_path}/profiles/unpack_dependencies/${EAPI} files.
+ </para>
+ <table><title>Unpack Dependencies Configuration Examples</title>
+ <tgroup cols='1' align='left'>
+ <tbody>
+ <row>
+ <entry>bz2 app-arch/bzip2</entry>
+ </row>
+ <row>
+ <entry>gz app-arch/gzip</entry>
+ </row>
+ <row>
+ <entry>tar app-arch/tar</entry>
+ </row>
+ <row>
+ <entry>tar.bz2 app-arch/tar app-arch/bzip2</entry>
+ </row>
+ <row>
+ <entry>tar.gz app-arch/tar app-arch/gzip</entry>
+ </row>
+ <row>
+ <entry>zip app-arch/unzip</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </section>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-globstar'>
+ <title>globstar shell option enabled by default</title>
+ <para>
+ The globstar shell option is enabled by default, which enables recursive expansion of the ** pattern in pathname expansion contexts.
+ </para>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-variables'>
+ <title>Variables</title>
+ <section id='package-ebuild-eapi-5-progress-variables-repository'>
+ <title>REPOSITORY Variable</title>
+ <para>
+ The new REPOSITORY variable is set in the ebuild environment. This variable contains the name of the repository that provides the ebuild currently being used.
+ </para>
+ </section>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-repo-level-config'>
+ <title>Extended Repository-Level Configuration</title>
+ <para>
+ Repository-level configuration in ${repository_path}/profiles is supported for the following files:
+ <itemizedlist>
+ <listitem><para>make.defaults</para></listitem>
+ <listitem><para>package.use</para></listitem>
+ <listitem><para>package.use.force</para></listitem>
+ <listitem><para>package.use.mask</para></listitem>
+ <listitem><para>package.use.stable.force</para></listitem>
+ <listitem><para>package.use.stable.mask</para></listitem>
+ <listitem><para>use.force</para></listitem>
+ <listitem><para>use.mask</para></listitem>
+ <listitem><para>use.stable.force</para></listitem>
+ <listitem><para>use.stable.mask</para></listitem>
+ </itemizedlist>
+ </para>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-directories'>
+ <title>Directories Allowed for Profile-Level and Repository-Level Configuration</title>
+ <para>
+ The following files can be directories:
+ <itemizedlist>
+ <listitem><para>package.mask</para></listitem>
+ <listitem><para>package.use</para></listitem>
+ <listitem><para>package.use.force</para></listitem>
+ <listitem><para>package.use.mask</para></listitem>
+ <listitem><para>package.use.stable.force</para></listitem>
+ <listitem><para>package.use.stable.mask</para></listitem>
+ <listitem><para>use.force</para></listitem>
+ <listitem><para>use.mask</para></listitem>
+ <listitem><para>use.stable.force</para></listitem>
+ <listitem><para>use.stable.mask</para></listitem>
+ </itemizedlist>
+ </para>
+ </section>
+ <section id='package-ebuild-eapi-5-progress-use-aliases'>
+ <title>USE Flag Aliases</title>
+ <para>
+ USE flag aliases are supported to allow satisfying dependencies of packages from other repositories that require differently named USE flags. USE flag aliases are defined in the ${repository_path}/profiles/use.aliases and ${repository_path}/profiles/package.use.aliases files.
+ </para>
+ <table><title>use.aliases Example</title>
+ <tgroup cols='1' align='left'>
+ <tbody>
+ <row>
+ <entry>real_flag1 alias1 alias2</entry>
+ </row>
+ <row>
+ <entry>real_flag2 alias3 alias4</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ <table><title>package.use.aliases Example</title>
+ <tgroup cols='1' align='left'>
+ <tbody>
+ <row>
+ <entry>category/package1 real_flag1 alias1 alias2</entry>
+ </row>
+ <row>
+ <entry>category/package1 real_flag2 alias3 alias4</entry>
+ </row>
+ <row>
+ <entry>=category/package2-1* real_flag3 alias5 alias6</entry>
+ </row>
+ <row>
+ <entry>=category/package2-2* real_flag4 alias5 alias6</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </section>
+</section>
diff --git a/doc/package/ebuild/eapi/5.docbook b/doc/package/ebuild/eapi/5.docbook
new file mode 100644
index 000000000..376262e1c
--- /dev/null
+++ b/doc/package/ebuild/eapi/5.docbook
@@ -0,0 +1,232 @@
+<section id='package-ebuild-eapi-5'>
+<title>EAPI 5</title>
+<section id='package-ebuild-eapi-5-metadata'>
+<title>Metadata</title>
+<section id='package-ebuild-eapi-5-metadata-required-use-at-most-one-of'>
+<title>REQUIRED_USE supports new at-most-one-of operator</title>
+<para>
+The new at-most-one-of operator consists of the string '??',
+and is satisfied if zero or one (but no more) of its child
+elements is matched.
+</para>
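+<para>
+For example (flag names are illustrative), the following setting allows
+at most one GUI toolkit flag to be enabled:
+</para>
+<programlisting>
+REQUIRED_USE="?? ( gtk qt4 )"
+</programlisting>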
+</section>
+<section id='package-ebuild-eapi-5-metadata-slot-sub-slot'>
+<title>SLOT supports optional "sub-slot" part</title>
+<para>
+The SLOT variable may contain an optional sub-slot part that
+follows the regular slot and is delimited by a / character.
+The sub-slot must be a valid slot name. The sub-slot is used
+to represent cases in which an upgrade to a new version of a
+package with a different sub-slot may require dependent
+packages to be rebuilt. When the sub-slot part is omitted from
+the SLOT definition, the package is considered to have an
+implicit sub-slot which is equal to the regular slot.
+</para>
+<para>
+Refer to the
+<link linkend="package-ebuild-eapi-5-metadata-dependency-atom-slot-operators">
+slot operators</link> documentation for more information about sub-slot usage.
+</para>
+</section>
+<section id='package-ebuild-eapi-5-metadata-dependency-atom-slot-operators'>
+<title>Dependency atom slot operators</title>
+<para>
+A slot dependency may contain an optional sub-slot part that
+follows the regular slot and is delimited by a / character.
+An operator slot dependency consists of a colon followed by
+one of the following operators:
+<itemizedlist>
+<listitem><para>
+* Indicates that any slot value is acceptable. In addition,
+for runtime dependencies, indicates that the package will not
+break if the matched package is uninstalled and replaced by
+a different matching package in a different slot.
+</para></listitem>
+<listitem><para>
+= Indicates that any slot value is acceptable. In addition,
+for runtime dependencies, indicates that the package will
+break unless a matching package with slot and sub-slot equal
+to the slot and sub-slot of the best installed version at the
+time the package was installed is available.
+</para></listitem>
+<listitem><para>
+slot= Indicates that only a specific slot value is acceptable,
+and otherwise behaves identically to the plain equals slot
+operator.
+</para></listitem>
+</itemizedlist>
+</para>
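+<para>
+Example dependency atoms using these operators (package name
+illustrative):
+</para>
+<programlisting>
+dev-libs/foo:*     # any slot acceptable, no rebuild needed on upgrade
+dev-libs/foo:=     # rebuild when the matched slot/sub-slot changes
+dev-libs/foo:0=    # only slot 0 acceptable, rebuild on sub-slot change
+</programlisting>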
+<para>
+To implement the equals slot operator, the package manager
+will need to store the slot/sub-slot pair of the best installed
+version of the matching package. This syntax is only for package
+manager use and must not be used by ebuilds. The package manager
+may do this by inserting the appropriate slot/sub-slot pair
+between the colon and equals sign when saving the package's
+dependencies. The sub-slot part must not be omitted here
+(when the SLOT variable omits the sub-slot part, the package
+is considered to have an implicit sub-slot which is equal to
+the regular slot).
+</para>
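+<para>
+For example (package name illustrative), if dev-libs/foo:= is matched at
+build time by a version of dev-libs/foo with SLOT="0/4.8", the package
+manager records the dependency as dev-libs/foo:0/4.8=.
+</para>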
+</section>
+</section>
+<section id='package-ebuild-eapi-5-profile'>
+<title>Profiles</title>
+<section id='package-ebuild-eapi-5-profile-iuse-injection'>
+<title>Profile IUSE Injection</title>
+<para>
+IUSE_EFFECTIVE is a variable calculated from IUSE and
+a variety of other sources described below. It is purely
+a conceptual variable; it is not exported to the ebuild
+environment. Values in IUSE_EFFECTIVE may legally be
+used in queries about an ebuild's state (for example, for use
+dependencies, for the use function, and for use in dependency
+specification conditional blocks).
+</para>
+<para>
+For EAPIs that support profile defined IUSE injection, IUSE_EFFECTIVE
+contains the following values:
+<itemizedlist>
+<listitem><para>
+All values in the calculated IUSE value.
+</para></listitem>
+<listitem><para>
+All values in the profile IUSE_IMPLICIT variable.
+</para></listitem>
+<listitem><para>
+All values in the profile variable named USE_EXPAND_VALUES_${v},
+where ${v} is any value in the intersection of the profile
+USE_EXPAND_UNPREFIXED and USE_EXPAND_IMPLICIT variables.
+</para></listitem>
+<listitem><para>
+All values of the form ${lower_v}_${x}, where ${x} is any value in
+the profile variable named USE_EXPAND_VALUES_${v}, ${v} is any value
+in the intersection of the profile USE_EXPAND and USE_EXPAND_IMPLICIT
+variables, and ${lower_v} is the lowercase equivalent of ${v}.
+</para></listitem>
+</itemizedlist>
+</para>
+<para>
+<table><title>Example Variable Settings</title>
+<tgroup cols='2' align='left' >
+<colspec colname='source'/>
+<colspec colname='destination'/>
+<thead>
+<row>
+<entry>Variable</entry>
+<entry>Value</entry>
+</row>
+</thead>
+<tbody>
+<row>
+<entry>IUSE_IMPLICIT</entry>
+<entry>prefix selinux</entry>
+</row>
+<row>
+<entry>USE_EXPAND</entry>
+<entry>ELIBC KERNEL USERLAND</entry>
+</row>
+<row>
+<entry>USE_EXPAND_UNPREFIXED</entry>
+<entry>ARCH</entry>
+</row>
+<row>
+<entry>USE_EXPAND_IMPLICIT</entry>
+<entry>ARCH ELIBC KERNEL USERLAND</entry>
+</row>
+<row>
+<entry>USE_EXPAND_VALUES_ARCH</entry>
+<entry>amd64 ppc ppc64 x86 x86-fbsd x86-solaris</entry>
+</row>
+<row>
+<entry>USE_EXPAND_VALUES_ELIBC</entry>
+<entry>FreeBSD glibc</entry>
+</row>
+<row>
+<entry>USE_EXPAND_VALUES_KERNEL</entry>
+<entry>FreeBSD linux SunOS</entry>
+</row>
+<row>
+<entry>USE_EXPAND_VALUES_USERLAND</entry>
+<entry>BSD GNU</entry>
+</row>
+</tbody>
+</tgroup>
+</table>
+</para>
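+<para>
+With the example settings above, IUSE_EFFECTIVE would contain, in
+addition to all values in the calculated IUSE value: prefix, selinux,
+amd64, ppc, ppc64, x86, x86-fbsd, x86-solaris, elibc_FreeBSD,
+elibc_glibc, kernel_FreeBSD, kernel_linux, kernel_SunOS,
+userland_BSD and userland_GNU.
+</para>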
+</section>
+<section id='package-ebuild-eapi-5-profile-stable-use-masking'>
+<title>Profile stable USE forcing and masking</title>
+<para>
+In profile directories with an EAPI supporting stable masking,
+new USE configuration files are supported: use.stable.mask,
+use.stable.force, package.use.stable.mask and
+package.use.stable.force. These files behave similarly to
+previously supported USE configuration files, except that they
+only influence packages that are merged due to a stable keyword.
+</para>
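+<para>
+For example, a profile might mask a USE flag only for stable users
+with a hypothetical package.use.stable.mask entry such as:
+<programlisting>
+dev-lang/python doc
+</programlisting>
+</para>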
+</section>
+</section>
+<section id='package-ebuild-eapi-5-helpers'>
+<title>Helpers</title>
+<section id='package-ebuild-eapi-5-helpers-econf-disable-silent-rules'>
+<title>econf adds --disable-silent-rules</title>
+<para>
+This option will automatically be passed if
+--disable-silent-rules occurs in the output of configure --help.
+</para>
+</section>
+<section id='package-ebuild-eapi-5-helpers-newfoo-stdin'>
+<title>new* commands can read from standard input</title>
+<para>
+Standard input is read when the first parameter is - (a hyphen).
+</para>
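+<para>
+For example (hypothetical env.d content):
+<programlisting>
+echo 'LDPATH="/opt/mypkg/lib"' | newenvd - 99mypkg
+</programlisting>
+</para>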
+</section>
+<section id='package-ebuild-eapi-5-helpers-foo-version-host-root'>
+<title>New option --host-root for {has,best}_version</title>
+<para>
+The --host-root option causes the query to apply to the
+host root instead of ROOT.
+</para>
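+<para>
+For example (a minimal sketch):
+<programlisting>
+if has_version --host-root dev-lang/python; then
+	einfo "Python is installed on the build host"
+fi
+</programlisting>
+</para>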
+</section>
+<section id='package-ebuild-eapi-5-helpers-doheader'>
+<title>New doheader helper function</title>
+<para>
+Installs the given header files into /usr/include/, by default
+with file mode 0644. This can be overridden by setting
+INSOPTIONS with the insopts function.
+</para>
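+<para>
+For example (a hypothetical header file):
+<programlisting>
+doheader mylib.h
+</programlisting>
+</para>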
+</section>
+<section id='package-ebuild-eapi-5-helpers-usex'>
+<title>New usex helper function</title>
+<programlisting>
+USAGE: usex &lt;USE flag&gt; [true output] [false output] [true suffix] [false suffix]
+DESCRIPTION:
+ If USE flag is set, echo [true output][true suffix] (defaults to "yes"),
+ otherwise echo [false output][false suffix] (defaults to "no").
+</programlisting>
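+<para>
+For example, an ebuild might use it like this (assuming a "debug"
+USE flag):
+<programlisting>
+econf $(usex debug --enable-debug --disable-debug)
+</programlisting>
+</para>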
+</section>
+</section>
+<section id='package-ebuild-eapi-5-phases'>
+<title>Phases</title>
+<section id='package-ebuild-eapi-5-phases-src-test-parallel'>
+<title>src_test supports parallel tests</title>
+<para>
+Unlike older EAPIs, the default src_test implementation will not
+pass the -j1 option to emake.
+</para>
+</section>
+</section>
+<section id='package-ebuild-eapi-5-ebuild-environment-variables'>
+<title>Ebuild Environment Variables</title>
+<section id='package-ebuild-eapi-5-ebuild-environment-variables-ebuild-phase-func'>
+<title>New EBUILD_PHASE_FUNC variable</title>
+<para>
+During execution of an ebuild phase function (such as pkg_setup
+or src_unpack), the EBUILD_PHASE_FUNC variable will contain the
+name of the phase function that is currently executing.
+</para>
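+<para>
+For example, a function shared between phases could report its
+context (a minimal sketch):
+<programlisting>
+# hypothetical helper, for illustration only
+my_report() {
+	einfo "called from ${EBUILD_PHASE_FUNC}"
+}
+</programlisting>
+</para>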
+</section>
+</section>
+</section>
diff --git a/doc/portage.docbook b/doc/portage.docbook
index 781915cbb..811544f5e 100644
--- a/doc/portage.docbook
+++ b/doc/portage.docbook
@@ -22,6 +22,9 @@
<!ENTITY package_ebuild_eapi_4 SYSTEM "package/ebuild/eapi/4.docbook">
<!ENTITY package_ebuild_eapi_4_python SYSTEM "package/ebuild/eapi/4-python.docbook">
<!ENTITY package_ebuild_eapi_4_slot_abi SYSTEM "package/ebuild/eapi/4-slot-abi.docbook">
+ <!ENTITY package_ebuild_eapi_5 SYSTEM "package/ebuild/eapi/5.docbook">
+ <!ENTITY package_ebuild_eapi_5_progress SYSTEM "package/ebuild/eapi/5-progress.docbook">
+ <!ENTITY package_ebuild_eapi_5_hdepend SYSTEM "package/ebuild/eapi/5-hdepend.docbook">
<!ENTITY qa SYSTEM "qa.docbook">
<!ENTITY config SYSTEM "config.docbook">
<!ENTITY config_bashrc SYSTEM "config/bashrc.docbook">
diff --git a/doc/qa.docbook b/doc/qa.docbook
index b9ec375ec..d0986e2d3 100644
--- a/doc/qa.docbook
+++ b/doc/qa.docbook
@@ -70,7 +70,7 @@
</programlisting>
</para>
<para>
- Please see the Gentoo Hardened <ulink url="http://hardened.gentoo.org/gnu-stack.xml">GNU Stack Guide</ulink>.
+ Please see the Gentoo Hardened <ulink url="http://www.gentoo.org/proj/en/hardened/gnu-stack.xml">GNU Stack Guide</ulink>.
</para>
</sect1>
diff --git a/make.conf-repatch.sh b/make.conf-repatch.sh
deleted file mode 100644
index 6589e6b62..000000000
--- a/make.conf-repatch.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-die() {
- echo "ERROR: $*" > /dev/stderr
- patch -p0 make.conf < make.conf.diff
- exit 1
-}
-
-if [ ! -f "make.conf" -o ! -f "make.conf.x86.diff" -o ! -d ".svn" ]; then
- echo "ERROR: current directory is invalid" > /dev/stderr
- exit 1
-fi
-
-svn diff make.conf > make.conf.diff
-svn revert make.conf
-
-for x in make.conf.*.diff; do
- archs="$archs $(basename ${x:10} .diff)"
-done
-
-
-for arch in $archs; do
- echo "* Patching $arch"
- cp make.conf make.conf.$arch || die "copy failed"
- patch -p0 make.conf.$arch < make.conf.${arch}.diff > /dev/null || die "arch-patch failed"
- patch -p0 make.conf.$arch < make.conf.diff > /dev/null || die "patch failed"
-done
-
-echo "* Re-patching make.conf"
-patch -p0 make.conf < make.conf.diff > /dev/null || die "repatch failed"
-
-for arch in $archs; do
- echo "* Creating diff for $arch"
- diff -u make.conf make.conf.$arch > make.conf.${arch}.diff
- [ -z "${KEEP_ARCH_MAKE_CONF}" ] && rm -f make.conf.$arch make.conf.${arch}.orig
-done
-
-rm make.conf.diff
-
-echo "Done" \ No newline at end of file
diff --git a/make.conf.example-repatch.sh b/make.conf.example-repatch.sh
new file mode 100755
index 000000000..c97c6f20f
--- /dev/null
+++ b/make.conf.example-repatch.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+die() {
+ echo "ERROR: $*" > /dev/stderr
+ patch -p0 make.conf.example < make.conf.example.diff
+ exit 1
+}
+
+if [[ ! -f make.conf.example || ! -f make.conf.example.x86.diff || ! -d ../.git ]]; then
+ echo "ERROR: current directory is invalid" > /dev/stderr
+ exit 1
+fi
+
+git diff --no-prefix --relative="$(basename "$(pwd)")" make.conf.example > make.conf.example.diff
+git checkout -- make.conf.example
+
+archs=()
+for x in make.conf.example.*.diff; do
+ archs+=("$(basename ${x:18} .diff)")
+done
+
+
+for arch in "${archs[@]}"; do
+ echo "* Patching ${arch}"
+ cp make.conf.example make.conf.example.${arch} || die "copy failed"
+ patch -p0 make.conf.example.${arch} < make.conf.example.${arch}.diff > /dev/null || die "arch-patch failed"
+ patch -p0 make.conf.example.${arch} < make.conf.example.diff > /dev/null || die "patch failed"
+done
+
+echo "* Re-patching make.conf.example"
+patch -p0 make.conf.example < make.conf.example.diff > /dev/null || die "repatch failed"
+
+for arch in "${archs[@]}"; do
+ echo "* Creating diff for ${arch}"
+ diff -u make.conf.example make.conf.example.${arch} > make.conf.example.${arch}.diff
+ [[ -z ${KEEP_ARCH_MAKE_CONF_EXAMPLE} ]] && rm -f make.conf.example.${arch} make.conf.example.${arch}.orig
+done
+
+rm make.conf.example.diff
+
+echo "Done"
diff --git a/man/color.map.5 b/man/color.map.5
index ca6b17dd7..5543628f8 100644
--- a/man/color.map.5
+++ b/man/color.map.5
@@ -1,4 +1,4 @@
-.TH "COLOR.MAP" "5" "Mar 2010" "Portage VERSION" "Portage"
+.TH "COLOR.MAP" "5" "Jul 2013" "Portage VERSION" "Portage"
.SH "NAME"
color.map \- custom color settings for Portage
.SH "SYNOPSIS"
@@ -9,9 +9,11 @@ Portage will check this file first for color classes settings. If no setting
of given color class is found in /etc/portage/color.map, Portage uses default
value defined internally.
.SH "SYNTAX"
-\fBVARIABLE\fR = \fI[space delimited list of attributes or ansi code pattern]\fR
+\fBVARIABLE\fR = \fI[space delimited list of attributes or ansi code
+pattern]\fR
.TP
-\fBATTRIBUTE\fR = \fI[space delimited list of attributes or ansi code pattern]\fR
+\fBATTRIBUTE\fR = \fI[space delimited list of attributes or ansi code \
+pattern]\fR
.SH "VARIABLES"
.TP
\fBNORMAL\fR = \fI"normal"\fR
@@ -54,10 +56,12 @@ Defines color used for world packages planned to be merged.
Defines color used for packages planned to be merged using a binary package.
.TP
\fBPKG_BINARY_MERGE_SYSTEM\fR = \fI"purple"\fR
-Defines color used for system packages planned to be merged using a binary package.
+Defines color used for system packages planned to be merged using a binary
+package.
.TP
\fBPKG_BINARY_MERGE_WORLD\fR = \fI"fuchsia"\fR
-Defines color used for world packages planned to be merged using a binary package.
+Defines color used for world packages planned to be merged using a binary
+package.
.TP
\fBPKG_NOMERGE\fR = \fI"darkblue"\fR
Defines color used for packages not planned to be merged.
@@ -185,14 +189,14 @@ Defines color used for warnings.
Please report bugs via http://bugs.gentoo.org/
.SH "AUTHORS"
.nf
-Arfrever Frehtes Taifersar Arahesis <Arfrever.FTA@gmail.com>
+Arfrever Frehtes Taifersar Arahesis <arfrever@apache.org>
.fi
.SH "FILES"
.TP
.B /etc/portage/color.map
Contains variables customizing colors.
.TP
-.B /etc/make.conf
+.B /etc/portage/make.conf
Contains other variables.
.SH "SEE ALSO"
.BR console_codes (4),
diff --git a/man/dispatch-conf.1 b/man/dispatch-conf.1
index b82c2152d..3a5264a16 100644
--- a/man/dispatch-conf.1
+++ b/man/dispatch-conf.1
@@ -1,46 +1,36 @@
.TH "DISPATCH-CONF" "1" "Jan 2011" "Portage VERSION" "Portage"
-.SH NAME
-dispatch-conf \- Sanely update configuration files after emerging new packages
-.SH SYNOPSIS
-.B dispatch-conf
-.SH DESCRIPTION
-.I dispatch-conf
-is designed to be run after merging new packages in order to see if
-there are updates to the configuration files. If a new
-configuration file will overwrite an old one,
-.I dispatch-conf
-will prompt the user for a decision about how to resolve the
-discrepancy.
-Advantages of
-.I dispatch-conf
-include easy rollback (changes to config files are stored either using
-patches or rcs) and the ability to automatically update config files
-that the user has never modified or
+.SH "NAME"
+dispatch\-conf \- Sanely update configuration files after emerging new packages
+.SH "SYNOPSIS"
+.B dispatch\-conf
+.SH "DESCRIPTION"
+\fIdispatch\-conf\fR is designed to be run after merging new packages
+in order to see if there are updates to the configuration files.
+If a new configuration file will overwrite an old one, \fIdispatch\-conf\fR
+will prompt the user for a decision about how to resolve the discrepancy.
+Advantages of \fIdispatch\-conf\fR include easy rollback (changes to config
+files are stored either using patches or rcs) and the ability to
+automatically update config files that the user has never modified or
that differ from the current version only in CVS cruft or white space.
-.I dispatch-conf
-will check all directories in the \fICONFIG_PROTECT\fR variable. All
-config files found in \fICONFIG_PROTECT_MASK\fR will automatically be
-updated for you by \fIdispatch-conf\fR. See \fBmake.conf\fR(5) for more
-information.
-.SH OPTIONS
+\fIdispatch\-conf\fR will check all directories in the \fICONFIG_PROTECT\fR
+variable. All config files found in \fICONFIG_PROTECT_MASK\fR will
+automatically be updated for you by \fIdispatch\-conf\fR. See
+\fBmake.conf\fR(5) for more information.
+.SH "OPTIONS"
.TP
None.
-.SH USAGE
-.I dispatch-conf
-must be run as root, since the config files to be replaced are generally
-owned by root. Before running
-.I dispatch-conf
-for the first time the settings in
-.B /etc/dispatch-conf.conf
-should be edited and the archive directory specified in
-\fI/etc/dispatch-conf.conf\fR will need to be created. All changes to
+.SH "USAGE"
+\fIdispatch\-conf\fR must be run as root, since the config files to be
+replaced are generally owned by root. Before running \fIdispatch\-conf\fR
+for the first time the settings in \fB/etc/dispatch\-conf.conf\fR
+should be edited, and the archive directory specified in
+\fB/etc/dispatch\-conf.conf\fR will need to be created. All changes to
config files will be saved in the archive directory either as patches
or using rcs, making restoration to an earlier version rather simple.
-When dispatch-conf finds a config file that has a new update the user
-is provided
-with a menu of options for how to handle the update:
+When \fIdispatch\-conf\fR finds a config file that has a new update, the user
+is provided with a menu of options for how to handle the update:
.TP
.B u
Update (replace) the current config file with the new config file and continue.
@@ -53,7 +43,7 @@ Skip to the next config file, leaving both the original config file and any
\fICONFIG_PROTECT\fRed files.
.TP
.B e
-Edit the new config file, using the editor defined in \fI$EDITOR\fR.
+Edit the new config file, using the editor defined in \fIEDITOR\fR.
.TP
.B m
Interactively merge the current and new config files.
@@ -63,17 +53,14 @@ Look at the differences between the pre-merged and merged config files.
.TP
.B t
Toggle between the merged and pre-merged config files (in terms of which
-should be installed using the
-.Qt u
-command).
+should be installed using the \fBu\fR command).
.TP
.B h
Display a help screen.
.TP
.B q
-Quit
-.I dispatch-conf.
-.SH FILE MODES
+Quit \fIdispatch\-conf\fR.
+.SH "FILE MODES"
\fBWARNING:\fR When \fB/etc/dispatch\-conf.conf\fR is configured
to use \fBrcs\fR(1), read and execute permissions of archived
files may be inherited from the first check in of a working file,
@@ -85,7 +72,7 @@ to RCS files by setting the permissions of the directory
containing the files.
.SH "REPORTING BUGS"
Please report bugs via http://bugs.gentoo.org/
-.SH AUTHORS
+.SH "AUTHORS"
.nf
Jeremy Wohl
Karl Trygve Kalleberg <karltk@gentoo.org>
@@ -94,8 +81,8 @@ Grant Goodyear <g2boojum@gentoo.org>
.fi
.SH "FILES"
.TP
-.B /etc/dispatch-conf.conf
-Configuration settings for \fIdispatch-conf\fR are stored here.
+.B /etc/dispatch\-conf.conf
+Configuration settings for \fIdispatch\-conf\fR are stored here.
.SH "SEE ALSO"
.BR make.conf (5),
.BR ci (1),
diff --git a/man/ebuild.1 b/man/ebuild.1
index e74779a66..29f88b02d 100644
--- a/man/ebuild.1
+++ b/man/ebuild.1
@@ -1,4 +1,4 @@
-.TH "EBUILD" "1" "Feb 2011" "Portage VERSION" "Portage"
+.TH "EBUILD" "1" "Mar 2013" "Portage VERSION" "Portage"
.SH "NAME"
ebuild \- a low level interface to the Portage system
.SH "SYNOPSIS"
@@ -19,11 +19,15 @@ This must be a valid ebuild script. For further information read
\fBebuild\fR(5).
.SH "COMMANDS"
By default, portage will execute all the functions in order up to the
-one actually specified. For example, simply issuing the command \fBcompile\fR
-will trigger the functions before it to also be run (such as \fBsetup\fR
-and \fBunpack\fR). If you wish to only have the specified command run, then
-you should use the \fInoauto\fR option in the \fBFEATURES\fR environment
-variable. See the \fBmake.conf\fR(5) man page for more information.
+one actually specified, except for the functions that have already been
+executed in a previous invocation of ebuild. For example, simply issuing the
+command \fBcompile\fR will trigger the functions before it to also be run (such
+as \fBsetup\fR and \fBunpack\fR), unless they were run in a previous invocation
+of ebuild. If you want to make sure they are all run, you need to use
+the command \fBclean\fR first. If you wish to only have the specified command
+run, then you should use the \fInoauto\fR option in the \fBFEATURES\fR
+environment variable. See the \fBmake.conf\fR(5) man page for more
+information.
.TP
.BR help
@@ -53,13 +57,13 @@ manually clean these files with \fIrm \-rf /var/tmp/portage\fR.
.BR fetch
Checks to see if all the sources specified in SRC_URI are available in
DISTDIR (see \fBmake.conf\fR(5) for more information) and have a valid
-md5 checksum. If the sources aren't available, an attempt is made to
+checksum. If the sources aren't available, an attempt is made to
download them from the locations specified in SRC_URI. If multiple
download locations are listed for a particular file, Portage pings
each location to see which location is closer. (May not be true
presently.) The Gentoo Linux mirrors defined by GENTOO_MIRRORS is
always considered first. If for some reason the current or
-just\-downloaded sources' md5 digests don't match those recorded
+just\-downloaded sources' checksums don't match those recorded
in files/digest\-[package]\-[version\-rev], a warning is printed
and ebuild exits with an error code of 1.
.TP
@@ -74,7 +78,7 @@ for all of the files listed in SRC_URI for each ebuild. For further
information regarding the behavior of this command, see the documentation for
the \fIassume\-digests\fR value of the \fBFEATURES\fR variable in
\fBmake.conf\fR(5). See the \fB\-\-force\fR option if you would like to
-prevent digests from being assumed.
+prevent digests from being assumed.
.TP
.BR unpack
Extracts the sources to a subdirectory in the \fIbuild directory\fR
@@ -112,7 +116,7 @@ current working directory will be set to ${S}. When src_compile()
completes, the sources should be fully compiled.
.TP
.BR test
-Runs package-specific test cases to verify that everything was built
+Runs package-specific test cases to verify that everything was built
properly.
.TP
.BR preinst
@@ -135,8 +139,8 @@ shown here.
This function installs all the files in the \fIinstall directory\fR
to the live filesystem. The process works as follows: first, the
\fIpkg_preinst()\fR function (if specified) is run. Then, the files
-are merged into the live filesystem, and the installed files' md5
-digests are recorded in
+are merged into the live filesystem, and the installed files'
+checksums are recorded in
\fI/var/db/pkg/${CATEGORY}/${PN}\-${PVR}/CONTENTS\fR. After
all the files have been merged, the \fIpkg_postinst()\fR function
(if specified) is executed.
@@ -150,7 +154,7 @@ particular step doesn't complete successfully.
.TP
.BR unmerge
This function first executes the \fIpkg_prerm()\fR function (if specified).
-Then it removes all files from the live filesystem that have a valid md5
+Then it removes all files from the live filesystem that have a valid
checksum and mtime in the package contents file. Any empty directories
are recursively removed. Finally, it runs \fIpkg_postrm()\fR function (if
specified). It is safe to merge a new version of a package first and
@@ -179,7 +183,7 @@ tarball is created and stored in \fBPKGDIR\fR (see \fBmake.conf\fR(5)).
Builds a RedHat RPM package from the files in the temporary
\fIinstall directory\fR. At the moment, the ebuild's dependency
information is not incorporated into the RPM.
-.SH OPTIONS
+.SH "OPTIONS"
.TP
.BR "\-\-debug"
Run bash with the \-x option, causing it to output verbose debugging
@@ -212,7 +216,7 @@ Mike Frysinger <vapier@gentoo.org>
.fi
.SH "FILES"
.TP
-.B /etc/make.conf
+.B /etc/portage/make.conf
Contains variables for the build\-process and overwrites those
in make.globals.
.TP
@@ -224,6 +228,6 @@ Contains variables customizing colors.
.BR make.conf (5),
.BR color.map (5)
.TP
-The \fI/usr/sbin/ebuild.sh\fR script.
+The \fI/usr/lib/portage/bin/ebuild.sh\fR script.
.TP
The helper apps in \fI/usr/lib/portage/bin\fR.
diff --git a/man/ebuild.5 b/man/ebuild.5
index e9e718e0d..89bd6a275 100644
--- a/man/ebuild.5
+++ b/man/ebuild.5
@@ -1,57 +1,373 @@
-.TH "EBUILD" "5" "Dec 2011" "Portage VERSION" "Portage"
+.TH "EBUILD" "5" "Jan 2014" "Portage VERSION" "Portage"
+
.SH "NAME"
ebuild \- the internal format, variables, and functions in an ebuild script
+
.SH "DESCRIPTION"
-The
-.BR ebuild (1)
-program accepts a single ebuild script as an argument. This script
-contains variables and commands that specify how to download, unpack,
-patch, compile, install and merge a particular software package from
-its original sources. In addition to all of this, the ebuild script
-can also contain pre/post install/remove commands, as required. All
-ebuild scripts are written in bash.
-.SH "EXAMPLES"
-Here's a simple example ebuild:
+The \fBebuild\fR(1) program accepts a single ebuild script as an argument.
+This script contains variables and commands that specify how to download,
+unpack, patch, compile, install and merge a particular software package from
+its original sources. In addition to all of this, the ebuild script can also
+contain pre/post install/remove commands, as required. All ebuild scripts are
+written in bash.
-.DS
+.SS "Dependencies"
+A \fIdepend atom\fR is simply a dependency that is used by portage when
+calculating relationships between packages. Please note that if the atom has
+not already been emerged, then the latest version available is matched.
+.TP
+.B Atom Bases
+The base atom is just a full category/packagename.
+
+Examples:
.nf
-# Copyright 1999\-2009 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Header: $
+.I sys\-apps/sed
+.I sys\-libs/zlib
+.I net\-misc/dhcp
+.fi
+.TP
+.B Atom Versions
+It is nice to be more specific and say that only certain versions of atoms are
+acceptable. Note that versions must be combined with a prefix (see below).
+Hence you may add a version number as a postfix to the base.
-EAPI="4"
+Examples:
+.nf
+ sys\-apps/sed\fI\-4.0.5\fR
+ sys\-libs/zlib\fI\-1.1.4\-r1\fR
+ net\-misc/dhcp\fI\-3.0_p2\fR
+.fi
-inherit some_eclass another_eclass
+Versions are normally made up of two or three numbers separated by periods,
+such as 1.2 or 4.5.2. This string may be followed by a character such as 1.2a
+or 4.5.2z. Note that this letter is \fInot\fR meant to indicate alpha, beta,
+etc... status. For that, use the optional suffix; either _alpha, _beta, _pre
+(pre\-release), _rc (release candidate), or _p (patch). This means for the
+3rd pre\-release of a package, you would use something like 1.2_pre3. The
+suffixes here can be arbitrarily chained without limitation.
+.TP
+.B Atom Prefix Operators [> >= = <= <]
+Sometimes you want to be able to depend on general versions rather than
+specifying exact versions all the time. Hence we provide standard boolean
+operators:
-DESCRIPTION="Super\-useful stream editor (sed)"
-HOMEPAGE="http://www.gnu.org/software/sed/sed.html"
-SRC_URI="ftp://alpha.gnu.org/pub/gnu/${PN}/${P}.tar.gz"
+Examples:
+.nf
+ \fI>\fRmedia\-libs/libgd\-1.6
+ \fI>=\fRmedia\-libs/libgd\-1.6
+ \fI=\fRmedia\-libs/libgd\-1.6
+ \fI<=\fRmedia\-libs/libgd\-1.6
+ \fI<\fRmedia\-libs/libgd\-1.6
+.fi
+.TP
+.B Extended Atom Prefixes [!~] and Postfixes [*]
+Now to get even fancier, we provide the ability to define blocking packages and
+version range matching. Also note that these extended prefixes/postfixes may
+be combined in any way with the atom classes defined above.
+.RS
+.TP
+.I ~
+means match any revision of the base version specified. So in the
+example below, we would match versions '1.0.2a', '1.0.2a\-r1', '1.0.2a\-r2',
+etc...
-LICENSE="GPL\-2"
-SLOT="0"
-KEYWORDS="~x86"
-IUSE=""
+Example:
+.nf
+ \fI~\fRnet\-libs/libnet\-1.0.2a
+.fi
+.TP
+.I !
+means block packages from being installed at the same time.
-RDEPEND=""
-DEPEND="nls? ( sys-devel/gettext )"
+Example:
+.nf
+ \fI!\fRapp\-text/dos2unix
+.fi
+.TP
+.I !!
+means block packages from being installed at the same time
+and explicitly disallow them from being temporarily installed
+simultaneously during a series of upgrades. This syntax is supported
+beginning with \fBEAPI 2\fR.
-src_configure() {
- econf \\
- \-\-bindir="${EPREFIX}"/bin
-}
+Example:
+.nf
+ \fI!!\fR<sys\-apps/portage\-2.1.4_rc1
+.fi
+.TP
+.I *
+means match any version of the package so long
+as the specified string prefix is matched. So with a
+version of '2*', we can match '2.1', '2.2', '2.2.1',
+etc... and not match version '1.0', '3.0', '4.1', etc...
+Beware that, due to the string matching nature, '20'
+will also be matched by '2*'. The version part
+that comes before the '*' must be a valid version in the absence of the '*'.
+For example, '2' is a valid version and '2.' is not. Therefore, '2*' is
+allowed and '2.*' is not.
-src_install() {
- emake DESTDIR="${D}" install
- dodoc NEWS README* THANKS AUTHORS BUGS ChangeLog
-}
+Examples:
+.nf
+ =dev\-libs/glib\-2\fI*\fR
+ \fI!\fR=net\-fs/samba\-2\fI*\fR
+.fi
+.RE
+.TP
+.B Atom Slots
+Beginning with \fBEAPI 1\fR, any atom can be constrained to match a specific
+\fBSLOT\fR. This is accomplished by appending a colon followed by a
+\fBSLOT\fR:
+
+Examples:
+.nf
+ x11\-libs/qt:3
+ \fI~\fRx11\-libs/qt-3.3.8:3
+ \fI>=\fRx11\-libs/qt-3.3.8:3
+ \fI=\fRx11\-libs/qt-3.3*:3
+.fi
+.TP
+.B Sub Slots
+Beginning with \fBEAPI 5\fR, a slot dependency may contain an
+optional sub\-slot part that follows the regular slot and is
+delimited by a \fI/\fR character.
+
+Examples:
+.nf
+ dev\-libs/icu:0/0
+ dev\-libs/icu:0/49
+ dev\-lang/perl:0/5.12
+ dev\-libs/glib:2/2.30
+.fi
+.TP
+.B Atom Slot Operators
+Beginning with \fBEAPI 5\fR, a slot operator dependency consists
+of a colon followed by one of the following operators:
+.RS
+.TP
+.I *
+Indicates that any slot value is acceptable. In addition,
+for runtime dependencies, indicates that the package will not
+break if the matched package is uninstalled and replaced by
+a different matching package in a different slot.
+
+Examples:
+.nf
+ dev\-libs/icu:*
+ dev\-lang/perl:*
+ dev-libs/glib:*
+.fi
+.TP
+.I =
+Indicates that any slot value is acceptable. In addition,
+for runtime dependencies, indicates that the package will
+break unless a matching package with slot and sub\-slot equal
+to the slot and sub\-slot of the best installed version at the
+time the package was installed is available.
+
+Examples:
+.nf
+ dev\-libs/icu:=
+ dev\-lang/perl:=
+ dev-libs/glib:=
+.fi
+.TP
+.I slot=
+Indicates that only a specific slot value is acceptable, and
+otherwise behaves identically to the plain equals slot operator.
+
+Examples:
+.nf
+ dev\-libs/icu:0=
+ dev\-lang/perl:0=
+ dev-libs/glib:2=
.fi
+.PP
+To implement the equals slot operator, the package manager
+will need to store the slot/sub\-slot pair of the best installed
+version of the matching package. This syntax is only for package
+manager use and must not be used by ebuilds. The package manager
+may do this by inserting the appropriate slot/sub\-slot pair
+between the colon and equals sign when saving the package's
+dependencies. The sub\-slot part must not be omitted here
+(when the SLOT variable omits the sub\-slot part, the package
+is considered to have an implicit sub\-slot which is equal to
+the regular slot).
+
+Examples:
+.nf
+ dev\-libs/icu:0/0=
+ dev\-libs/icu:0/49=
+ dev\-lang/perl:0/5.12=
+ dev-libs/glib:2/2.30=
+.fi
+.RE
+.TP
+.B Atom USE
+Beginning with \fBEAPI 2\fR, any atom can be constrained to match specific
+\fBUSE\fR flag settings. When used together with \fBSLOT\fR dependencies,
+\fBUSE\fR dependencies appear on the right hand side of \fBSLOT\fR
+dependencies.
+.RS
+.TP
+.B Unconditional USE Dependencies
+.TS
+l l
+__
+l l.
+Example Meaning
+foo[bar] foo must have bar enabled
+foo[bar,baz] foo must have both bar and baz enabled
+foo[\-bar,baz] foo must have bar disabled and baz enabled
+.TE
+.TP
+.B Conditional USE Dependencies
+.TS
+l l
+__
+l l.
+Compact Form Equivalent Expanded Form
+foo[bar?] bar? ( foo[bar] ) !bar? ( foo )
+foo[!bar?] bar? ( foo ) !bar? ( foo[\-bar] )
+foo[bar=] bar? ( foo[bar] ) !bar? ( foo[\-bar] )
+foo[!bar=] bar? ( foo[\-bar] ) !bar? ( foo[bar] )
+.TE
+.RE
+.TP
+.B Atom USE defaults
+Beginning with \fBEAPI 4\fR, \fBUSE\fR dependencies may specify default
+assumptions about values for flags that may or may not be missing from
+the \fBIUSE\fR of the matched package. Such defaults are specified by
+immediately following a flag with either \fI(+)\fR or \fI(\-)\fR. Use
+\fI(+)\fR to behave as if a missing flag is present and enabled, or
+\fI(\-)\fR to behave as if it is present and disabled:
+
+Examples:
+.nf
+ media\-video/ffmpeg[threads(+)]
+ media\-video/ffmpeg[-threads(\-)]
+.fi
+.TP
+.B Dynamic Dependencies
+Sometimes programs may depend on different things depending on the USE
+variable. Portage offers a few options to handle this. Note that when
+using the following syntaxes, each case is considered as 1 Atom in the
+scope in which it appears. That means that each Atom can both
+conditionally include multiple Atoms and be nested to an infinite depth.
+.RS
+.TP
+.B usevar? ( Atom )
+To include the jpeg library when the user has jpeg in \fBUSE\fR, simply use the
+following syntax:
+
+jpeg? ( media\-libs/jpeg )
+.TP
+.B !usevar? ( Atom )
+If you want to include a package only if the user does not have a certain
+option in their \fBUSE\fR variable, then use the following syntax:
+
+!nophysfs? ( dev\-games/physfs )
+
+This is often useful for those times when you want to add optional
+support for a feature and have it enabled by default.
+.TP
+.B usevar? ( Atom if true ) !usevar? ( Atom if false )
+For functionality like the ternary operator found in C you must use
+two statements, one normal and one inverted. If a package uses
+GTK2 or GTK1, but not both, then you can handle that like this:
+
+gtk2? ( =x11\-libs/gtk+\-2* ) !gtk2? ( =x11\-libs/gtk+\-1* )
+
+That way the default is the superior GTK2 library.
+.TP
+.B || ( Atom Atom ... )
+When a package can work with a few different packages but a virtual is not
+appropriate, this syntax can easily be used.
+
+Example:
+.nf
+|| (
+ app\-games/unreal\-tournament
+ app\-games/unreal\-tournament\-goty
+)
+.fi
+
+Here we see that unreal\-tournament has a normal version and it has a goty
+version. Since they provide the same base set of files, another package can
+use either. Adding a virtual is inappropriate due to its small scope.
+
+Another good example is when a package can be built with multiple video
+interfaces, but it can only ever have just one.
+
+Example:
+.nf
+|| (
+ sdl? ( media\-libs/libsdl )
+ svga? ( media\-libs/svgalib )
+ opengl? ( virtual/opengl )
+ ggi? ( media\-libs/libggi )
+ virtual/x11
+)
+.fi
+
+Here only one of the packages will be chosen, and the order of preference is
+determined by the order in which they appear. So sdl has the best chance of
+being chosen, followed by svga, then opengl, then ggi, with a default of X if
+the user does not specify any of the previous choices.
+
+Note that if any of the packages listed are already merged, the package manager
+will use that to consider the dependency satisfied.
+
+.SS "Cross-compilation"
+Portage supports cross-compilation into a subdirectory specified by \fBROOT\fR.
+.TP
+.B Host
+\fIHost\fR in this context means the platform hosting the build process, i.e.
+what autotools calls CBUILD.
+Its packages are contained in the root of the filesystem ("\fI/\fR").
+
+If \fBROOT\fR is "\fI/\fR", all dependency types will be installed there.
+Otherwise, for EAPIs that support \fBHDEPEND\fR (experimental
+\fBEAPI 5-hdepend\fR), only \fBHDEPEND\fR is installed into "\fI/\fR".
+For EAPIs that do not support \fBHDEPEND\fR, the behaviour is controlled by the
+\fI\-\-root-deps\fR flag to \fBemerge\fR(1), defaulting to install only
+\fBDEPEND\fR into the \fIhost\fR.
+.TP
+.B Target
+\fITarget\fR refers to the platform that the package will later run on, i.e.
+what autotools calls CHOST.
+The directory housing this system is specified by \fBROOT\fR.
+If it is different from "\fI/\fR", i.e. \fIhost\fR and \fItarget\fR are not the
+same, this variable contains the path to the directory housing the \fItarget\fR
+system.
+
+For EAPIs that support \fBHDEPEND\fR (experimental \fBEAPI 5-hdepend\fR),
+\fBDEPEND\fR, \fBRDEPEND\fR, and \fBPDEPEND\fR
+list the \fItarget\fR dependencies, i.e. those to be installed into \fBROOT\fR.
+For EAPIs that do not support \fBHDEPEND\fR, the \fBemerge\fR(1) flag
+\fI\-\-root-deps\fR controls what the package manager installs there.
+Without it, \fBemerge\fR defaults to install only runtime dependencies (i.e.
+\fBRDEPEND\fR and \fBPDEPEND\fR) into \fBROOT\fR.
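+
+Example (hypothetical target path):
+.nf
+  ROOT=/mnt/target emerge sys\-libs/zlib
+.fi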
+.PP
+See section \fBVARIABLES\fR for more information about the \fBDEPEND\fR,
+\fBRDEPEND\fR and \fBHDEPEND\fR variables.
+.TP
+.B The targetroot USE flag
+For EAPIs that support the "\fItargetroot\fR" USE flag, that flag is
+automatically enabled by the package manager if \fIhost\fR and \fItarget\fR
+system are not the same, i.e. if the \fBROOT\fR is not "\fI/\fR".
+This is necessary where the package to be built needs an executable copy of
+itself during the build process.
+A known example is dev-lang/python, which needs to run a Python interpreter
+during compilation.
+
.SH "VARIABLES"
.TP
-.B MISC USAGE NOTES
-\- All variables defined in \fBmake.conf\fR(5) are available for use in
-ebuilds (such as the PORTAGE* and PORTDIR* variables)
+.B Usage Notes
+\- Variables defined in \fBmake.conf\fR(5) are available for use in
+ebuilds (except Portage\-specific variables, which might not be supported by
+other package managers).
.br
-\- When assigning values to variables in ebuilds, you \fBcannot have a
+\- When assigning values to variables in ebuilds, you \fIcannot have a
space\fR between the variable name and the equal sign.
.br
\- Variable values should only contain characters that are members of the
@@ -60,47 +376,57 @@ space\fR between the variable name and the equal sign.
.B P
This variable contains the package name without the ebuild revision.
This variable must NEVER be modified.
-.br
-\fBxfree\-4.2.1\-r2.ebuild\fR \-\-> \fB$P\fR=='\fIxfree\-4.2.1\fR'
+
+xfree\-4.2.1\-r2.ebuild \-\-> $P=='xfree\-4.2.1'
.TP
.B PN
Contains the name of the script without the version number.
-.br
-\fBxfree\-4.2.1\-r2.ebuild\fR \-\-> \fB$PN\fR=='\fIxfree\fR'
+
+xfree\-4.2.1\-r2.ebuild \-\-> $PN=='xfree'
.TP
.B PV
Contains the version number without the revision.
-.br
-\fBxfree\-4.2.1\-r2.ebuild\fR \-\-> \fB$PV\fR=='\fI4.2.1\fR'
+
+xfree\-4.2.1\-r2.ebuild \-\-> $PV=='4.2.1'
.TP
.B PR
Contains the revision number or 'r0' if no revision number exists.
-.br
-\fBxfree\-4.2.1\-r2.ebuild\fR \-\-> \fB$PR\fR=='\fIr2\fR'
+
+xfree\-4.2.1\-r2.ebuild \-\-> $PR=='r2'
.TP
.B PVR
Contains the version number with the revision.
-.br
-\fBxfree\-4.2.1\-r2.ebuild\fR \-\-> \fB$PVR\fR=='\fI4.2.1\-r2\fR'
+
+xfree\-4.2.1\-r2.ebuild \-\-> $PVR=='4.2.1\-r2'
.TP
.B PF
-Contains the full package name \fI[PN]\-[PVR]\fR
-.br
-\fBxfree\-4.2.1\-r2.ebuild\fR \-\-> \fB$PF\fR=='\fIxfree\-4.2.1\-r2\fR'
+Contains the full package name \fBPN\fR\-\fBPVR\fR
+
+xfree\-4.2.1\-r2.ebuild \-\-> $PF=='xfree\-4.2.1\-r2'
.TP
.B CATEGORY
Contains the package category name.
.TP
.B A
Contains all source files required for the package. This variable must
-not be defined. It is autogenerated from the \fISRC_URI\fR variable.
+not be defined. It is autogenerated from the \fBSRC_URI\fR variable.
.TP
-\fBWORKDIR\fR = \fI"${PORTAGE_TMPDIR}/portage/${CATEGORY}/${PF}/work"\fR
+.B WORKDIR\fR = \fI"${PORTAGE_TMPDIR}/portage/${CATEGORY}/${PF}/work"
Contains the path to the package build root. Do not modify this variable.
.TP
-\fBFILESDIR\fR = \fI"${PORTDIR}/${CATEGORY}/${PN}/files"\fR
-Contains the path to the 'files' sub folder in the package specific
-location in the portage tree. Do not modify this variable.
+.B FILESDIR\fR = \fI"${repository_location}/${CATEGORY}/${PN}/files"
+Contains the path to the 'files' subdirectory in the package\-specific
+location in the given repository. Do not modify this variable.
+.TP
+.B EBUILD_PHASE
+Contains the abbreviated name of the phase function that is
+currently executing, such as "setup", "unpack", "compile", or
+"preinst".
+.TP
+.B EBUILD_PHASE_FUNC
+Beginning with \fBEAPI 5\fR, contains the full name of the phase
+function that is currently executing, such as "pkg_setup",
+"src_unpack", "src_compile", or "pkg_preinst".
.TP
.B EPREFIX
Beginning with \fBEAPI 3\fR, contains the offset
@@ -110,17 +436,17 @@ and is available in such cases as ${EPREFIX}. EPREFIX does not contain
a trailing slash, therefore an absent offset is represented by the empty
string. Do not modify this variable.
.TP
-\fBS\fR = \fI"${WORKDIR}/${P}"\fR
+.B S\fR = \fI"${WORKDIR}/${P}"
Contains the path to the temporary \fIbuild directory\fR. This variable
is used by the functions \fIsrc_compile\fR and \fIsrc_install\fR. Both
are executed with \fIS\fR as the current directory. This variable may
be modified to match the extraction directory of a tarball for the package.
.TP
-\fBT\fR = \fI"${PORTAGE_TMPDIR}/portage/${CATEGORY}/${PF}/temp"\fR
+.B T\fR = \fI"${PORTAGE_TMPDIR}/portage/${CATEGORY}/${PF}/temp"
Contains the path to a \fItemporary directory\fR. You may use this for
whatever you like.
.TP
-\fBD\fR = \fI"${PORTAGE_TMPDIR}/portage/${CATEGORY}/${PF}/image/"\fR
+.B D\fR = \fI"${PORTAGE_TMPDIR}/portage/${CATEGORY}/${PF}/image/"
Contains the path to the temporary \fIinstall directory\fR. Every write
operation that does not involve the helper tools and functions (found below)
should be prefixed with ${D}.
@@ -129,12 +455,12 @@ to be taken into account here, for which the variable
${ED} is provided (see below).
Do not modify this variable.
.TP
-\fBED\fT = \fI"${PORTAGE_TMPDIR}/portage/${CATEGORY}/${PF}/image/${EPREFIX}/"\fR
+.B ED\fR = \fI"${PORTAGE_TMPDIR}/portage/${CATEGORY}/${PF}/image/${EPREFIX}/"
Beginning with \fBEAPI 3\fR, contains the path
"${D%/}${EPREFIX}/" for convenience purposes.
-For \fBEAPI\fR values prior to \fBEAPI 3\fR which do
-not support \fB${ED}\fR, helpers use \fB${D}\fR where
-they would otherwise use \fB${ED}\fR.
+For EAPI values prior to \fBEAPI 3\fR which do
+not support ED, helpers use \fBD\fR where
+they would otherwise use ED.
Do not modify this variable.
.TP
.B MERGE_TYPE
@@ -148,7 +474,6 @@ l l
__
l l.
Value Meaning
-
binary previously\-built which is scheduled for merge
buildonly source\-build which is not scheduled for merge
source source\-build which is scheduled for merge
@@ -157,7 +482,7 @@ source source\-build which is scheduled for merge
.TP
.B PORTAGE_LOG_FILE
Contains the path of the build log. If \fBPORT_LOGDIR\fR variable is unset then
-\fBPORTAGE_LOG_FILE\fR=\fB"${T}/build.log"\fR.
+PORTAGE_LOG_FILE=\fI"${T}/build.log"\fR.
.TP
.B REPLACED_BY_VERSION
Beginning with \fBEAPI 4\fR, the REPLACED_BY_VERSION variable can be
@@ -176,22 +501,22 @@ to the package version(s) being replaced. Typically, this variable will
not contain more than one version, but according to PMS it can contain
more.
.TP
-\fBROOT\fR = \fI"/"\fR
+.B ROOT\fR = \fI"/"
Contains the path that portage should use as the root of the live filesystem.
When packages wish to make changes to the live filesystem, they should do so in
the tree prefixed by ${ROOT}. Often the offset prefix needs to be taken
into account here, for which the variable ${EROOT} is provided (see
below). Do not modify this variable.
.TP
-\fBEROOT\fR = \fI"${ROOT%/}${EPREFIX}/"\fR
+.B EROOT\fR = \fI"${ROOT%/}${EPREFIX}/"
Beginning with \fBEAPI 3\fR, contains
"${ROOT%/}${EPREFIX}/" for convenience
purposes. Do not modify this variable.
.TP
-\fBDESCRIPTION\fR = \fI"A happy little package"\fR
+.B DESCRIPTION\fR = \fI"A happy little package"
Should contain a short description of the package.
.TP
-\fBEAPI\fR = \fI"0"\fR
+.B EAPI\fR = \fI"0"
Defines the ebuild API version to which this package conforms. If not
defined then it defaults to "0". If portage does not recognize the
EAPI value then it will mask the package and refuse to perform any
@@ -202,7 +527,7 @@ who uses the \fBebuild\fR(1) and \fBrepoman\fR(1) commands with this
package will be required to have a version of portage that recognizes
the EAPI to which this package conforms.
.TP
-\fBSRC_URI\fR = \fI"http://example.com/path/${P}.tar.gz"\fR
+.B SRC_URI\fR = \fI"http://example.com/path/${P}.tar.gz"
Contains a list of URIs for the required source files. It can contain
multiple URIs for a single source file. The list is processed in order
if the file was not found on any of the \fIGENTOO_MIRRORS\fR.
@@ -211,11 +536,11 @@ customized with a "->" operator on the right hand side, followed by the
desired output file name. All tokens, including the operator and output
file name, should be separated by whitespace.
.TP
-\fBHOMEPAGE\fR = \fI"http://example.com/"\fR
+.B HOMEPAGE\fR = \fI"http://example.com/"
Should contain a list of URIs for the sources main sites and other further
package dependent information.
.TP
-\fBKEYWORDS\fR = \fI[\-~][x86,ppc,sparc,mips,alpha,arm,hppa]\fR
+.B KEYWORDS\fR = \fI[\-~][x86,ppc,sparc,mips,alpha,arm,hppa]
Should contain appropriate list of arches that the ebuild is know to
work/not work. By default if you do not know if an ebuild runs under
a particular arch simply omit that KEYWORD. If the ebuild will not
@@ -226,19 +551,31 @@ unmasked for testing by setting ACCEPT_KEYWORDS="~arch" on the command
line, or in \fBmake.conf\fR(5)) For an authoritative list please review
/usr/portage/profiles/arch.list. Please keep this list in alphabetical order.
.TP
-\fBSLOT\fR
+.B SLOT
This sets the SLOT for packages that may need to have multiple versions
co\-exist. By default you should set \fBSLOT\fR="0". If you are unsure, then
do not fiddle with this until you seek some guidance from some guru. This
value should \fINEVER\fR be left undefined.
-.TP
-\fBLICENSE\fR
+
+Beginning with \fBEAPI 5\fR, the SLOT variable may contain
+an optional sub\-slot part that follows the regular slot and
+is delimited by a / character. The sub\-slot must be a valid
+slot name. The sub\-slot is used to represent cases in which
+an upgrade to a new version of a package with a different
+sub\-slot may require dependent packages to be rebuilt. When
+the sub\-slot part is omitted from the SLOT definition, the
+package is considered to have an implicit sub\-slot which is
+equal to the regular slot. Refer to the \fBAtom Slot
+Operators\fR section for more information about sub\-slot
+usage.
+.TP
+.B LICENSE
This should be a space delimited list of licenses that the package falls
under. This \fB_must_\fR be set to a matching license in
/usr/portage/licenses/. If the license does not exist in portage yet, you
must add it first.
.TP
-\fBIUSE\fR
+.B IUSE
This should be a list of any and all USE flags that are leveraged within
your build script. The only USE flags that should not be listed here are
arch related flags (see \fBKEYWORDS\fR). Beginning with \fBEAPI 1\fR, it
@@ -250,237 +587,51 @@ negative IUSE default settings are effective only for negation of
repo\-level USE settings, since profile and user configuration settings
override them.
.TP
-\fBDEPEND\fR
-This should contain a list of all packages that are required for the
-program to compile.
-.RS
-.TP
-.B DEPEND Atoms
-A depend atom is simply a dependency that is used by portage when calculating
-relationships between packages. Please note that if the atom has not already
-been emerged, then the latest version available is matched.
-.RS
-.TP
-.B Atom Bases
-The base atom is just a full category/packagename. Hence, these are base atoms:
+.B DEPEND
+This should contain a list of all packages that are required for the program
+to compile (aka \fIbuildtime\fR dependencies). These are usually libraries and
+headers.
-.nf
-.I sys\-apps/sed
-.I sys\-libs/zlib
-.I net\-misc/dhcp
-.fi
-.TP
-.B Atom Versions
-It is nice to be more specific and say that only certain versions of atoms are
-acceptable. Note that versions must be combined with a prefix (see below).
-Hence you may add a version number as a postfix to the base:
+Starting from experimental \fBEAPI 5-hdepend\fR, tools should go into the
+\fBHDEPEND\fR variable instead, as \fBDEPEND\fR will only be installed into the
+\fItarget\fR system and hence cannot be executed in a cross\-compile setting.
+(See section \fBCross\-compilation\fR for more information.)
-.nf
-sys\-apps/sed\fI\-4.0.5\fR
-sys\-libs/zlib\fI\-1.1.4\-r1\fR
-net\-misc/dhcp\fI\-3.0_p2\fR
-.fi
-
-Versions are normally made up of two or three numbers separated by periods, such
-as 1.2 or 4.5.2. This string may be followed by a character such as 1.2a or
-4.5.2z. Note that this letter is \fBnot\fR meant to indicate alpha, beta,
-etc... status. For that, use the optional suffix; either _alpha, _beta, _pre
-(pre\-release), _rc (release candidate), or _p (patch). This means for the
-3rd pre\-release of a package, you would use something like 1.2_pre3. The
-suffixes here can be arbitrarily chained without limitation.
+You may use the syntax described above in the \fBDependencies\fR section.
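+
+Example (hypothetical dependencies):
+.nf
+  DEPEND="sys\-libs/zlib dev\-libs/expat"
+.fi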
.TP
-.B Atom Prefix Operators [> >= = <= <]
-Sometimes you want to be able to depend on general versions rather than specifying
-exact versions all the time. Hence we provide standard boolean operators:
-
-.nf
-\fI>\fRmedia\-libs/libgd\-1.6
-\fI>=\fRmedia\-libs/libgd\-1.6
-\fI=\fRmedia\-libs/libgd\-1.6
-\fI<=\fRmedia\-libs/libgd\-1.6
-\fI<\fRmedia\-libs/libgd\-1.6
-.fi
-.TP
-.B Extended Atom Prefixes [!~] and Postfixes [*]
-Now to get even fancier, we provide the ability to define blocking packages and
-version range matching. Also note that these extended prefixes/postfixes may
-be combined in any way with the atom classes defined above. Here are some common
-examples you may find in the portage tree:
-
-.nf
-\fI!\fRapp\-text/dos2unix
-=dev\-libs/glib\-2\fI*\fR
-\fI!\fR=net\-fs/samba\-2\fI*\fR
-\fI~\fRnet\-libs/libnet\-1.0.2a
-\fI!!\fR<sys\-apps/portage\-2.1.4_rc1\fI\fR
-.fi
-
-\fI!\fR means block packages from being installed at the same time.
-.br
-\fI!!\fR means block packages from being installed at the same time
-and explicitly disallow them from being temporarily installed
-simultaneously during a series of upgrades. This syntax is supported
-beginning with \fBEAPI 2\fR.
-.br
-\fI*\fR means match any version of the package so long
-as the specified string prefix is matched. So with a
-version of '2*', we can match '2.1', '2.2', '2.2.1',
-etc... and not match version '1.0', '3.0', '4.1', etc...
-Beware that, due to the string matching nature, '20'
-will also be matched by '2*'. The version part
-that comes before the '*' must be a valid version in the absence of the '*'.
-For example, '2' is a valid version and '2.' is not. Therefore, '2*' is
-allowed and '2.*' is not.
-.br
-\fI~\fR means match any revision of the base version specified. So in the
-above example, we would match versions '1.0.2a', '1.0.2a\-r1', '1.0.2a\-r2',
-etc...
-.TP
-.B Atom Slots
-Beginning with \fBEAPI 1\fR, any atom can be constrained to match a specific
-\fBSLOT\fR. This is accomplished by appending a colon followed by a
-\fBSLOT\fR:
+.B RDEPEND
+This should contain a list of all packages that are required for this
+program to run (aka \fIruntime\fR dependencies). These are usually libraries.
-.nf
-x11\-libs/qt:3
-\fI~\fRx11\-libs/qt-3.3.8:3
-\fI>=\fRx11\-libs/qt-3.3.8:3
-\fI=\fRx11\-libs/qt-3.3*:3
-.fi
-.TP
-.B Atom USE
-Beginning with \fBEAPI 2\fR, any atom can be constrained to match specific
-\fBUSE\fR flag settings. When used together with \fBSLOT\fR dependencies,
-\fBUSE\fR dependencies appear on the right hand side of \fBSLOT\fR
-dependencies.
+In \fBEAPI 3\fR or earlier, if this is not set, then it defaults to the value
+of \fBDEPEND\fR. In \fBEAPI 4\fR or later, \fBRDEPEND\fR will never be
+implicitly set.
-.RS
+You may use the syntax described above in the \fBDependencies\fR section.
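+
+Example (hypothetical dependencies):
+.nf
+  RDEPEND="sys\-libs/zlib ssl? ( dev\-libs/openssl )"
+.fi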
.TP
-.B Unconditional USE Dependencies
-.TS
-l l
-__
-l l.
-Example Meaning
+.B HDEPEND
+This should contain a list of all packages that are required to be executable
+during compilation of this program (aka \fIhost\fR buildtime dependencies).
+These are usually tools, like interpreters or (cross\-)compilers.
-foo[bar] foo must have bar enabled
-foo[bar,baz] foo must have both bar and baz enabled
-foo[\-bar,baz] foo must have bar disabled and baz enabled
-.TE
+This variable is new in experimental \fBEAPI 5-hdepend\fR and will be installed
+into the \fIhost\fR system.
+(See section \fBCross-compilation\fR for more information.)
+You may use the syntax described above in the \fBDependencies\fR section.
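+
+Example (hypothetical host tools):
+.nf
+  HDEPEND="virtual/pkgconfig dev\-lang/perl"
+.fi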
.TP
-.B Conditional USE Dependencies
-.TS
-l l
-__
-l l.
-Compact Form Equivalent Expanded Form
+.B PDEPEND
+This should contain a list of all packages that should be merged after this
+one (aka \fIpost\fR merge dependencies). If that is not possible, the
+package manager may install them at any time.
-foo[bar?] bar? ( foo[bar] ) !bar? ( foo )
-foo[!bar?] bar? ( foo ) !bar? ( foo[\-bar] )
-foo[bar=] bar? ( foo[bar] ) !bar? ( foo[\-bar] )
-foo[!bar=] bar? ( foo[\-bar] ) !bar? ( foo[bar] )
-.TE
-.RE
-.TP
-.B Atom USE defaults
-Beginning with \fBEAPI 4\fR, \fBUSE\fR dependencies may specify default
-assumptions about values for flags that may or may not be missing from
-the \fBIUSE\fR of the matched package. Such defaults are specified by
-immediately following a flag with either \fB(+)\fR or \fB(\-)\fR. Use
-\fB(+)\fR to behave as if a missing flag is present and enabled, or
-\fB(\-)\fR to behave as if it is present and disabled:
-
-.RS
-.nf
-media\-video/ffmpeg[threads(+)]
-media\-video/ffmpeg[-threads(\-)]
-.fi
-.RE
-.RE
-.TP
-.B Dynamic DEPENDs
-Sometimes programs may depend on different things depending on the USE
-variable. Portage offers a few options to handle this. Note that when
-using the following syntaxes, each case is considered as 1 Atom in the
-scope it appears. That means that each Atom both conditionally include
-multiple Atoms and be nested to an infinite depth.
-.RS
-.TP
-.B usevar? ( DEPEND Atom )
-To include the jpeg library when the user has jpeg in \fBUSE\fR, simply use the
-following syntax:
-.br
-.B jpeg? ( media\-libs/jpeg )
-.TP
-.B !usevar? ( Atom )
-If you want to include a package only if the user does not have a certain option
-in their \fBUSE\fR variable, then use the following syntax:
-.br
-.B !nophysfs? ( dev\-games/physfs )
-.br
-This is often useful for those times when you want to want to add optional support
-for a feature and have it enabled by default.
-.TP
-.B usevar? ( Atom if true ) !usevar? ( Atom if false )
-For functionality like the tertiary operator found in C you must use
-two statements, one normal and one inverted. If a package uses
-GTK2 or GTK1, but not both, then you can handle that like this:
-.br
-.B gtk2? ( =x11\-libs/gtk+\-2* ) !gtk2? ( =x11\-libs/gtk+\-1* )
-.br
-That way the default is the superior GTK2 library.
-.TP
-.B || ( Atom Atom ... )
-When a package can work with a few different packages but a virtual is not
-appropriate, this syntax can easily be used.
-.nf
-.B || (
-.B app\-games/unreal\-tournament
-.B app\-games/unreal\-tournament\-goty
-.B )
-.fi
-Here we see that unreal\-tournament has a normal version and it has a goty
-version. Since they provide the same base set of files, another package can
-use either. Adding a virtual is inappropriate due to the small scope of it.
-.br
-Another good example is when a package can be built with multiple video
-interfaces, but it can only ever have just one.
-.nf
-.B || (
-.B sdl? ( media\-libs/libsdl )
-.B svga? ( media\-libs/svgalib )
-.B opengl? ( virtual/opengl )
-.B ggi? ( media\-libs/libggi )
-.B virtual/x11
-.B )
-.fi
-Here only one of the packages will be chosen, and the order of preference is
-determined by the order in which they appear. So sdl has the best chance of
-being chosen, followed by svga, then opengl, then ggi, with a default of X if
-the user does not specify any of the previous choices.
+.B ***WARNING***
.br
-Note that if any of the packages listed are already merged, the package manager
-will use that to consider the dependency satisfied.
-.RE
+Use this only as a last resort to break cyclic dependencies!
-.RE
-.TP
-\fBRDEPEND\fR
-This should contain a list of all packages that are required for this
-program to run (aka runtime depend). If this is not set in \fBEAPI 3\fR
-or earlier, then it defaults to the value of \fBDEPEND\fR. In
-\fBEAPI 4\fR or later, \fBRDEPEND\fR will never be implicitly set.
-.br
-You may use the same syntax to vary dependencies as seen above in \fBDEPEND\fR.
+You may use the syntax described above in the \fBDependencies\fR section.
.TP
-\fBPDEPEND\fR
-This should contain a list of all packages that should be merged after this one,
-but may be merged before if need be.
-.br
-You may use the same syntax to vary dependencies as seen above in \fBDEPEND\fR.
-.TP
-\fBREQUIRED_USE\fR
+.B REQUIRED_USE
Beginning with \fBEAPI 4\fR, the \fBREQUIRED_USE\fR variable can be
used to specify combinations of \fBUSE\fR flags that are allowed
or not allowed. Elements can be nested when necessary.
@@ -489,16 +640,16 @@ l l
__
l l.
Behavior Expression
-
If flag1 enabled then flag2 disabled flag1? ( !flag2 )
If flag1 enabled then flag2 enabled flag1? ( flag2 )
If flag1 disabled then flag2 enabled !flag1? ( flag2 )
If flag1 disabled then flag2 disabled !flag1? ( !flag2 )
Must enable any one or more (inclusive or) || ( flag1 flag2 flag3 )
Must enable exactly one but not more (exclusive or) ^^ ( flag1 flag2 flag3 )
+May enable at most one (EAPI 5 or later) ?? ( flag1 flag2 flag3 )
.TE
.TP
-\fBRESTRICT\fR = \fI[strip,mirror,fetch,userpriv]\fR
+.B RESTRICT\fR = \fI[strip,mirror,fetch,userpriv]
This should be a space delimited list of portage features to restrict.
You may use conditional syntax to vary restrictions as seen above in DEPEND.
.PD 0
@@ -524,9 +675,19 @@ binaries that are not compatible with debugedit.
.I mirror
files in \fBSRC_URI\fR will not be downloaded from the \fBGENTOO_MIRRORS\fR.
.TP
+.I preserve\-libs
+Disables preserve\-libs for specific packages. Note than when a package is
+merged, RESTRICT=preserve\-libs applies if either the new instance or the
+old instance sets RESTRICT=preserve\-libs.
+.TP
.I primaryuri
fetch from URIs in \fBSRC_URI\fR before \fBGENTOO_MIRRORS\fR.
.TP
+.I splitdebug
+Disables splitdebug for specific packages. This is for packages with
+binaries that trigger problems with splitdebug, such as file\-collisions
+between symlinks in /usr/lib/debug/.build-id (triggered by bundled libraries).
+.TP
.I strip
final binaries/libraries will not be stripped of debug symbols.
.TP
@@ -538,7 +699,7 @@ Disables userpriv for specific packages.
.RE
.PD 1
.TP
-\fBPROPERTIES\fR = \fI[interactive]\fR
+.B PROPERTIES\fR = \fI[interactive]
A space delimited list of properties, with conditional syntax support.
.PD 0
.RS
@@ -548,30 +709,35 @@ One or more ebuild phases will produce a prompt that requires user interaction.
.RE
.PD 1
.TP
-\fBPROVIDE\fR = \fI"virtual/TARGET"\fR
+.B PROVIDE\fR = \fI"virtual/TARGET"
This variable should only be used when a package provides a virtual target.
For example, blackdown\-jdk and sun\-jdk provide \fIvirtual/jdk\fR. This
allows for packages to depend on \fIvirtual/jdk\fR rather than on blackdown
or sun specifically.
+
+The \fBPROVIDE\fR variable has been deprecated. See
+\fIhttp://www.gentoo.org/proj/en/glep/glep-0037.html\fR for details.
+
.TP
-\fBDOCS\fR
+.B DOCS
Beginning with \fBEAPI 4\fR, an array or space\-delimited list of documentation
files for the default src_install function to install using dodoc. If
undefined, a reasonable default list is used. See the documentation for
src_install below.
-.SH "QA CONTROL VARIABLES"
+
+.SS "QA Control Variables:"
.TP
-.B USAGE NOTES
+.B Usage Notes
Several QA variables are provided which allow an ebuild to manipulate some
of the QA checks performed by portage. Use of these variables in ebuilds
should be kept to an absolute minimum otherwise they defeat the purpose
of the QA checks, and their use is subject to agreement of the QA team.
They are primarily intended for use by ebuilds that install closed\-source
binary objects that cannot be altered.
-.br
+
Note that objects that violate these rules may fail on some architectures.
.TP
-\fBQA_PREBUILT\fR
+.B QA_PREBUILT
This should contain a list of file paths, relative to the image
directory, of files that are pre\-built binaries. Paths
listed here will be appended to each of the QA_* variables
@@ -581,65 +747,78 @@ the QA_* variables that support regular expressions instead
of fnmatch patterns. The translation mechanism simply replaces
"*" with ".*".
.TP
-\fBQA_TEXTRELS\fR
+.B QA_TEXTRELS
This variable can be set to a list of file paths, relative to the image
directory, of files that contain text relocations that cannot be eliminated.
The paths may contain fnmatch patterns.
-.br
+
This variable is intended to be used on closed\-source binary objects that
cannot be altered.
.TP
-\fBQA_EXECSTACK\fR
+.B QA_EXECSTACK
This should contain a list of file paths, relative to the image directory, of
objects that require executable stack in order to run.
The paths may contain fnmatch patterns.
-.br
+
This variable is intended to be used on objects that truly need executable
stack (i.e. not those marked to need it which in fact do not).
.TP
-\fBQA_WX_LOAD\fR
+.B QA_WX_LOAD
This should contain a list of file paths, relative to the image directory, of
files that contain writable and executable segments. These are rare.
The paths may contain fnmatch patterns.
.TP
-\fBQA_FLAGS_IGNORED\fR
+.B QA_FLAGS_IGNORED
This should contain a list of file paths, relative to the image directory, of
files that do not contain .GCC.command.line sections or contain .hash sections.
-The paths may contain regular expressions with escape\-quoted special characters.
-.br
+The paths may contain regular expressions with escape\-quoted special
+characters.
+
This variable is intended to be used on files of binary packages which ignore
CFLAGS, CXXFLAGS, FFLAGS, FCFLAGS, and LDFLAGS variables.
.TP
-.TP
-\fBQA_DT_HASH\fR
+.B QA_MULTILIB_PATHS
This should contain a list of file paths, relative to the image directory, of
-files that contain .hash sections. The paths may contain regular expressions
-with escape\-quoted special characters. This variable is deprecated. Use
-\fBQA_FLAGS_IGNORED\fR instead.
-.br
-This variable is intended to be used on files of binary packages which ignore
-LDFLAGS variable.
+files that should be ignored for the multilib\-strict checks.
+The paths may contain regular expressions with escape\-quoted special
+characters.
.TP
-\fBQA_PRESTRIPPED\fR
+.B QA_PRESTRIPPED
This should contain a list of file paths, relative to the image directory, of
files that contain pre-stripped binaries. The paths may contain regular
expressions with escape\-quoted special characters.
.TP
-\fBQA_SONAME\fR
+.B QA_SONAME
This should contain a list of file paths, relative to the image directory, of
shared libraries that lack SONAMEs. The paths may contain regular expressions
with escape\-quoted special characters.
.TP
-\fBQA_SONAME_NO_SYMLINK\fR
+.B QA_SONAME_NO_SYMLINK
This should contain a list of file paths, relative to the image directory, of
shared libraries that have SONAMEs but should not have a corresponding SONAME
symlink in the same directory. The paths may contain regular expressions
with escape\-quoted special characters.
.TP
-\fBQA_DT_NEEDED\fR
+.B QA_AM_MAINTAINER_MODE
+This should contain a list of lines containing automake missing \-\-run
+commands. The lines may contain regular expressions with escape\-quoted
+special characters.
+.TP
+.B QA_CONFIGURE_OPTIONS
+This should contain a list of configure options which trigger warnings about
+unrecognized options. The options may contain regular expressions with
+escape\-quoted special characters.
+.TP
+.B QA_DT_NEEDED
This should contain a list of file paths, relative to the image directory, of
shared libraries that lack NEEDED entries. The paths may contain regular
expressions with escape\-quoted special characters.
+.TP
+.B QA_DESKTOP_FILE
+This should contain a list of file paths, relative to the image directory, of
+desktop files which should not be validated. The paths may contain regular
+expressions with escape\-quoted special characters.
+
.SH "PORTAGE DECLARATIONS"
.TP
.B inherit
@@ -652,6 +831,7 @@ ebuild. Specification of the eclasses contains only their name and not the
\fI.eclass\fR extension. Also note that the inherit statement must come
before other variable declarations unless these variables are used in global
scope of eclasses.
+
.SH "PHASE FUNCTIONS"
.TP
.B pkg_pretend
@@ -664,9 +844,10 @@ is used to execute pkg_pretend is not saved and therefore is not
available in phases that execute afterwards.
.TP
.B pkg_nofetch
-If you turn on \fIfetch\fR in \fBRESTRICT\fR, then this function will be
-run when the files in \fBSRC_URI\fR cannot be found. Useful for
-displaying information to the user on *how* to obtain said files. All
+This function will be executed when the files in \fBSRC_URI\fR
+cannot be fetched for any reason. If you turn on \fIfetch\fR in
+\fBRESTRICT\fR, this is useful for displaying information to the
+user on *how* to obtain said files. All
you have to do is output a message and let the function return. Do not
end the function with a call to \fBdie\fR.
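+
+A minimal sketch (the URL is illustrative):
+.nf
+pkg_nofetch() {
+	einfo "Please download ${A} from http://example.com/ and"
+	einfo "place it in ${DISTDIR}."
+}
+.fi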
.TP
@@ -674,45 +855,48 @@ end the function with a call to \fBdie\fR.
This function can be used if the package needs specific setup actions or
checks to be performed before anything else.
.br
-Initial working directory of ${PORTAGE_TMPDIR}.
+Initial working directory: $PORTAGE_TMPDIR
.TP
.B src_unpack
This function is used to unpack all the sources in \fIA\fR to \fIWORKDIR\fR.
If not defined in the \fIebuild script\fR it calls \fIunpack ${A}\fR. Any
patches and other pre configure/compile modifications should be done here.
.br
-Initial working directory of $WORKDIR.
+Initial working directory: $WORKDIR
.TP
.B src_prepare
All preparation of source code, such as application of patches, should be done
here. This function is supported beginning with \fBEAPI 2\fR.
.br
-Initial working directory of $S.
+Initial working directory: $S
.TP
.B src_configure
All necessary steps for configuration should be done here. This function is
supported beginning with \fBEAPI 2\fR.
.br
-Initial working directory of $S.
+Initial working directory: $S
.TP
.B src_compile
Prior to \fBEAPI 2\fR, all necessary steps for both configuration and
compilation should be done here. Beginning with \fBEAPI 2\fR, only compilation
steps should be done here.
.br
-Initial working directory of $S.
+Initial working directory: $S
.TP
.B src_test
-Run all package specific test cases. The default is to run 'make check'
-followed 'make test'.
+Run all package specific test cases. The default is to run
+\'emake check\' followed by \'emake test\'. Prior to \fBEAPI 5\fR,
+the default src_test implementation will automatically pass the
+\-j1 option as the last argument to emake, and beginning with
+\fBEAPI 5\fR it will allow the tests to run in parallel.
.br
-Initial working directory of $S.
+Initial working directory: $S
.TP
.B src_install
Should contain everything required to install the package in the temporary
\fIinstall directory\fR.
.br
-Initial working directory of $S.
+Initial working directory: $S
+.br
Beginning with \fBEAPI 4\fR, if src_install is undefined then the
following default implementation is used:
@@ -742,18 +926,20 @@ All modifications required on the live\-filesystem before and after the
package is merged should be placed here. Also commentary for the user
should be listed here as it will be displayed last.
.br
-Initial working directory of $PWD.
+Initial working directory: $PWD
.TP
.B pkg_prerm pkg_postrm
Like the pkg_*inst functions but for unmerge.
.br
-Initial working directory of $PWD.
+Initial working directory: $PWD
.TP
.B pkg_config
This function should contain optional basic configuration steps.
.br
-Initial working directory of $PWD.
-.SH "HELPER FUNCTIONS: PHASES"
+Initial working directory: $PWD
+
+.SH "HELPER FUNCTIONS"
+.SS "Phases:"
.TP
.B default
Calls the default phase function implementation for the currently executing
@@ -772,7 +958,6 @@ l
_
l.
Default Phase Functions
-
default_pkg_nofetch
default_src_unpack
default_src_prepare
@@ -781,9 +966,10 @@ default_src_compile
default_src_test
.TE
.RE
-.SH "HELPER FUNCTIONS: GENERAL"
+
+.SS "General:"
.TP
-\fBdie\fR \fI[reason]\fR
+.B die\fR \fI[reason]
Causes the current emerge process to be aborted. The final display will
include \fIreason\fR.
@@ -791,11 +977,11 @@ Beginning with \fBEAPI 4\fR, all helpers automatically call \fBdie\fR
whenever some sort of error occurs. Helper calls may be prefixed with
the \fBnonfatal\fR helper in order to prevent errors from being fatal.
.TP
-\fBnonfatal\fR \fI<helper>\fR
+.B nonfatal\fR \fI<helper>
Execute \fIhelper\fR and \fIdo not\fR call die if it fails.
The \fBnonfatal\fR helper is available beginning with \fBEAPI 4\fR.
.TP
-\fBuse\fR \fI<USE item>\fR
+.B use\fR \fI<USE item>
If \fIUSE item\fR is in the \fBUSE\fR variable, the function will silently
return 0 (aka shell true). If \fIUSE item\fR is not in the \fBUSE\fR
variable, the function will silently return 1 (aka shell false). \fBusev\fR
@@ -817,17 +1003,26 @@ fi
.fi
.RE
.TP
-\fBuse_with\fR \fI<USE item>\fR \fI[configure name]\fR \fI[configure opt]\fR
+.B usev\fR \fI<USE item>
+Like \fBuse\fR, but also echoes \fIUSE item\fR when \fBuse\fR returns true.
+.TP
+.B usex\fR \fI<USE flag>\fR \fI[true output]\fR \fI[false output]\fR \fI[true \
+suffix]\fR \fI[false suffix]
+If USE flag is set, echo [true output][true suffix] (defaults to
+"yes"), otherwise echo [false output][false suffix] (defaults to
+"no"). The usex helper is available beginning with \fBEAPI 5\fR.
+.TP
+.B use_with\fR \fI<USE item>\fR \fI[configure name]\fR \fI[configure opt]
Useful for creating custom options to pass to a configure script. If \fIUSE
item\fR is in the \fBUSE\fR variable and a \fIconfigure opt\fR is specified,
-then the string \fI\-\-with\-[configure name]=[configure opt]\fR will be echoed.
-If \fIconfigure opt\fR is not specified, then just \fI\-\-with\-[configure
-name]\fR will be echoed. If \fIUSE item\fR is not in the \fBUSE\fR variable,
-then the string \fI\-\-without\-[configure name]\fR will be echoed. If
-\fIconfigure name\fR is not specified, then \fIUSE item\fR will be used in
-its place. Beginning with \fBEAPI 4\fR, an empty \fIconfigure opt\fR argument
-is recognized. In \fBEAPI 3\fR and earlier, an empty \fIconfigure opt\fR
-argument is treated as if it weren't provided.
+then the string \fI\-\-with\-[configure name]=[configure opt]\fR will be
+echoed. If \fIconfigure opt\fR is not specified, then just
+\fI\-\-with\-[configure name]\fR will be echoed. If \fIUSE item\fR is not in
+the \fBUSE\fR variable, then the string \fI\-\-without\-[configure name]\fR
+will be echoed. If \fIconfigure name\fR is not specified, then \fIUSE item\fR
+will be used in its place. Beginning with \fBEAPI 4\fR, an empty \fIconfigure
+opt\fR argument is recognized. In \fBEAPI 3\fR and earlier, an empty
+\fIconfigure opt\fR argument is treated as if it weren't provided.
.RS
.TP
.I Examples:
@@ -850,88 +1045,96 @@ myconf=$(use_with sdl SDL all\-plugins)
.fi
.RE
.TP
-\fBuse_enable\fR \fI<USE item>\fR \fI[configure name]\fR \fI[configure opt]\fR
+.B use_enable\fR \fI<USE item>\fR \fI[configure name]\fR \fI[configure opt]
Same as \fBuse_with\fR above, except that the configure options are
-\fI\-\-enable\-\fR instead of \fI\-\-with\-\fR and \fI\-\-disable\-\fR instead of
-\fI\-\-without\-\fR. Beginning with \fBEAPI 4\fR, an empty \fIconfigure opt\fR
-argument is recognized. In \fBEAPI 3\fR and earlier, an empty
+\fI\-\-enable\-\fR instead of \fI\-\-with\-\fR and \fI\-\-disable\-\fR instead
+of \fI\-\-without\-\fR. Beginning with \fBEAPI 4\fR, an empty \fIconfigure
+opt\fR argument is recognized. In \fBEAPI 3\fR and earlier, an empty
\fIconfigure opt\fR argument is treated as if it weren't provided.
.TP
-\fBhasv\fR \fI<item>\fR \fI<item list>\fR
-If \fIitem\fR is in \fIitem list\fR, then \fIitem\fR is echoed and \fBhasv\fR
-returns 0. Otherwise, nothing is echoed and 1 is returned. As indicated with
-use, there is a non\-echoing version \fBhas\fR. Please use \fBhas\fR in all
-places where output is to be disregarded. Never use the output for calculation.
+.B has\fR \fI<item>\fR \fI<item list>
+If \fIitem\fR is in \fIitem list\fR, then \fBhas\fR returns
+0. Otherwise, 1 is returned. There is another version, \fBhasv\fR, that
+will conditionally echo \fIitem\fR.
.br
The \fIitem list\fR is delimited by the \fIIFS\fR variable. This variable
has a default value of ' ', or a space. It is a \fBbash\fR(1) setting.
.TP
-\fBhas_version\fR \fI<category/package\-version>\fR
+.B hasv\fR \fI<item>\fR \fI<item list>
+Like \fBhas\fR, but also echoes \fIitem\fR when \fBhas\fR returns true.
+.TP
+.B has_version\fR \fI[\-\-host\-root]\fR \fI<category/package\-version>
Check to see if \fIcategory/package\-version\fR is installed on the system.
The parameter accepts all values that are acceptable in the \fBDEPEND\fR
variable. The function returns 0 if \fIcategory/package\-version\fR is
-installed, 1 otherwise.
+installed, 1 otherwise. Beginning with \fBEAPI 5\fR, the
+\-\-host\-root option may be used in order to cause the query
+to apply to the host root instead of ${ROOT}.
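+
+Example (the atom is illustrative):
+.nf
+if has_version ">=dev\-libs/openssl\-1.0" ; then
+	einfo "a recent OpenSSL is installed"
+fi
+.fi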
.TP
-\fBbest_version\fR \fI<package name>\fR
+.B best_version\fR \fI[\-\-host\-root]\fR \fI<package name>
This function will look up \fIpackage name\fR in the database of currently
installed programs and echo the "best version" of the package that is
-currently installed.
-.RS
-.TP
-.I Example:
-VERINS="$(best_version net\-ftp/glftpd)"
-.br
-(VERINS now has the value "net\-ftp/glftpd\-1.27" if glftpd\-1.27 is installed)
-.RE
-.SH "HELPER FUNCTIONS: HOOKS"
+currently installed. Beginning with \fBEAPI 5\fR, the
+\-\-host\-root option may be used in order to cause the query
+to apply to the host root instead of ${ROOT}.
+
+Example:
+.nf
+ VERINS="$(best_version net\-ftp/glftpd)"
+ (VERINS now has the value "net\-ftp/glftpd\-1.27" if glftpd\-1.27 is \
+ installed)
+.fi
+
+.SS "Hooks:"
.TP
-\fBregister_die_hook\fR \fI[list of function names]\fR
+.B register_die_hook\fR \fI[list of function names]
Register one or more functions to call when the ebuild fails for any reason,
including file collisions with other packages.
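+
+A minimal sketch (my_cleanup is a hypothetical function defined by the
+ebuild):
+.nf
+my_cleanup() {
+	rm \-f "${T}/build.lock"
+}
+register_die_hook my_cleanup
+.fi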
.TP
-\fBregister_success_hook\fR \fI[list of function names]\fR
+.B register_success_hook\fR \fI[list of function names]
Register one or more functions to call when the ebuild builds and/or installs
successfully.
+
+.SS "Output:"
.TP
-.RE
-.SH "HELPER FUNCTIONS: OUTPUT"
-.TP
-\fBeinfo\fR \fI"disposable message"\fR
+.B einfo\fR \fI"disposable message"
Same as \fBelog\fR, but should be used when the message isn't important to the
user (like progress or status messages during the build process).
.TP
-\fBelog\fR \fI"informative message"\fR
+.B elog\fR \fI"informative message"
If you need to display a message that you wish the user to read and take
notice of, then use \fBelog\fR. It works just like \fBecho\fR(1), but
adds a little more to the output so as to catch the user's eye. The message
will also be logged by portage for later review.
.TP
-\fBewarn\fR \fI"warning message"\fR
+.B ewarn\fR \fI"warning message"
Same as \fBeinfo\fR, but should be used when showing a warning to the user.
.TP
-\fBeqawarn\fR \fI"QA warning message"\fR
+.B eqawarn\fR \fI"QA warning message"
Same as \fBeinfo\fR, but should be used when showing a QA warning to the user.
.TP
-\fBeerror\fR \fI"error message"\fR
+.B eerror\fR \fI"error message"
Same as \fBeinfo\fR, but should be used when showing an error to the user.
.TP
-\fBebegin\fR \fI"helpful message"\fR
+.B ebegin\fR \fI"helpful message"
Like \fBeinfo\fR, we output a \fIhelpful message\fR and then hint that the
following operation may take some time to complete. Once the task is
finished, you need to call \fBeend\fR.
.TP
-\fBeend\fR \fI<status>\fR \fI["error message"]\fR
+.B eend\fR \fI<status>\fR \fI["error message"]
Follow up the \fBebegin\fR message with an appropriate "OK" or "!!" (for
errors) marker. If \fIstatus\fR is non\-zero, then the additional \fIerror
message\fR is displayed.
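+
+Typical usage (the message and command are illustrative):
+.nf
+ebegin "Generating sample data"
+./generate_samples.sh
+eend $? "sample generation failed"
+.fi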
-.SH "HELPER FUNCTIONS: UNPACK"
+
+.SS "Unpack:"
.TP
-\fBunpack\fR \fI<source>\fR \fI[list of more sources]\fR
+.B unpack\fR \fI<source>\fR \fI[list of more sources]
This function uncompresses and/or untars a list of sources into the current
directory. The function will append \fIsource\fR to the \fBDISTDIR\fR variable.
-.SH "HELPER FUNCTIONS: COMPILE"
+
+.SS "Compile:"
.TP
-\fBeconf\fR \fI[configure options]\fR
+.B econf\fR \fI[configure options]
This is used as a replacement for configure. Performs:
.nf
${\fIECONF_SOURCE\fR:-.}/configure \\
@@ -958,21 +1161,26 @@ Beginning with \fBEAPI 4\fR, \fBeconf\fR adds
\fI\-\-disable\-dependency\-tracking\fR to the arguments if the
string \fIdisable\-dependency\-tracking\fR occurs in the output
of \fIconfigure \-\-help\fR.
+Beginning with \fBEAPI 5\fR, \fBeconf\fR adds
+\fI\-\-disable\-silent\-rules\fR to the arguments if the
+string \fIdisable\-silent\-rules\fR occurs in the output
+of \fIconfigure \-\-help\fR.
.TP
-\fBemake\fR \fI[make options]\fR
+.B emake\fR \fI[make options]
This is used as a replacement for make. Performs 'make ${MAKEOPTS}
\fImake options\fR' (as set in make.globals), default is MAKEOPTS="\-j2".
-\fB***warning***\fR
+.B ***WARNING***
.br
if you are going to use \fBemake\fR, make sure your build is happy with
parallel makes (make \-j2). It should be tested thoroughly as parallel
makes are notorious for failing _sometimes_ but not always. If you determine
that your package fails to build in parallel, and you are unable to resolve
the issue, then you should run '\fBemake\fR \-j1' instead of 'make'.
-.SH "HELPER FUNCTIONS: INSTALL"
+
+.SS "Install:"
.TP
-\fBeinstall\fR \fI[make options]\fR
+.B einstall\fR \fI[make options]
This is used as a replacement for make install. Performs:
.nf
make \\
@@ -1029,11 +1237,11 @@ Strips all executable files of debugging symbols. This includes libraries.
.RE
.TP
-\fBprepinfo\fR \fI[dir]\fR
+.B prepinfo\fR \fI[dir]
.TP
-\fBprepman\fR \fI[dir]\fR
+.B prepman\fR \fI[dir]
.TP
-\fBprepstrip\fR \fI[dir]\fR
+.B prepstrip\fR \fI[dir]
.PD 1
Similar to the \fBprepall\fR functions, these are subtle in their differences.
.RS
@@ -1055,7 +1263,7 @@ multiple directories.
.RE
.PD 1
.TP
-\fBdocompress\fR \fI[\-x] <path> [list of more paths]\fR
+.B docompress\fR \fI[\-x] <path> [list of more paths]
.RS
Beginning with \fBEAPI 4\fR, the \fBdocompress\fR helper is used to
manage lists of files to be included or excluded from optional compression.
@@ -1094,7 +1302,7 @@ If the item does not exist, it is ignored.
.RE
.RE
.TP
-\fBdosed\fR \fI"s:orig:change:g" <filename>\fR
+.B dosed\fR \fI"s:orig:change:g" <filename>
Beginning with \fBEAPI 4\fR, the \fBdosed\fR helper no longer exists. Ebuilds
should call \fBsed(1)\fR directly (and assume that it is GNU sed).
@@ -1105,66 +1313,66 @@ that this expression does \fBNOT\fR use the offset prefix.
.BR 'dosed\ "s:/usr/local:/usr:g"\ /usr/bin/some\-script'
runs sed on ${ED}/usr/bin/some\-script
.TP
-\fBdodir\fR \fI<path> [more paths]\fR
+.B dodir\fR \fI<path> [more paths]
Creates directories inside of ${ED}.
.br
.BR 'dodir\ /usr/lib/apache'
creates ${ED}/usr/lib/apache. Note that the do* functions will run
\fBdodir\fR for you.
.TP
-\fBdiropts\fR \fI[options for install(1)]\fR
+.B diropts\fR \fI[options for install(1)]
Can be used to define options for the install function used in
\fBdodir\fR. The default is \fI\-m0755\fR.
.TP
-\fBinto\fR \fI<path>\fR
+.B into\fR \fI<path>
Sets the root (\fIDESTTREE\fR) for other functions like \fBdobin\fR,
\fBdosbin\fR, \fBdoman\fR, \fBdoinfo\fR, \fBdolib\fR.
.br
The default root is /usr.
.TP
-\fBkeepdir\fR \fI<path> [more paths]\fR
+.B keepdir\fR \fI<path> [more paths]
Tells portage to leave directories behind even if they're empty. Functions
the same as \fBdodir\fR.
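+.br
+.BR 'keepdir\ /var/lib/myapp'
+(an illustrative path) keeps ${ED}/var/lib/myapp even though it is empty.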
.TP
-\fBdobin\fR \fI<binary> [list of more binaries]\fR
+.B dobin\fR \fI<binary> [list of more binaries]
Installs a \fIbinary\fR or a list of binaries into \fIDESTTREE\fR/bin.
Creates all necessary dirs.
.TP
-\fBdosbin\fR \fI<binary> [list of more binaries]\fR
+.B dosbin\fR \fI<binary> [list of more binaries]
Installs a \fIbinary\fR or a list of binaries into \fIDESTTREE\fR/sbin.
Creates all necessary dirs.
.TP
-\fBdoinitd\fR \fI<init.d script> [list of more init.d scripts]\fR
+.B doinitd\fR \fI<init.d script> [list of more init.d scripts]
Install Gentoo \fIinit.d scripts\fR. They will be installed into the
correct location for Gentoo init.d scripts (/etc/init.d/). Creates all
necessary dirs.
.TP
-\fBdoconfd\fR \fI<conf.d file> [list of more conf.d file]\fR
+.B doconfd\fR \fI<conf.d file> [list of more conf.d file]
Install Gentoo \fIconf.d files\fR. They will be installed into the
correct location for Gentoo conf.d files (/etc/conf.d/). Creates all
necessary dirs.
.TP
-\fBdoenvd\fR \fI<env.d entry> [list of more env.d entries]\fR
+.B doenvd\fR \fI<env.d entry> [list of more env.d entries]
Install Gentoo \fIenv.d entries\fR. They will be installed into the
correct location for Gentoo env.d entries (/etc/env.d/). Creates all
necessary dirs.
.PD 0
.TP
-\fBdolib\fR \fI<library>\fR \fI[list of more libraries]\fR
+.B dolib\fR \fI<library>\fR \fI[list of more libraries]
.TP
-\fBdolib.a\fR \fI<library>\fR \fI[list of more libraries]\fR
+.B dolib.a\fR \fI<library>\fR \fI[list of more libraries]
.TP
-\fBdolib.so\fR \fI<library>\fR \fI[list of more libraries]\fR
+.B dolib.so\fR \fI<library>\fR \fI[list of more libraries]
.PD 1
Installs a library or a list of libraries into \fIDESTTREE\fR/lib.
Creates all necessary dirs.
.TP
-\fBlibopts\fR \fI[options for install(1)]\fR
+.B libopts\fR \fI[options for install(1)]
Can be used to define options for the install function used in
the \fBdolib\fR functions. The default is \fI\-m0644\fR.
.TP
-\fBdoman\fR \fI[\-i18n=<locale>]\fR \fI<man\-page> [list of more man\-pages]\fR
+.B doman\fR \fI[\-i18n=<locale>]\fR \fI<man\-page> [list of more man\-pages]
Installs manual\-pages into /usr/share/man/man[0\-9n] depending on the
manual file ending. The files are compressed if they are not already. You
can specify locale\-specific manpages with the \fI\-i18n\fR option. Then the
@@ -1177,135 +1385,186 @@ foo.\fI<locale>\fR.1 will be installed as
/usr/share/man/\fI<locale>\fR/man1/foo.1. Beginning with \fBEAPI 4\fR,
the \fI\-i18n\fR option takes precedence over the locale suffix of the
file name.
+
.PD 0
.TP
-\fBdohard\fR \fI<filename> <linkname>\fR
+.B dohard\fR \fI<filename> <linkname>
Beginning with \fBEAPI 4\fR, the \fBdohard\fR helper no longer exists. Ebuilds
should call \fBln(1)\fR directly.
.TP
-\fBdosym\fR \fI<filename> <linkname>\fR
+.B dosym\fR \fI<filename> <linkname>
.PD 1
Performs the ln command to create a symlink.
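+.br
+.BR 'dosym\ libfoo.so.1\ /usr/lib/libfoo.so'
+(illustrative names) creates the symlink ${ED}/usr/lib/libfoo.so pointing
+to libfoo.so.1.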
.TP
-\fBdohtml\fR \fI [\-a filetypes] [\-r] [\-x list\-of\-dirs\-to\-ignore] [list\-of\-files\-and\-dirs]\fR
+.B doheader\fR \fI[\-r] <file> [list of more files]
+Installs the given header files into /usr/include/, by default
+with file mode \fI0644\fR (this can be overridden with the
+\fBinsopts\fR function). Setting \-r sets recursive. The
+\fBdoheader\fR helper is available beginning with \fBEAPI 5\fR.
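+
+For example (the directory name is illustrative):
+.nf
+doheader \-r include/myproject
+.fi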
+.TP
+.B dohtml\fR \fI [\-a filetypes] [\-r] [\-x list\-of\-dirs\-to\-ignore] \
+[list\-of\-files\-and\-dirs]
Installs the files in the list of files (space\-separated list) into
-/usr/share/doc/${PF}/html provided the file ends in .htm, .html, .css, .js, .gif, .jpeg, .jpg, or .png.
+/usr/share/doc/${PF}/html provided the file ends in .htm, .html, .css, .js, \
+.gif, .jpeg, .jpg, or .png.
Setting \fI\-a\fR limits what types of files will be included,
\fI\-A\fR appends to the default list, setting \fI\-x\fR sets which dirs to
-exclude (CVS excluded by default), \fI\-p\fR sets a document prefix, \fI\-r\fR sets recursive.
+exclude (CVS excluded by default), \fI\-p\fR sets a document prefix,
+\fI\-r\fR sets recursive.
.TP
-\fBdoinfo\fR \fI<info\-file> [list of more info\-files]\fR
+.B doinfo\fR \fI<info\-file> [list of more info\-files]
Installs info\-pages into \fIDESTDIR\fR/info. Files are automatically
gzipped. Creates all necessary dirs.
.TP
-\fBdomo\fR \fI<locale\-file> [list of more locale\-files] \fR
+.B domo\fR \fI<locale\-file> [list of more locale\-files]
Installs locale\-files into \fIDESTDIR\fR/usr/share/locale/[LANG]
depending on local\-file's ending. Creates all necessary dirs.
.PD 0
.TP
-\fBfowners\fR \fI<permissions> <file> [files]\fR
+.B fowners\fR \fI<permissions> <file> [files]
.TP
-\fBfperms\fR \fI<permissions> <file> [files]\fR
+.B fperms\fR \fI<permissions> <file> [files]
.PD 1
Performs chown (\fBfowners\fR) or chmod (\fBfperms\fR), applying
\fIpermissions\fR to \fIfiles\fR.
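+
+Examples (paths, ownership, and modes are illustrative):
+.nf
+fowners root:mail /var/spool/myapp
+fperms 0600 /etc/myapp/secret.conf
+.fi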
.TP
-\fBinsinto\fR \fI[path]\fR
+.B insinto\fR \fI[path]
Sets the destination path for the \fBdoins\fR function.
.br
The default path is /.
.TP
-\fBinsopts\fR \fI[options for install(1)]\fR
+.B insopts\fR \fI[options for install(1)]
Can be used to define options for the install function used in
\fBdoins\fR. The default is \fI\-m0644\fR.
.TP
-\fBdoins\fR \fI[\-r] <file> [list of more files]\fR
+.B doins\fR \fI[\-r] <file> [list of more files]
Installs files into the path controlled by \fBinsinto\fR. This function
uses \fBinstall\fR(1). Creates all necessary dirs.
Setting \-r sets recursive. Beginning with \fBEAPI 4\fR, both
\fBdoins\fR and \fBnewins\fR preserve symlinks. In \fBEAPI 3\fR and
earlier, symlinks are dereferenced rather than preserved.
.TP
-\fBexeinto\fR \fI[path]\fR
+.B exeinto\fR \fI[path]
Sets the destination path for the \fBdoexe\fR function.
.br
The default path is /.
.TP
-\fBexeopts\fR \fI[options for install(1)]\fR
+.B exeopts\fR \fI[options for install(1)]
Can be used to define options for the install function used in \fBdoexe\fR.
The default is \fI\-m0755\fR.
.TP
-\fBdoexe\fR \fI<executable> [list of more executables]\fR
+.B doexe\fR \fI<executable> [list of more executables]
Installs executables into the path controlled by \fBexeinto\fR. This function
uses \fBinstall\fR(1). Creates all necessary dirs.
.TP
-\fBdocinto\fR \fI[path]\fR
+.B docinto\fR \fI[path]
Sets the subdir used by \fBdodoc\fR and \fBdohtml\fR
when installing into the document tree
(based in /usr/share/doc/${PF}/). Default is no subdir, or just "".
.TP
-\fBdodoc\fR \fI[-r] <document> [list of more documents]\fR
-Installs a document or a list of documents into /usr/share/doc/${PF}/\fI<docinto path>\fR.
+.B dodoc\fR \fI[-r] <document> [list of more documents]
+Installs a document or a list of documents into
+/usr/share/doc/${PF}/\fI<docinto path>\fR.
Documents are marked for compression. Creates all necessary dirs.
Beginning with \fBEAPI 4\fR, there is support for recursion, enabled by the
new \fI\-r\fR option.
.PD 0
.TP
-\fBnewbin\fR \fI<old file> <new filename>\fR
+.B newbin\fR \fI<old file> <new filename>
.TP
-\fBnewsbin\fR \fI<old file> <new filename>\fR
+.B newsbin\fR \fI<old file> <new filename>
.TP
-\fBnewinitd\fR \fI<old file> <new filename>\fR
+.B newinitd\fR \fI<old file> <new filename>
.TP
-\fBnewconfd\fR \fI<old file> <new filename>\fR
+.B newconfd\fR \fI<old file> <new filename>
.TP
-\fBnewenvd\fR \fI<old file> <new filename>\fR
+.B newenvd\fR \fI<old file> <new filename>
.TP
-\fBnewlib.so\fR \fI<old file> <new filename>\fR
+.B newlib.so\fR \fI<old file> <new filename>
.TP
-\fBnewlib.a\fR \fI<old file> <new filename>\fR
+.B newlib.a\fR \fI<old file> <new filename>
.TP
-\fBnewman\fR \fI<old file> <new filename>\fR
+.B newman\fR \fI<old file> <new filename>
.TP
-\fBnewinfo\fR \fI<old file> <new filename>\fR
+.B newins\fR \fI<old file> <new filename>
.TP
-\fBnewins\fR \fI<old file> <new filename>\fR
+.B newexe\fR \fI<old file> <new filename>
.TP
-\fBnewexe\fR \fI<old file> <new filename>\fR
-.TP
-\fBnewdoc\fR \fI<old file> <new filename>\fR
+.B newdoc\fR \fI<old file> <new filename>
.PD 1
All these functions act like the do* functions, but they only work with one
file and the file is installed as \fI[new filename]\fR.
-.SH "REPORTING BUGS"
-Please report bugs via http://bugs.gentoo.org/
-.SH "AUTHORS"
+Beginning with \fBEAPI 5\fR, standard input is read when the
+first parameter is \- (a hyphen).
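+
+For example, under \fBEAPI 5\fR (the content and file name are
+illustrative):
+.nf
+echo "KEYMAP=us" | newins \- vconsole.conf
+.fi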
+
+.SH "EXAMPLES"
+.DS
.nf
-Achim Gottinger <achim@gentoo.org>
-Mark Guertin <gerk@gentoo.org>
-Nicholas Jones <carpaski@gentoo.org>
-Mike Frysinger <vapier@gentoo.org>
-Arfrever Frehtes Taifersar Arahesis <Arfrever.FTA@gmail.com>
-Fabian Groffen <grobian@gentoo.org>
+# Copyright 1999\-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+EAPI="5"
+
+inherit some_eclass another_eclass
+
+DESCRIPTION="Super\-useful stream editor (sed)"
+HOMEPAGE="http://www.gnu.org/software/sed/sed.html"
+SRC_URI="ftp://alpha.gnu.org/pub/gnu/${PN}/${P}.tar.gz"
+
+LICENSE="GPL\-2"
+SLOT="0"
+KEYWORDS="~x86"
+IUSE="nls"
+
+RDEPEND=""
+DEPEND="nls? ( sys-devel/gettext )"
+
+src_configure() {
+ econf \\
+ \-\-bindir="${EPREFIX}"/bin
+}
+
+src_install() {
+ emake DESTDIR="${D}" install
+ dodoc NEWS README* THANKS AUTHORS BUGS ChangeLog
+}
.fi
+.DE
+
.SH "FILES"
.TP
-The \fI/usr/sbin/ebuild.sh\fR script.
+The \fI/usr/lib/portage/bin/ebuild.sh\fR script.
.TP
The helper apps in \fI/usr/lib/portage/bin\fR.
.TP
-.B /etc/make.conf
-Contains variables for the build\-process and overwrites those in make.defaults.
+.B /etc/portage/make.conf
+Contains variables for the build\-process and overwrites those in
+make.defaults.
.TP
.B /usr/share/portage/config/make.globals
Contains the default variables for the build\-process, you should edit
-\fI/etc/make.conf\fR instead.
+\fI/etc/portage/make.conf\fR instead.
.TP
.B /etc/portage/color.map
Contains variables customizing colors.
+
.SH "SEE ALSO"
.BR ebuild (1),
.BR make.conf (5),
.BR color.map (5)
+
+.SH "REPORTING BUGS"
+Please report bugs via http://bugs.gentoo.org/
+
+.SH "AUTHORS"
+.nf
+Achim Gottinger <achim@gentoo.org>
+Mark Guertin <gerk@gentoo.org>
+Nicholas Jones <carpaski@gentoo.org>
+Mike Frysinger <vapier@gentoo.org>
+Arfrever Frehtes Taifersar Arahesis <arfrever@apache.org>
+Fabian Groffen <grobian@gentoo.org>
+.fi
diff --git a/man/egencache.1 b/man/egencache.1
index 909459502..f71feb371 100644
--- a/man/egencache.1
+++ b/man/egencache.1
@@ -1,4 +1,4 @@
-.TH "EGENCACHE" "1" "Oct 2010" "Portage VERSION" "Portage"
+.TH "EGENCACHE" "1" "Jul 2013" "Portage VERSION" "Portage"
.SH "NAME"
egencache \- generate metadata cache for ebuild repositories
.SH "SYNOPSIS"
@@ -6,12 +6,13 @@ egencache \- generate metadata cache for ebuild repositories
.I [options] --update [ATOM]\fR...
.SH "DESCRIPTION"
The egencache program generates metadata cache for ebuild repositories and
-stores it in the \fImetadata/cache/\fR directory within the repository itself,
-for distribution.
+stores it in the \fImetadata/md5\-cache/\fR directory within the repository
+itself, for distribution.
.SH ACTIONS
.TP
.BR "\-\-update [ATOM] ... "
-Update the \fImetadata/cache/\fR directory (generate metadata as necessary).
+Update the \fImetadata/md5\-cache/\fR directory (generate metadata as
+necessary).
If no package atoms are specified then all will be updated. See ebuild(5)
for the details on package atom syntax.
.TP
@@ -20,6 +21,12 @@ Update the ChangeLog files from SCM logs (supported only in git repos).
.TP
.BR "\-\-update\-use\-local\-desc"
Update the \fIprofiles/use.local.desc\fR file from metadata.xml.
+.TP
+.BR "\-\-update\-manifests"
+Update manifest files, and sign them if signing is enabled. This supports
+parallelization if enabled via the \-\-jobs option. The \-\-thin\-manifests
+and \-\-sign\-manifests options may be used to manually override layout.conf
+settings.
.SH OPTIONS
.TP
.BR "\-\-cache\-dir=CACHE_DIR"
@@ -34,6 +41,12 @@ Location of portage config files.
.br
Defaults to /.
.TP
+.BR "\-\-gpg\-dir"
+Override the PORTAGE_GPG_DIR variable.
+.TP
+.BR "\-\-gpg\-key"
+Override the PORTAGE_GPG_KEY variable.
+.TP
.BR "\-\-ignore-default-opts"
Causes \fIEGENCACHE_DEFAULT_OPTS\fR to be ignored.
.TP
@@ -45,21 +58,24 @@ Also see the related \fB\-\-load\-average\fR option.
Specifies the maximum load allowed when spawning multiple jobs.
.TP
.BR "\-\-portdir=PORTDIR"
-Override the portage tree location.
+Override the PORTDIR variable. This option is deprecated in favor of the
+\-\-repositories\-configuration option.
.TP
.BR "\-\-portdir\-overlay=PORTDIR_OVERLAY"
-Override the PORTDIR_OVERLAY variable (requires that
-\-\-repo is also specified).
+Override the PORTDIR_OVERLAY variable. This option is deprecated in favor
+of the \-\-repositories\-configuration option.
.TP
.BR "\-\-preserve\-comments"
Preserve the comments found in the output use.local.desc file. This requires
the output file to exist before egencache is called.
.TP
.BR "\-\-repo=REPO"
-Name of the repo to operate on (default repo is located at \fBPORTDIR\fR).
-The name should correspond the value of a \fBrepo_name\fR entry (see
-\fBportage\fR(5)) from one of the repositories that is configured via the
-\fBPORTDIR\fR or \fBPORTDIR_OVERLAY\fR variables (see \fBmake.conf\fR(5)).
+Name of the repo to operate on. The name should correspond to the value of
+a \fBrepo_name\fR entry (see \fBportage\fR(5)) from one of the repositories.
+.TP
+.BR "\-\-repositories\-configuration=REPOSITORIES_CONFIGURATION"
+Override configuration of repositories. The argument of this option has
+the same format as repos.conf (see \fBportage\fR(5)).
.TP
.BR "\-\-rsync"
When used together with the \fB\-\-update\fR action, this enables a workaround
@@ -72,6 +88,15 @@ This option should only be needed for distribution via something like
more thorough mechanism which allows it to detect changed inode numbers
(described in \fIracy-git.txt\fR in the git technical docs).
.TP
+.BR "\-\-sign\-manifests< y | n >"
+Manually override layout.conf sign-manifests setting.
+.TP
+.BR "\-\-strict\-manifests< y | n >"
+Manually override "strict" FEATURES setting.
+.TP
+.BR "\-\-thin\-manifests< y | n >"
+Manually override layout.conf thin-manifests setting.
+.TP
.BR "\-\-tolerant"
Exit successfully if only minor errors occurred, such as skipped cache
updates due to ebuilds that either fail to source or are not sourced
@@ -87,10 +112,10 @@ contains will be added to the beginning of the command line on every
invocation. These options will not be added if the
\fB\-\-ignore-default\-opts\fR option is specified.
.SH "BUGS"
-There are significant limitations associated with the metadata
-cache format that is distributed in the \fImetadata/cache/\fR directory
-of the repository. These limitations are related to the cache validation
-mechanism. Currently, the validation mechanism involves comparison of
+Prior to portage-2.1.11.32, the 'pms' cache format was enabled by default.
+This 'pms' format, which is distributed in the \fImetadata/cache/\fR
+directory of the repository, has significant limitations related to the
+cache validation mechanism which involves comparison of
a cache entry mtime to the mtime of the corresponding \fBebuild(5)\fR. This
mechanism is unreliable in cases when eclass changes result in metadata
changes, since no information about eclass state is available in the cache.
@@ -102,11 +127,21 @@ implemented in \fBemerge\fR(1) \fB\-\-sync\fR which updates ebuild mtimes
to match their corresponding cache entries (except for ebuilds that are
modified relative to HEAD).
-In order to solve the above problems, a future extension
-to the cache format will include additional
-validation data in the form of digests for both the ebuild
-and its inherited eclasses. Until the
-cache format has been extended in this way, it is necessary to enable
+In order to solve the above problems, the newer 'md5-dict' format has been
+enabled by default since portage-2.1.11.32. This format is distributed in
+the \fImetadata/md5-cache/\fR directory of the repository, and includes
+additional validation data in the form of digests for both the ebuild
+and its inherited eclasses. \fBWARNING:\fR Portage versions prior to
+portage-2.1.11.14 will \fBNOT\fR recognize the 'md5-dict' format unless it is
+explicitly listed in \fImetadata/layout.conf\fR (refer to \fBportage\fR(5)
+for example usage).
+
+\fBWARNING:\fR For backward compatibility, the obsolete 'pms' cache format
+will still be generated by default if the \fImetadata/cache/\fR directory
+exists in the repository. It can also be explicitly enabled via the
+cache\-formats setting in \fImetadata/layout.conf\fR (refer to \fBportage\fR(5)
+for example usage). If the 'pms' cache format is enabled and the 'md5-dict'
+format is not enabled, then it is necessary to enable
\fBmetadata-transfer\fR in \fBFEATURES\fR (see \fBmake.conf(5)\fR).
This causes intermediate cache (in a different format that includes
eclass state) to be generated inside the directory which is configurable
@@ -116,10 +151,11 @@ Please report bugs via http://bugs.gentoo.org/
.SH "AUTHORS"
.nf
Zac Medico <zmedico@gentoo.org>
+Arfrever Frehtes Taifersar Arahesis <arfrever@apache.org>
.fi
.SH "FILES"
.TP
-.B /etc/make.conf
+.B /etc/portage/make.conf
Contains variables.
.SH "SEE ALSO"
.BR emerge (1),
diff --git a/man/emaint.1 b/man/emaint.1
index c588a0bfe..83562998f 100644
--- a/man/emaint.1
+++ b/man/emaint.1
@@ -26,9 +26,10 @@ Discard no longer installed config tracker entries.
Discard merge lists saved for the \fBemerge\fR(1) \fB--resume\fR action.
.TP
.BR logs
-Clean out old logs from the \fBPORT_LOGDIR\fR using the command \fBPORT_LOGDIR_CLEAN\fR
-See the \fBmake.conf\fR(5) man page for additional information as well as enabling the
-\fB'clean-logs'\fR feature in emerge to do this automatically.
+Clean out old logs from the \fBPORT_LOGDIR\fR using the command
+\fBPORT_LOGDIR_CLEAN\fR.
+See the \fBmake.conf\fR(5) man page for additional information as well as
+enabling the \fB'clean-logs'\fR feature in emerge to do this automatically.
.TP
.BR movebin
Perform package move updates for binary packages located in \fBPKGDIR\fR.
@@ -38,7 +39,7 @@ Perform package move updates for installed packages.
.TP
.BR world
Fix problems in the \fIworld\fR file.
-.SH DEFAULT OPTIONS
+.SH DEFAULT OPTIONS
.TP
.B \-c, \-\-check
Check for any problems that may exist. (all commands)
@@ -51,10 +52,12 @@ Fix any problems that may exist. (not all commands)
Cleans the logs from \fBPORT_LOGDIR\fR (logs command only)
.TP
.B \-p, \-\-pretend
-Sets pretend mode (same as \-c, \-\-check) for use with the \-C, \-\-clean OPTION (logs command only)
+Sets pretend mode (same as \-c, \-\-check) for use with the \-C, \-\-clean
+option (logs command only)
.TP
.B \-t NUM, \-\-time NUM
-Changes the minimum age \fBNUM\fR (in days) of the logs to be listed or deleted. (logs command only)
+Changes the minimum age \fBNUM\fR (in days) of the logs to be listed or
+deleted. (logs command only)
.SH "REPORTING BUGS"
Please report bugs via http://bugs.gentoo.org/
.SH AUTHORS
diff --git a/man/emerge.1 b/man/emerge.1
index 7aa46226c..abb0ed898 100644
--- a/man/emerge.1
+++ b/man/emerge.1
@@ -1,10 +1,11 @@
-.TH "EMERGE" "1" "Jun 2012" "Portage VERSION" "Portage"
+.TH "EMERGE" "1" "Mar 2014" "Portage VERSION" "Portage"
.SH "NAME"
emerge \- Command\-line interface to the Portage system
.SH "SYNOPSIS"
.TP
.BR emerge
-[\fIoptions\fR] [\fIaction\fR] [\fIebuild\fR | \fItbz2file\fR | \fIfile\fR | \fI@set\fR | \fIatom\fR] ...
+[\fIoptions\fR] [\fIaction\fR] [\fIebuild\fR | \fItbz2file\fR | \fIfile\fR |
+\fI@set\fR | \fIatom\fR] ...
.TP
.BR emerge
\fB\-\-sync\fR | \fB\-\-version\fR
@@ -16,7 +17,7 @@ emerge \- Command\-line interface to the Portage system
\fB\-\-search\fR \fIsomestring\fR
.TP
.BR emerge
-\fB\-\-help\fR [\fB\-\-verbose\fR]
+\fB\-\-help\fR
.SH "DESCRIPTION"
\fBemerge\fR is the definitive command\-line interface to the Portage
system. It is primarily used for installing packages, and \fBemerge\fR
@@ -48,7 +49,7 @@ so this syntax shouldn't be used.
.TP
.BR tbz2file
A \fItbz2file\fR must be a valid .tbz2 created with \fBebuild
-<package>\-<version>.ebuild package\fR or \fBemerge \-\-buildpkg
+<package>\-<version>.ebuild package\fR or \fBemerge \-\-buildpkg
[category/]<package>\fR or \fBquickpkg /var/db/pkg/<category>/<package>\fR.
.TP
.BR file
@@ -72,20 +73,22 @@ on the current configuration. The default set configuration is located
in the \fB/usr/share/portage/config/sets\fR directory.
User sets may be created by placing files in the \fB/etc/portage/sets/\fR
directory (see \fBportage\fR(5)). Note that a \fIset\fR
-is generally used in conjunction with \fB\-\-update\fR. When used as
+is generally used in conjunction with \fB\-\-update\fR. When used as
arguments to \fBemerge\fR sets have to be prefixed with \fB@\fR to be
recognized. Use the \fB\-\-list\-sets\fR action to display a list of
available package sets.
.TP
.BR atom
-An \fIatom\fR describes bounds on a package that you wish to install.
+An \fIatom\fR describes bounds on a package that you wish to install.
\fISee ebuild(5) for the details on atom syntax.\fR For example,
-\fB>=dev\-lang/python\-2.2.1\-r2\fR matches the latest available version of
-Python greater than or equal to 2.2.1\-r2. Similarly,
-\fB<dev\-lang/python\-2.0\fR matches the latest available version of Python
-before 2.0. Note that in many shells you will need to escape characters such
-as '<' and '='; use single\- or double\-quotes around the \fIatom\fR
-to get around escaping problems.
+\fB>=dev\-lang/python\-2.2.1\-r2\fR matches the latest available version of
+Python greater than or equal to 2.2.1\-r2. Similarly,
+\fB<dev\-lang/python\-2.0\fR matches the latest available version of Python
+before 2.0. Note that in many shells you will need to escape characters such
+as '<' and '='; use single\- or double\-quotes around the \fIatom\fR
+to get around escaping problems. You may also constrain an atom to match a
+specific \fBSLOT\fR by appending a colon and a \fBSLOT\fR. Example:
+\fBx11\-libs/qt:3\fR.
.SH "ACTIONS"
.TP
.BR "No action"
@@ -100,18 +103,20 @@ later updating.
.TP
.BR \-\-check\-news
Scan all repositories for relevant unread GLEP 42 news items, and display
-how many are found. See \fIhttp://www.gentoo.org/proj/en/glep/glep-0042.html\fR.
+how many are found. See
+\fIhttp://www.gentoo.org/proj/en/glep/glep-0042.html\fR.
.TP
.BR \-\-clean
Cleans up the system by examining the installed packages and removing older
-packages. This is accomplished by looking at each installed package and separating
-the installed versions by \fBslot\fR. Clean will \fBremove all but the most recently
-installed version in each \fbslot\fR. Clean should not remove unslotted packages.
-Note: Most recently installed means most \fBrecent\fR, not highest version.
+packages. This is accomplished by looking at each installed package and
+separating the installed versions by \fBslot\fR. Clean will \fBremove all but
+the most recently installed version in each slot\fR. Clean should not
+remove unslotted packages. Note: Most recently installed means most
+\fBrecent\fR, not highest version.
.TP
.BR "\-\-config "
-Run package specific actions needed to be executed after the emerge process
-has completed. This usually entails configuration file setup or other similar
+Run package specific actions needed to be executed after the emerge process
+has completed. This usually entails configuration file setup or other similar
setups that the user may wish to run.
.TP
.BR "\-\-depclean (-c)"
@@ -161,21 +166,21 @@ updated more frequently than this man page; check it out if you
are having problems that this man page does not help resolve.
.TP
.BR \-\-info
-Produces a list of information to include in bug reports which aids the
-developers when fixing the reported problem. \fBPlease include this
-information when submitting a bug report.\fR Expanded output can be obtained
+Produces a list of information to include in bug reports which aids the
+developers when fixing the reported problem. \fBPlease include this
+information when submitting a bug report.\fR Expanded output can be obtained
with the \fI\-\-verbose\fR option.
.TP
.BR \-\-list\-sets
Displays a list of available package sets.
.TP
.BR \-\-metadata
-Transfers metadata cache from ${PORTDIR}/metadata/cache/ to
-/var/cache/edb/dep/ as is normally done on the
-tail end of an rsync update using \fBemerge \-\-sync\fR. This process
-populates the cache database that portage uses for pre-parsed lookups of
-package data. It does not populate cache for the overlays listed in
-PORTDIR_OVERLAY. In order to generate cache for overlays, use \fB\-\-regen\fR.
+Transfers pregenerated metadata cache from
+${repository_location}/metadata/md5\-cache/ to /var/cache/edb/dep/ as is
+normally done on the tail end of an rsync update using \fBemerge \-\-sync\fR.
+This process populates the cache database that Portage uses for pre-parsed
+lookups of package data. It does not populate cache for repositories that do
+not distribute pregenerated metadata cache. In order to generate cache for
+these repositories, use \fB\-\-regen\fR.
In versions of portage >=2.1.5 the \-\-metadata action is totally unnecessary
unless the user has enabled FEATURES="metadata-transfer" in \fBmake.conf\fR(5).
.TP
@@ -188,40 +193,45 @@ the emerge output of the next \-\-depclean run carefully! Use
\-\-depclean to avoid this issue.\fR
.TP
.BR \-\-regen
-Causes portage to check and update the dependency cache of all ebuilds in the
-portage tree. The cache is used to speed up searches and the building of
-dependency trees. This command is not recommended for rsync users as rsync
-updates the cache using server\-side caches. If you do not know the
-differences between a 'rsync user' and some other user, then you are a 'rsync
-user' :). Rsync users should simply run \fBemerge \-\-sync\fR to regenerate
-the cache. After a portage update, rsync users may find it convenient to run
-\fBemerge \-\-metadata\fR to rebuild the cache as portage does at the end of
+Causes portage to check and update the dependency cache of all ebuilds in the
+portage tree. The cache is used to speed up searches and the building of
+dependency trees. This command is not recommended for rsync users as rsync
+updates the cache using server\-side caches. If you do not know the
+differences between a 'rsync user' and some other user, then you are a 'rsync
+user' :). Rsync users should simply run \fBemerge \-\-sync\fR to regenerate
+the cache. After a portage update, rsync users may find it convenient to run
+\fBemerge \-\-metadata\fR to rebuild the cache as portage does at the end of
a sync operation. In order to specify parallel \fB\-\-regen\fR behavior, use
the \fB\-\-jobs\fR and \fB\-\-load\-average\fR options. If you would like to
generate and distribute cache for use by others, use \fBegencache\fR(1).
.TP
.BR "\-\-resume" (\fB\-r\fR)
Resumes the most recent merge list that has been aborted due to an error.
-This re\-uses the options that were given with the original
+This re\-uses the arguments and options that were given with the original
command that's being resumed, and the user may also provide
-additional options when calling \fB\-\-resume\fR.
+additional options when calling \fB\-\-resume\fR. It is an error to provide
+atoms or sets as arguments to \fB\-\-resume\fR, since the arguments from the
+resumed command are used instead.
Please note that this operation will only return an error on failure. If there
is nothing for portage to do, then portage will exit with a message and a
success condition. A resume list will persist until it has been completed in
entirety or until another aborted merge list replaces it. The resume history
is capable of storing two merge lists. After one resume list completes, it is
possible to invoke \-\-resume once again in order to resume an older list.
+The resume lists are stored in \fI/var/cache/edb/mtimedb\fR, and may be
+explicitly discarded by running `emaint \-\-fix cleanresume` (see
+\fBemaint\fR(1)).
.TP
.BR "\-\-search " (\fB\-s\fR)
Searches for matches of the supplied string in the portage tree.
-By default emerge uses a case-insensitive simple search, but you can
+By default emerge uses a case-insensitive simple search, but you can
enable a regular expression search by prefixing the search string with %.
-For example, \fBemerge \-\-search "%^kde"\fR searches for any package whose
-name starts with "kde"; \fBemerge \-\-search "%gcc$"\fR searches for any
-package that ends with "gcc"; \fBemerge \-\-search "office"\fR searches for
-any package that contains the word "office". If you want to include the
-category into the search string, prepend an @: \fBemerge \-\-search
-"%@^dev-java.*jdk"\fR. If you want to search the package descriptions as well,
+For example, \fBemerge \-\-search "%^kde"\fR searches for any package whose
+name starts with "kde"; \fBemerge \-\-search "%gcc$"\fR searches for any
+package that ends with "gcc"; \fBemerge \-\-search "office"\fR searches for
+any package that contains the word "office". If you want to include the
+category into the search string, prepend an @: \fBemerge \-\-search
+"%@^dev-java.*jdk"\fR. If you want to search the package descriptions as well,
use the \fB\-\-searchdesc\fR action.
.TP
.BR "\-\-searchdesc " (\fB\-S\fR)
@@ -230,20 +240,15 @@ the package name. \fBTake caution\fR as the descriptions are also
matched as regular expressions.
.TP
.BR \-\-sync
-This updates the portage tree that is located in the
-directory that the PORTDIR variable refers to (default
-location is /usr/portage). The SYNC variable specifies
-the remote URI from which files will be synchronized.
+Updates repositories for which the sync\-type and sync\-uri attributes are
+set in repos.conf. See \fBportage\fR(5) for more information.
The \fBPORTAGE_SYNC_STALE\fR variable configures
warnings that are shown when emerge \-\-sync has not
been executed recently.
\fBWARNING:\fR
-The emerge \-\-sync action will modify and/or delete
-files located inside the directory that the PORTDIR
-variable refers to (default location is /usr/portage).
-For more information, see the PORTDIR documentation in
-the make.conf(5) man page.
+The emerge \-\-sync action will revert local changes (e.g. modifications or
+additions of files) inside repositories synchronized using rsync.
\fBNOTE:\fR
The \fBemerge\-webrsync\fR program will download the entire
@@ -277,6 +282,21 @@ temporarily mask interactive packages. With default
configuration, this would result in an effective
\fBACCEPT_PROPERTIES\fR value of "* -interactive".
.TP
+.BR \-\-accept\-restrict=ACCEPT_RESTRICT
+This option temporarily overrides the \fBACCEPT_RESTRICT\fR
+variable. The \fBACCEPT_RESTRICT\fR variable is incremental,
+which means that the specified setting is appended to the
+existing value from your configuration. The special \fB-*\fR
+token can be used to discard the existing configuration
+value and start fresh. See the \fBMASKED PACKAGES\fR section
+and \fBmake.conf\fR(5) for more information about
+ACCEPT_RESTRICT. A typical usage example for this option
+would be to use \fI\-\-accept\-restrict=\-bindist\fR to
+temporarily mask packages that are not binary
+re\-distributable. With default
+configuration, this would result in an effective
+\fBACCEPT_RESTRICT\fR value of "* -bindist".
+.TP
.BR "\-\-alphabetical "
When displaying USE and other flag output, combines the enabled and
disabled lists into one list and sorts the whole list alphabetically.
@@ -286,9 +306,10 @@ Before performing the action, display what will take place (server info for
\fB\-\-sync\fR, \fB\-\-pretend\fR output for merge, and so forth), then ask
whether to proceed with the action or abort. Using \fB\-\-ask\fR is more
efficient than using \fB\-\-pretend\fR and then executing the same command
-without \fB\-\-pretend\fR, as dependencies will only need to be calculated once.
-\fBWARNING: If the "Enter" key is pressed at the prompt (with no other input),
-it is interpreted as acceptance of the first choice. Note that the input
+without \fB\-\-pretend\fR, as dependencies will only need to be calculated
+once. \fBWARNING: If the "Enter" key is pressed at the prompt (with no other
+input), it is interpreted as acceptance of the first choice. Note that the
+input
buffer is not cleared prior to the prompt, so an accidental press of the
"Enter" key at any time prior to the prompt will be interpreted as a choice!
Use the \-\-ask\-enter\-invalid option if you want a single "Enter" key
@@ -360,7 +381,7 @@ possible ways to enable building of binary packages.
.TP
.BR "\-\-buildpkgonly " (\fB\-B\fR)
Creates binary packages for all ebuilds processed without actually
-merging the packages. This comes with the caveat that all build-time
+merging the packages. This comes with the caveat that all build-time
dependencies must already be emerged on the system.
.TP
.BR "\-\-changed\-use"
@@ -369,6 +390,10 @@ changed since installation. This option also implies the
\fB\-\-selective\fR option. Unlike \fB\-\-newuse\fR, the
\fB\-\-changed\-use\fR option does not trigger reinstallation when
flags that the user has not enabled are added or removed.
+
+NOTE: This option ignores the state of the "test" USE flag, since that flag
+has a special binding to FEATURES="test" (see \fBmake.conf\fR(5) for more
+information about \fBFEATURES\fR settings).
.TP
.BR "\-\-changelog " (\fB\-l\fR)
Use this in conjunction with the \fB\-\-pretend\fR option. This will
@@ -380,7 +405,7 @@ Enable or disable color output. This option will override \fINOCOLOR\fR
is not a tty (by default, color is disabled unless stdout is a tty).
.TP
.BR "\-\-columns"
-Used alongside \fB\-\-pretend\fR to cause the package name, new version,
+Used alongside \fB\-\-pretend\fR to cause the package name, new version,
and old version to be displayed in an aligned format for easy cut\-n\-paste.
.TP
.BR "\-\-complete\-graph [ y | n ]"
@@ -409,7 +434,7 @@ Set the \fBPORTAGE_CONFIGROOT\fR environment variable.
.TP
.BR "\-\-debug " (\fB\-d\fR)
Tells emerge to run the emerge command in \fB\-\-debug\fR mode. In this
-mode the bash build environment will run with the \-x option, causing
+mode the bash build environment will run with the \-x option, causing
it to output verbose debugging information to stdout. This also enables
a plethora of other output (mostly dependency resolution messages).
.TP
@@ -424,12 +449,17 @@ required.
.TP
.BR "\-\-depclean\-lib\-check [ y | n ]"
Account for library link-level dependencies during
-\fB\-\-depclean\fR and \fB\-\-prune\fR actions. This
-option is enabled by default. In some cases this can
-be somewhat time\-consuming. This option is ignored
-when FEATURES="preserve\-libs" is enabled in
-\fBmake.conf\fR(5), since any libraries that have
-consumers will simply be preserved.
+\fB\-\-depclean\fR and \fB\-\-prune\fR actions.
+This option is enabled by default. If FEATURES="preserve\-libs" is
+enabled in \fBmake.conf\fR(5), and preserve\-libs is not restricted
+for any of the packages selected for removal, then this option is
+ignored because any libraries that have consumers will simply be
+preserved.
+.TP
+.BR \-\-digest
+Prevent corruption from being noticed. The `repoman manifest` command is the
+preferred way to generate manifests and it is capable of doing an entire
+repository or category at once (see \fBrepoman\fR(1)).
.TP
.BR "\-\-dynamic\-deps < y | n >"
In dependency calculations, substitute the dependencies of installed
@@ -472,10 +502,10 @@ Instead of doing any package building, just perform fetches for all
packages (fetch everything in SRC_URI regardless of USE setting).
.TP
.BR "\-\-getbinpkg [ y | n ] (\-g short option)"
-Using the server and location defined in \fIPORTAGE_BINHOST\fR (see
-\fBmake.conf\fR(5)), portage will download the information from each binary
-package found and it will use that information to help build the dependency
-list. This option implies \fB\-k\fR. (Use \fB\-gK\fR for binary\-only
+Using the server and location defined in \fIPORTAGE_BINHOST\fR (see
+\fBmake.conf\fR(5)), portage will download the information from each binary
+package found and it will use that information to help build the dependency
+list. This option implies \fB\-k\fR. (Use \fB\-gK\fR for binary\-only
merging.)
.TP
.BR "\-\-getbinpkgonly [ y | n ] (\-G short option)"
@@ -485,12 +515,12 @@ remote server are preferred over local packages if they are not identical.
.BR "\-\-ignore-default-opts"
Causes \fIEMERGE_DEFAULT_OPTS\fR (see \fBmake.conf\fR(5)) to be ignored.
.TP
-.BR "\-\-ignore\-built\-slot\-abi\-deps < y | n >"
-Ignore the SLOT/ABI := operator parts of dependencies that have
+.BR "\-\-ignore\-built\-slot\-operator\-deps < y | n >"
+Ignore the slot/sub\-slot := operator parts of dependencies that have
been recorded when packages were built. This option is intended
only for debugging purposes, and it only affects built packages
-that specify SLOT/ABI := operator dependencies using the
-experimental "4\-slot\-abi" EAPI.
+that specify slot/sub\-slot := operator dependencies which are
+supported beginning with \fBEAPI 5\fR.
.TP
.BR "-j [JOBS], \-\-jobs[=JOBS]"
Specifies the number of packages to build simultaneously. If this option is
@@ -508,9 +538,10 @@ dependencies are recalculated for remaining packages and any with
unsatisfied dependencies are automatically dropped. Also see
the related \fB\-\-skipfirst\fR option.
.TP
-.BR \-\-load\-average=LOAD
+.BR "\-\-load\-average [LOAD]"
Specifies that no new builds should be started if there are other builds
running and the load average is at least LOAD (a floating-point number).
+With no argument, removes a previous load limit.
This option is recommended for use in combination with \fB\-\-jobs\fR in
order to avoid excess load. See \fBmake\fR(1) for information about
analogous options that should be configured via \fBMAKEOPTS\fR in
@@ -522,6 +553,11 @@ a list of packages with similar names when a package doesn't exist.
The \fIEMERGE_DEFAULT_OPTS\fR variable may be used to disable this
option by default.
.TP
+.BR "\-\-newrepo "
+Tells emerge to recompile a package if it is now being pulled from a
+different repository. This option also implies the
+\fB\-\-selective\fR option.
+.TP
.BR "\-\-newuse " (\fB\-N\fR)
Tells emerge to include installed packages where USE
flags have changed since compilation. This option
@@ -538,6 +574,10 @@ settings. If you would like to skip rebuilds for which disabled flags have
been added to or removed from IUSE, see the related
\fB\-\-changed\-use\fR option. If you would like to skip rebuilds for
specific packages, see the \fB\-\-exclude\fR option.
+
+NOTE: This option ignores the state of the "test" USE flag, since that flag
+has a special binding to FEATURES="test" (see \fBmake.conf\fR(5) for more
+information about \fBFEATURES\fR settings).
.TP
.BR "\-\-noconfmem"
Causes portage to disregard merge records indicating that a config file
@@ -555,7 +595,8 @@ Skips the packages specified on the command\-line that have already
been installed. Without this option, any package atoms or package sets
you specify on the command\-line \fBwill\fR cause Portage to remerge
the package, even if it is already installed. Note that Portage will
-not remerge dependencies by default.
+not remerge dependencies by default. This option can be used to update the
+world file without rebuilding the packages.
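+.br
+For example, the following records zsh in the world file without rebuilding
+it if it is already installed (package name is illustrative):
+.nf
+emerge \-\-noreplace app\-shells/zsh
+.fi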
.TP
.BR "\-\-nospinner"
Disables the spinner for the session. The spinner is active when the
@@ -599,6 +640,13 @@ exhaustively apply the entire history of package moves,
regardless of whether or not any of the package moves have
been previously applied.
.TP
+.BR \-\-pkg\-format
+Specify the binary package format(s) to be created. Currently the possible
+choices are tar and rpm, or a combination of both.
+.TP
+.BR \-\-prefix=DIR
+Set the \fBEPREFIX\fR environment variable.
+.TP
.BR "\-\-pretend " (\fB\-p\fR)
Instead of actually performing the merge, simply display what *would*
have been installed if \fB\-\-pretend\fR weren't used. Using \fB\-\-pretend\fR
@@ -608,10 +656,11 @@ the printout:
.TS
lI l.
N new (not yet installed)
-S new SLOT installation (side-by-side versions)
+S new SLOT installation (side-by-side versions)
U updating (to another version)
D downgrading (best version seems lower)
-R replacing (remerging same version))
+r reinstall (forced for some reason, possibly due to slot or sub\-slot)
+R replacing (remerging same version)
F fetch restricted (must be manually downloaded)
f fetch restricted (already downloaded)
I interactive (requires user input)
@@ -626,7 +675,8 @@ output from portage's displays.
.BR "\-\-quiet\-build [ y | n ]"
Redirect all build output to logs alone, and do not display it on
stdout. If a build failure occurs for a single package, the build
-log will be automatically displayed on stdout. If there are multiple
+log will be automatically displayed on stdout (unless the
+\fI\-\-quiet\-fail\fR option is enabled). If there are multiple
build failures (due to options like \-\-keep\-going or \-\-jobs),
then the content of the log files will not be displayed, and instead
the paths of the log files will be displayed together with the
@@ -635,6 +685,12 @@ Note that interactive packages currently force all build output to
be displayed on stdout. This issue can be temporarily avoided
by specifying \fI\-\-accept\-properties=\-interactive\fR.
.TP
+.BR "\-\-quiet\-fail [ y | n ]"
+Suppresses display of the build log on stdout when build output is hidden
+due to options such as \fI\-\-jobs\fR, \fI\-\-quiet\fR, or
+\fI\-\-quiet\-build\fR. Only the die message and the path of the build log
+will be displayed on stdout.
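+.br
+A sketch of typical usage together with parallel builds (values are
+illustrative):
+.nf
+emerge \-\-jobs=2 \-\-quiet\-build=y \-\-quiet\-fail=y @world
+.fi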
+.TP
.BR "\-\-quiet\-repo\-display"
In the package merge list display, suppress ::repository output, and
instead use numbers to indicate which repositories packages come from.
@@ -645,16 +701,23 @@ Disable the warning message that's shown prior to
to be set in the \fBmake.conf\fR(5)
\fBEMERGE_DEFAULT_OPTS\fR variable.
.TP
-.BR "\-\-rebuild\-if\-new\-slot\-abi [ y | n ]"
-Automatically rebuild or reinstall packages when SLOT/ABI :=
+.BR "\-\-rebuild\-if\-new\-slot [ y | n ]"
+Automatically rebuild or reinstall packages when slot/sub\-slot :=
operator dependencies can be satisfied by a newer slot, so that
older package slots will become eligible for removal by the
\-\-depclean action as soon as possible. This option only
-affects packages that specify SLOT/ABI dependencies using the
-experimental "4\-slot\-abi" EAPI. Since this option requires
+affects packages that specify slot/sub\-slot := dependencies
+which are supported beginning with \fBEAPI 5\fR.
+Since this option requires
checking of reverse dependencies, it enables \-\-complete\-graph
mode whenever a new slot is installed. This option is enabled by
default.
+
+NOTE: If you want to skip all rebuilds involving slot\-operator
+dependencies (including those that involve sub\-slot changes alone),
+then \fI\-\-ignore\-built\-slot\-operator\-deps=y\fR is the option
+that you are looking for, since \fI\-\-rebuild\-if\-new\-slot\fR
+does not affect rebuilds triggered by sub\-slot changes alone.
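+.br
+For example, to update while skipping all slot\-operator rebuilds
+(illustrative):
+.nf
+emerge \-\-update \-\-deep \-\-ignore\-built\-slot\-operator\-deps=y @world
+.fi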
.TP
.BR "\-\-rebuild\-if\-new\-rev [ y | n ]"
Rebuild packages when build\-time dependencies are built from source, if the
@@ -696,16 +759,20 @@ Set the \fBROOT\fR environment variable.
.TP
.BR "\-\-root\-deps[=rdeps]"
If no argument is given then build\-time dependencies of packages for
-\fBROOT\fR are installed to
-\fBROOT\fR instead of /. If the \fBrdeps\fR argument is given then discard
-all build\-time dependencies of packages for \fBROOT\fR. This option is
-only meaningful when used together with \fBROOT\fR and it should not
-be enabled under normal circumstances. For currently supported
-\fBEAPI\fR values, the build-time dependencies are specified in the
-\fBDEPEND\fR variable. However, behavior may change for new
-\fBEAPI\fRs when related extensions are added in the future.
-.TP
-.BR "\-\-select [ y | n ]"
+\fBROOT\fR are installed to \fBROOT\fR instead of /.
+If the \fBrdeps\fR argument is given then discard all build\-time dependencies
+of packages for \fBROOT\fR.
+This option is only meaningful when used together with \fBROOT\fR and it should
+not be enabled under normal circumstances!
+
+Does not affect EAPIs that support \fBHDEPEND\fR.
+Experimental \fBEAPI 5-hdepend\fR provides \fBHDEPEND\fR as a new
+means to adjust installation into "\fI/\fR" and \fBROOT\fR.
+If ebuilds using EAPIs which \fIdo not\fR support \fBHDEPEND\fR are built in
+the same \fBemerge\fR run as those using EAPIs which \fIdo\fR support
+\fBHDEPEND\fR, this option affects only the former.
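+.br
+An illustrative invocation that installs a package and only its run\-time
+dependencies into a target root:
+.nf
+emerge \-\-root=/mnt/target \-\-root\-deps=rdeps net\-misc/dhcpcd
+.fi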
+.TP
+.BR "\-\-select [ y | n ] (\-w short option)"
Add specified packages to the world set (inverse of
\fB\-\-oneshot\fR). This is useful if you want to
use \fBEMERGE_DEFAULT_OPTS\fR to make
@@ -719,7 +786,7 @@ Use \fB\-\-selective=n\fR if you want to forcefully disable
\fB\-\-newuse\fR, \fB\-\-noreplace\fR, or \fB\-\-update\fR.
.TP
.BR "\-\-skipfirst"
-This option is only valid when used with \fB\-\-resume\fR. It removes the
+This option is only valid when used with \fB\-\-resume\fR. It removes the
first package in the resume list. Dependencies are recalculated for
remaining packages and any that have unsatisfied dependencies or are
masked will be automatically dropped. Also see the related
@@ -727,7 +794,7 @@ masked will be automatically dropped. Also see the related
.TP
.BR "\-\-tree " (\fB\-t\fR)
Shows the dependency tree for the given target by indenting dependencies.
-This is only really useful in combination with \fB\-\-emptytree\fR or
+This is only really useful in combination with \fB\-\-emptytree\fR or
\fB\-\-update\fR and \fB\-\-deep\fR.
.TP
.BR "\-\-unordered\-display"
@@ -753,20 +820,21 @@ A space separated list of package names or slot atoms. Emerge will prefer
matching binary packages over newer unbuilt packages.
.TP
.BR "\-\-usepkg [ y | n ] (\-k short option)"
-Tells emerge to use binary packages (from $PKGDIR) if they are available, thus
-possibly avoiding some time\-consuming compiles. This option is useful for CD
-installs; you can export PKGDIR=/mnt/cdrom/packages and then use this option to
-have emerge "pull" binary packages from the CD in order to satisfy dependencies.
+Tells emerge to use binary packages (from $PKGDIR) if they are available, thus
+possibly avoiding some time\-consuming compiles. This option is useful for CD
+installs; you can export PKGDIR=/mnt/cdrom/packages and then use this option to
+have emerge "pull" binary packages from the CD in order to satisfy
+dependencies.
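+.br
+For example (mount point is illustrative):
+.nf
+export PKGDIR=/mnt/cdrom/packages
+emerge \-\-usepkg net\-misc/rsync
+.fi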
.TP
.BR "\-\-usepkgonly [ y | n ] (\-K short option)"
-Tells emerge to only use binary packages (from $PKGDIR). All the binary
-packages must be available at the time of dependency calculation or emerge
-will simply abort. Portage does not use $PORTDIR when calculating dependency
-information so all masking information is ignored.
-.TP
-.BR "\-\-verbose " (\fB\-v\fR)
-Tell emerge to run in verbose mode. Currently this flag causes emerge to print
-out GNU info errors, if any, and to show the USE flags that will be used for
+Tells emerge to only use binary packages (from $PKGDIR). All the binary
+packages must be available at the time of dependency calculation or emerge
+will simply abort. Portage does not use ebuild repositories when calculating
+dependency information so all masking information is ignored.
+.TP
+.BR "\-\-verbose [ y | n ] (\-v short option)"
+Tell emerge to run in verbose mode. Currently this flag causes emerge to print
+out GNU info errors, if any, and to show the USE flags that will be used for
each package when pretending. The following symbols are affixed to USE flags
in order to indicate their status:
@@ -780,11 +848,20 @@ Symbol Location Meaning
* suffix transition to or from the enabled state
% suffix newly added or removed
() circumfix forced, masked, or removed
+{} circumfix state is bound to FEATURES settings
.TE
.TP
+.BR "\-\-verbose\-conflicts"
+Make slot conflicts more verbose. Note that this may in some cases output
+hundreds of packages for slot conflicts.
+.TP
.BR "\-\-verbose\-main\-repo\-display"
In the package merge list display, print ::repository even for the main
repository.
.TP
+.BR "\-\-verbose\-slot\-rebuilds [ y | n ]"
+Enables or disables the extra emerge output that lists which packages are
+causing rebuilds. The default is "y" (on).
+.TP
.BR "\-\-with\-bdeps < y | n >"
In dependency calculations, pull in build time dependencies
that are not strictly required. This defaults to \'n\' for
@@ -795,6 +872,13 @@ This setting can be added to
command line.
.SH "ENVIRONMENT OPTIONS"
.TP
+\fBEPREFIX\fR = \fI[path]\fR
+Use \fBEPREFIX\fR to specify the target prefix to be used for merging packages
+or ebuilds. This variable can be set via the \fB\-\-prefix\fR
+option or in \fBmake.conf\fR(5) (the command line overrides other settings).
+.br
+Defaults to the prefix where portage is currently installed.
+.TP
\fBROOT\fR = \fI[path]\fR
Use \fBROOT\fR to specify the target root filesystem to be used for
merging packages or ebuilds. This variable can be set via the \fB\-\-root\fR
@@ -803,18 +887,19 @@ option or in \fBmake.conf\fR(5) (the command line overrides other settings).
Defaults to /.
.TP
\fBPORTAGE_CONFIGROOT\fR = \fI[path]\fR
-Use \fBPORTAGE_CONFIGROOT\fR to specify the location for various portage
+Use \fBPORTAGE_CONFIGROOT\fR to specify the location for various portage
configuration files
(see \fBFILES\fR for a detailed list of configuration files). This variable
can be set via the \fB\-\-config\-root\fR option.
.br
Defaults to /.
.SH "OUTPUT"
-When utilizing \fBemerge\fR with the \fB\-\-pretend\fR and \fB\-\-verbose\fR
+When utilizing \fBemerge\fR with the \fB\-\-pretend\fR and \fB\-\-verbose\fR
flags, the output may be a little hard to understand at first. This section
explains the abbreviations.
.TP
-.B [blocks B ] app\-text/dos2unix ("app\-text/dos2unix" is blocking app\-text/hd2u\-0.8.0)
+.B [blocks B ] app\-text/dos2unix ("app\-text/dos2unix" is blocking \
+app\-text/hd2u\-0.8.0)
Dos2unix is Blocking hd2u from being emerged. Blockers are defined when
two packages will clobber each other's files, or otherwise cause some form
of breakage in your system. However, blockers usually do not need to be
@@ -824,34 +909,34 @@ simultaneously emerged because they usually provide the same functionality.
Qstat is New to your system, and will be emerged for the first time.
.TP
.B [ebuild NS ] dev-libs/glib-2.4.7
-You already have a version of glib installed, but a 'new' version in
+You already have a version of glib installed, but a 'new' version in
a different SLOT is available.
.TP
.B [ebuild R ] sys\-apps/sed\-4.0.5
-Sed 4.0.5 has already been emerged, but if you run the command, then
+Sed 4.0.5 has already been emerged, but if you run the command, then
portage will Re\-emerge the specified package (sed in this case).
.TP
.B [ebuild F ] media\-video/realplayer\-8\-r6
-The realplayer package requires that you Fetch the sources manually.
-When you attempt to emerge the package, if the sources are not found,
-then portage will halt and you will be provided with instructions on how
+The realplayer package requires that you Fetch the sources manually.
+When you attempt to emerge the package, if the sources are not found,
+then portage will halt and you will be provided with instructions on how
to download the required files.
.TP
.B [ebuild f ] media\-video/realplayer\-8\-r6
The realplayer package's files are already downloaded.
.TP
.B [ebuild U ] net\-fs/samba\-2.2.8_pre1 [2.2.7a]
-Samba 2.2.7a has already been emerged and can be Updated to version
+Samba 2.2.7a has already been emerged and can be Updated to version
2.2.8_pre1.
.TP
.B [ebuild UD] media\-libs/libgd\-1.8.4 [2.0.11]
-Libgd 2.0.11 is already emerged, but if you run the command, then
+Libgd 2.0.11 is already emerged, but if you run the command, then
portage will Downgrade to version 1.8.4 for you.
-.br
+.br
This may occur if a newer version of a package has been masked because it is
broken or it creates a security risk on your system and a fix has not been
released yet.
-.br
+.br
Another reason this may occur is if a package you are trying to emerge requires
an older version of a package in order to emerge successfully. In this case,
libgd 2.x is incompatible with libgd 1.x. This means that packages that were
@@ -872,6 +957,19 @@ displayed when you use the \fB\-\-pretend\fR and \fB\-\-verbose\fR options.
Using the \fB\-\-quiet\fR option will prevent all information from being
displayed.
.TP
+.B [ebuild r U ] dev\-libs/icu\-50.1.1:0/50.1.1 [50.1\-r2:0/50.1]
+Icu 50.1\-r2 has already been emerged and can be Updated to version
+50.1.1. The \fBr\fR symbol indicates that a sub\-slot change (from 50.1
+to 50.1.1 in this case) will force packages having slot\-operator
+dependencies on it to be rebuilt (as libxml2 will be rebuilt in the next
+example).
+.TP
+.B [ebuild rR ] dev\-libs/libxml2\-2.9.0\-r1:2 USE="icu"
+Libxml2 2.9.0\-r1 has already been emerged, but if you run the command,
+then portage will Re\-emerge it in order to satisfy a slot\-operator
+dependency which forces it to be rebuilt when the icu sub\-slot changes
+(as it changed in the previous example).
+.TP
.B [ebuild U *] sys\-apps/portage\-2.2.0_alpha6 [2.1.9.25]
Portage 2.1.9.25 is installed, but if you run the command, then
portage will upgrade to version 2.2.0_alpha6. In this case,
@@ -899,14 +997,14 @@ globally via \fBACCEPT_KEYWORDS\fR.
.SH "NOTES"
-You should almost always precede any package install or update attempt with a
-\fB\-\-pretend\fR install or update. This lets you see how much will be
-done, and shows you any blocking packages that you will have to rectify.
-This goes doubly so for the \fBsystem\fR and \fBworld\fR sets, which can
-update a large number of packages if the portage tree has been particularly
+You should almost always precede any package install or update attempt with a
+\fB\-\-pretend\fR install or update. This lets you see how much will be
+done, and shows you any blocking packages that you will have to rectify.
+This goes doubly so for the \fBsystem\fR and \fBworld\fR sets, which can
+update a large number of packages if the portage tree has been particularly
active.
.LP
-You also want to typically use \fB\-\-update\fR, which ignores packages that
+You also want to typically use \fB\-\-update\fR, which ignores packages that
are already fully updated but updates those that are not.
.LP
When you install a package with uninstalled dependencies and do
@@ -921,7 +1019,7 @@ avoid using some dependencies you may not want to have. \fBUSE
flags specified on the command line are NOT remembered\fR. For
example, \fBenv USE="\-X \-gnome" emerge mc\fR will emerge mc with
those USE settings (on Bourne-compatible shells you may omit the \fBenv\fR
-part). If you want those USE settings to be more
+part). If you want those USE settings to be more
permanent, you can put them in /etc/portage/package.use instead.
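+.br
+For example, a corresponding \fI/etc/portage/package.use\fR entry might look
+like:
+.nf
+app\-misc/mc \-X \-gnome
+.fi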
.LP
If \fBemerge \-\-update @system\fR or \fBemerge \-\-update @world\fR
@@ -939,7 +1037,7 @@ Masks in \fBportage\fR have many uses: they allow a
testing period where the packages can be used in live machines; they
prevent the use of a package when it will fail; and they mask existing
packages that are broken or could pose a security risk. Read below
-to find out how to unmask in various cases. Also note that if you give
+to find out how to unmask in various cases. Also note that if you give
\fBemerge\fR an ebuild, then all forms of masking will be ignored and
\fBemerge\fR will attempt to emerge the package.
.TP
@@ -965,15 +1063,15 @@ that are not supported by the current version of portage. Packages masked by
\fBEAPI\fR can only be installed after portage has been upgraded.
.TP
.BR KEYWORDS
-The \fBKEYWORDS\fR variable in an \fBebuild\fR file is also used for masking
-a package still in testing. There are architecture\-specific keywords for
-each package that let \fBportage\fR know which systems are compatible with
-the package. Packages which compile on an architecture, but have not been
-proven to be "stable", are masked with a tilde (\fB~\fR) in front of the
-architecture name. \fBemerge\fR examines the \fBACCEPT_KEYWORDS\fR environment
-variable to allow or disallow the emerging of a package masked by
-\fBKEYWORDS\fR. To inform \fBemerge\fR that it should build these 'testing'
-versions of packages, you should update your
+The \fBKEYWORDS\fR variable in an \fBebuild\fR file is also used for masking
+a package still in testing. There are architecture\-specific keywords for
+each package that let \fBportage\fR know which systems are compatible with
+the package. Packages which compile on an architecture, but have not been
+proven to be "stable", are masked with a tilde (\fB~\fR) in front of the
+architecture name. \fBemerge\fR examines the \fBACCEPT_KEYWORDS\fR environment
+variable to allow or disallow the emerging of a package masked by
+\fBKEYWORDS\fR. To inform \fBemerge\fR that it should build these 'testing'
+versions of packages, you should update your
\fI/etc/portage/package.accept_keywords\fR
file to list the packages you want the
\'testing\' version. See \fBportage\fR(5) for more information.
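+.br
+An illustrative \fI/etc/portage/package.accept_keywords\fR entry:
+.nf
+app\-portage/eix ~amd64
+.fi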
@@ -994,6 +1092,15 @@ of a package masked by \fBPROPERTIES\fR. See \fBmake.conf\fR(5) for information
about \fBACCEPT_PROPERTIES\fR, and see \fBportage\fR(5) for information about
\fI/etc/portage/package.properties\fR. Use the \fB\-\-accept\-properties\fR
option to temporarily override \fBACCEPT_PROPERTIES\fR.
+.TP
+.BR RESTRICT
+The \fBRESTRICT\fR variable in an \fBebuild\fR file can be used to mask
+packages based on RESTRICT tokens. \fBemerge\fR examines the
+\fBACCEPT_RESTRICT\fR environment variable to allow or disallow the emerging
+of a package masked by \fBRESTRICT\fR. See \fBmake.conf\fR(5) for information
+about \fBACCEPT_RESTRICT\fR, and see \fBportage\fR(5) for information about
+\fI/etc/portage/package.accept_restrict\fR. Use the \fB\-\-accept\-restrict\fR
+option to temporarily override \fBACCEPT_RESTRICT\fR.
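+.br
+An illustrative \fI/etc/portage/package.accept_restrict\fR entry:
+.nf
+www\-plugins/adobe\-flash bindist
+.fi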
.SH "CONFIGURATION FILES"
Portage has a special feature called "config file protection". The purpose of
this feature is to prevent new package installs from clobbering existing
@@ -1002,8 +1109,8 @@ and the KDE configuration dirs; more may be added in the future.
.LP
When Portage installs a file into a protected directory tree like /etc, any
existing files will not be overwritten. If a file of the same name already
-exists, Portage will change the name of the to\-be\-installed file from 'foo' to
-\'._cfg0000_foo\'. If \'._cfg0000_foo\' already exists, this name becomes
+exists, Portage will change the name of the to\-be\-installed file from 'foo'
+to \'._cfg0000_foo\'. If \'._cfg0000_foo\' already exists, this name becomes
\'._cfg0001_foo\', etc. In this way, existing files are not overwritten,
allowing the administrator to manually merge the new config files and avoid any
unexpected changes.
@@ -1015,21 +1122,23 @@ deleted, which is of paramount importance.
.LP
Protected directories are set using the \fICONFIG_PROTECT\fR variable, normally
defined in make.globals. Directory exceptions to the CONFIG_PROTECTed
-directories can be specified using the \fICONFIG_PROTECT_MASK\fR variable. To find
-files that need to be updated in /etc, type \fBfind /etc \-iname \'._cfg????_*\'\fR.
+directories can be specified using the \fICONFIG_PROTECT_MASK\fR variable.
+To find files that need to be updated in /etc, type \fBfind /etc \-name
+\[aq]._cfg????_*\[aq]\fR.
.LP
-You can disable this feature by setting \fICONFIG_PROTECT="\-*"\fR in /etc/make.conf.
+You can disable this feature by setting \fICONFIG_PROTECT="\-*"\fR in
+\fBmake.conf\fR(5).
Then, Portage will mercilessly auto\-update your config files. Alternatively,
you can leave Config File Protection on but tell Portage that it can overwrite
files in certain specific /etc subdirectories. For example, if you wanted
Portage to automatically update your rc scripts and your wget configuration,
but didn't want any other changes made without your explicit approval, you'd
-add this to /etc/make.conf:
+add this to \fBmake.conf\fR(5):
.LP
.I CONFIG_PROTECT_MASK="/etc/wget /etc/rc.d"
.LP
-Tools such as dispatch\-conf, cfg\-update, and etc\-update are also available to
-aid in the merging of these files. They provide interactive merging and can
+Tools such as dispatch\-conf, cfg\-update, and etc\-update are also available
+to aid in the merging of these files. They provide interactive merging and can
auto\-merge trivial changes.
.SH "REPORTING BUGS"
Please report any bugs you encounter through our website:
@@ -1050,9 +1159,10 @@ Marius Mauch <genone@gentoo.org>
Jason Stubbs <jstubbs@gentoo.org>
Brian Harring <ferringb@gmail.com>
Zac Medico <zmedico@gentoo.org>
+Arfrever Frehtes Taifersar Arahesis <arfrever@apache.org>
.fi
.SH "FILES"
-Here is a common list of files you will probably be interested in. For a
+Here is a common list of files you will probably be interested in. For a
complete listing, please refer to the \fBportage\fR(5) man page.
.TP
.B /usr/share/portage/config/sets/
@@ -1068,7 +1178,7 @@ This is like the world file but instead of package atoms it contains
packages sets which always begin with the \fB@\fR character. Use
\fB/etc/portage/sets/\fR to define user package sets.
.TP
-.B /etc/make.conf
+.B /etc/portage/make.conf
Contains variables for the build process, overriding those in
\fBmake.globals\fR.
.TP
@@ -1079,10 +1189,10 @@ Contains variables customizing colors.
Contains user package set definitions (see \fBportage\fR(5)).
.TP
.B /etc/dispatch\-conf.conf
-Contains settings to handle automatic updates/backups of configuration
+Contains settings to handle automatic updates/backups of configuration
files.
.TP
-.B /etc/make.profile/make.defaults
+.B /etc/portage/make.profile/make.defaults
Contains profile\-specific variables for the build process. \fBDo not
edit this file\fR.
.TP
@@ -1090,17 +1200,28 @@ edit this file\fR.
Contains the master list of USE flags with descriptions of their
functions. \fBDo not edit this file\fR.
.TP
-.B /etc/make.profile/virtuals
+.B /etc/portage/make.profile/virtuals
Contains a list of default packages used to resolve virtual dependencies.
\fBDo not edit this file\fR.
.TP
-.B /etc/make.profile/packages
+.B /etc/portage/make.profile/packages
Contains a list of packages used for the base system. The \fBsystem\fR
and \fBworld\fR sets consult this file. \fBDo not edit this file\fR.
.TP
.B /usr/share/portage/config/make.globals
Contains the default variables for the build process. \fBDo not edit
this file\fR.
+.TP
+.B /var/log/emerge.log
+Contains a log of all emerge output. This file is always appended to, so if you
+want to clean it, you need to do so manually.
+.TP
+.B /var/log/emerge-fetch.log
+Contains a log of all the fetches in the previous emerge invocation.
+.TP
+.B /var/log/portage/elog/summary.log
+Contains the emerge summaries. A logrotate script is installed as
+\fI/etc/logrotate.d/elog\-save\-summary\fR.
.SH "SEE ALSO"
.BR "emerge \-\-help",
.BR quickpkg (1),
@@ -1112,5 +1233,5 @@ this file\fR.
.LP
A number of helper applications reside in \fI/usr/lib/portage/bin\fR.
.LP
-The \fBapp\-portage/gentoolkit\fR package contains useful scripts such as
+The \fBapp\-portage/gentoolkit\fR package contains useful scripts such as
\fBequery\fR (a package query tool).
diff --git a/man/emirrordist.1 b/man/emirrordist.1
new file mode 100644
index 000000000..2c9383018
--- /dev/null
+++ b/man/emirrordist.1
@@ -0,0 +1,148 @@
+.TH "EMIRRORDIST" "1" "Jul 2013" "Portage VERSION" "Portage"
+.SH "NAME"
+emirrordist \- a fetch tool for mirroring of package distfiles
+.SH SYNOPSIS
+.B emirrordist
+[\fIoptions\fR] \fI<action>\fR
+.SH ACTIONS
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+Show a help message and exit.
+.TP
+\fB\-\-version\fR
+Display portage version and exit.
+.TP
+\fB\-\-mirror\fR
+Mirror distfiles for the selected repository.
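+.br
+A minimal illustrative invocation (repo name and path are examples):
+.nf
+emirrordist \-\-mirror \-\-repo=gentoo \-\-distfiles=/var/www/distfiles
+.fi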
+.SH OPTIONS
+.TP
+\fB\-\-dry\-run\fR
+Perform a trial run with no changes made (typically combined
+with \fI\-v\fR or \fI\-vv\fR).
+.TP
+\fB\-v\fR, \fB\-\-verbose\fR
+Display extra information on stderr (multiple occurrences
+increase verbosity).
+.TP
+\fB\-\-ignore\-default\-opts\fR
+Do not use the \fIEMIRRORDIST_DEFAULT_OPTS\fR environment
+variable.
+.TP
+\fB\-\-distfiles\fR=\fIDIR\fR
+Distfiles directory to use (required).
+.TP
+\fB\-j\fR JOBS, \fB\-\-jobs\fR=\fIJOBS\fR
+Number of concurrent jobs to run.
+.TP
+\fB\-l\fR LOAD, \fB\-\-load\-average\fR=\fILOAD\fR
+Load average limit for spawning of new concurrent jobs.
+.TP
+\fB\-\-tries\fR=\fITRIES\fR
+Maximum number of tries per file; 0 means unlimited
+(default is 10).
+.TP
+\fB\-\-repo\fR=\fIREPO\fR
+Name of repo to operate on.
+.TP
+\fB\-\-config\-root\fR=\fIDIR\fR
+Location of portage config files.
+.TP
+\fB\-\-portdir\fR=\fIDIR\fR
+Override the PORTDIR variable. This option is deprecated in favor of the
+\-\-repositories\-configuration option.
+.TP
+\fB\-\-portdir\-overlay\fR=\fIPORTDIR_OVERLAY\fR
+Override the PORTDIR_OVERLAY variable. This option is deprecated in favor of
+the \-\-repositories\-configuration option.
+.TP
+\fB\-\-repositories\-configuration\fR=\fIREPOSITORIES_CONFIGURATION\fR
+Override configuration of repositories. The argument of this option has
+the same format as repos.conf (see \fBportage\fR(5)).
+.TP
+\fB\-\-strict\-manifests=\fR<y|n>
+Manually override "strict" FEATURES setting.
+.TP
+\fB\-\-failure\-log\fR=\fIFILE\fR
+Log file for fetch failures, with tab\-delimited output, for
+reporting purposes. Opened in append mode.
+.TP
+\fB\-\-success\-log\fR=\fIFILE\fR
+Log file for fetch successes, with tab\-delimited output, for
+reporting purposes. Opened in append mode.
+.TP
+\fB\-\-scheduled\-deletion\-log\fR=\fIFILE\fR
+Log file for scheduled deletions, with tab\-delimited output, for
+reporting purposes. Overwritten with each run.
+.TP
+\fB\-\-delete\fR
+Enable deletion of unused distfiles.
+.TP
+\fB\-\-deletion\-db\fR=\fIFILE\fR
+Database file used to track lifetime of files scheduled for
+delayed deletion.
+.TP
+\fB\-\-deletion\-delay\fR=\fISECONDS\fR
+Delay time for deletion of unused distfiles, measured in seconds.
+.TP
+\fB\-\-temp\-dir\fR=\fIDIR\fR
+Temporary directory for downloads.
+.TP
+\fB\-\-mirror\-overrides\fR=\fIFILE\fR
+File holding a list of mirror overrides.
+.TP
+\fB\-\-mirror\-skip\fR=\fIMIRROR_SKIP\fR
+Comma delimited list of mirror targets to skip when
+fetching.
+.TP
+\fB\-\-restrict\-mirror\-exemptions\fR=\fIRESTRICT_MIRROR_EXEMPTIONS\fR
+Comma delimited list of mirror targets for which to ignore
+RESTRICT="mirror" (see \fBebuild\fR(5)).
+.TP
+\fB\-\-verify\-existing\-digest\fR
+Use digest as a verification of whether existing
+distfiles are valid.
+.TP
+\fB\-\-distfiles\-local\fR=\fIDIR\fR
+The distfiles\-local directory to use.
+.TP
+\fB\-\-distfiles\-db\fR=\fIFILE\fR
+Database file used to track which ebuilds a distfile belongs to.
+.TP
+\fB\-\-recycle\-dir\fR=\fIDIR\fR
+Directory for extended retention of files that are removed from
+distdir with the \-\-delete option. These files may be recycled if
+they are needed again, instead of being downloaded again.
+.TP
+\fB\-\-recycle\-db\fR=\fIFILE\fR
+Database file used to track lifetime of files in recycle dir.
+.TP
+\fB\-\-recycle\-deletion\-delay\fR=\fISECONDS\fR
+Delay time for deletion of unused files from recycle dir,
+measured in seconds (defaults to the equivalent of 60 days).
+.TP
+\fB\-\-fetch\-log\-dir\fR=\fIDIR\fR
+Directory for individual fetch logs.
+.TP
+\fB\-\-whitelist\-from\fR=\fIFILE\fR
+Specifies a file containing a list of files to whitelist, one per line;
+lines beginning with # are ignored. Use this option multiple times in order
+to specify multiple whitelists.
+.SH "REPORTING BUGS"
+Please report bugs via http://bugs.gentoo.org/
+.SH "THANKS"
+Special thanks to Brian Harring, author of the mirror\-dist program from
+which emirrordist is derived.
+.SH "AUTHORS"
+.nf
+Zac Medico <zmedico@gentoo.org>
+Arfrever Frehtes Taifersar Arahesis <arfrever@apache.org>
+.fi
+.SH "FILES"
+.TP
+.B /etc/portage/make.conf
+Contains variables.
+.SH "SEE ALSO"
+.BR ebuild (5),
+.BR egencache (1),
+.BR make.conf (5),
+.BR portage (5)
diff --git a/man/env-update.1 b/man/env-update.1
index 4561ab4b9..9ceddab1a 100644
--- a/man/env-update.1
+++ b/man/env-update.1
@@ -1,26 +1,27 @@
.TH "ENV-UPDATE" "1" "Aug 2008" "Portage VERSION" "Portage"
-.SH NAME
+.SH "NAME"
env\-update \- updates environment settings automatically
-.SH SYNOPSIS
+.SH "SYNOPSIS"
\fBenv\-update\fR \fI[options]\fR
-.SH DESCRIPTION
+.SH "DESCRIPTION"
.B env\-update
reads the files in \fI/etc/env.d\fR and automatically generates
\fI/etc/profile.env\fR and \fI/etc/ld.so.conf\fR. Then \fBldconfig\fR(8)
is run to update \fI/etc/ld.so.cache\fR. \fBenv-update\fR is run by
\fBemerge\fR(1) automatically after each package merge. Also, if you
-make changes to \fI/etc/env.d\fR, you should run \fIenv-update\fR
-yourself for changes to take effect immediately. Note that this would
-only affect new processes. In order for the changes to affect your
-active shell, you will probably have to run \fIsource /etc/profile\fR
+make changes to \fI/etc/env.d\fR, you should run \fBenv-update\fR
+yourself for changes to take effect immediately. Note that this would
+only affect new processes. In order for the changes to affect your
+active shell, you will probably have to run \fIsource /etc/profile\fR
first.
-.SH OPTIONS
+.SH "OPTIONS"
.TP
.B \-\-no\-ldconfig
-Do not run ldconfig (and thus skip rebuilding the ld.so cache, etc...).
+Do not run \fBldconfig\fR (and thus skip rebuilding the \fIld.so.cache\fR,
+etc...).
.SH "REPORTING BUGS"
Please report bugs via http://bugs.gentoo.org/
-.SH AUTHORS
+.SH "AUTHORS"
Daniel Robbins <drobbins@gentoo.org>
.SH "SEE ALSO"
.BR emerge (1),
diff --git a/man/etc-update.1 b/man/etc-update.1
index 366e8500b..71900da4a 100644
--- a/man/etc-update.1
+++ b/man/etc-update.1
@@ -1,28 +1,24 @@
.TH "ETC-UPDATE" "1" "Mar 2012" "Portage VERSION" "Portage"
-.SH NAME
-etc-update \- handle configuration file updates
-.SH SYNOPSIS
-.BR etc-update
+.SH "NAME"
+etc\-update \- handle configuration file updates
+.SH "SYNOPSIS"
+.BR etc\-update
[\fIoptions\fR] [\fI--automode <mode>\fR] [\fIpaths to scan\fR]
-.SH DESCRIPTION
-.I etc-update
-is supposed to be run after merging a new package to see if
+.SH "DESCRIPTION"
+\fIetc\-update\fR is supposed to be run after merging a new package to see if
there are updates to the configuration files. If a new
-configuration file will override an old one,
-.I etc-update
-will prompt the user for a decision.
+configuration file will override an old one,
+\fIetc\-update\fR will prompt the user for a decision.
.PP
-.I etc-update
-will check all directories specified on the command line. If no paths
-are given, then the \fICONFIG_PROTECT\fR variable will be used. All
-config files found in \fICONFIG_PROTECT_MASK\fR will automatically be
-updated for you by \fIetc-update\fR. See \fBmake.conf\fR(5) for more
-information.
+\fIetc\-update\fR will check all directories specified on the command
+line. If no paths are given, then the \fICONFIG_PROTECT\fR variable
+will be used. All config files found in \fICONFIG_PROTECT_MASK\fR will
+automatically be updated for you by \fIetc\-update\fR.
+See \fBmake.conf\fR(5) for more information.
.PP
-.I etc-update
-respects the normal \fIPORTAGE_CONFIGROOT\fR and \fIEROOT\fR variables
-for finding the aforementioned config protect variables.
-.SH OPTIONS
+\fIetc\-update\fR respects the normal \fIPORTAGE_CONFIGROOT\fR and
+\fIEROOT\fR variables for finding the aforementioned config protect variables.
+.SH "OPTIONS"
.TP
.BR \-d ", " \-\-debug
Run with shell tracing enabled.
@@ -37,11 +33,11 @@ Automerge trivial changes only and quit.
Show settings and important decision info while running.
.TP
.BR "\-\-automode <mode>"
-Select one of the automatic merge modes. Valid modes are: -3 -5 -7 -9.
+Select one of the automatic merge modes. Valid modes are: \-3 \-5 \-7 \-9.
See the \fI\-\-help\fR text for more details.
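+.br
+For example, to auto\-merge with mode \-5 restricted to \fI/etc\fR
+(illustrative):
+.nf
+etc\-update \-\-automode \-5 /etc
+.fi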
.SH "REPORTING BUGS"
Please report bugs via http://bugs.gentoo.org/
-.SH AUTHORS
+.SH "AUTHORS"
.nf
Jochem Kossen and Leo Lipelis
Karl Trygve Kalleberg <karltk@gentoo.org>
@@ -49,8 +45,8 @@ Mike Frysinger <vapier@gentoo.org>
.fi
.SH "FILES"
.TP
-.B /etc/etc-update.conf
-Configuration settings for \fIetc-update\fR are stored here.
+.B /etc/etc\-update.conf
+Configuration settings for \fIetc\-update\fR are stored here.
.SH "SEE ALSO"
-.BR dispatch-conf (1),
+.BR dispatch\-conf (1),
.BR make.conf (5)
diff --git a/man/make.conf.5 b/man/make.conf.5
index 876a8a330..228101408 100644
--- a/man/make.conf.5
+++ b/man/make.conf.5
@@ -1,10 +1,20 @@
-.TH "MAKE.CONF" "5" "Jul 2012" "Portage VERSION" "Portage"
+.TH "MAKE.CONF" "5" "Jan 2014" "Portage VERSION" "Portage"
.SH "NAME"
make.conf \- custom settings for Portage
.SH "SYNOPSIS"
\fB/etc/make.conf\fR and \fB/etc/portage/make.conf\fR
.SH "DESCRIPTION"
-This file contains various variables that are used by Portage.
+This file contains various variables that are used by Portage. The file has a
+newline\-delimited list of \fI<key>=<value>\fR pairs (see the default file for
+examples) which are accessible from the environment of ebuilds. It supports
+simple shell\-like expansion of the form \fIvar="${var}"\fR, the source
+keyword and variable substitution, but not some of the more advanced BASH
+features like arrays and special parameter expansions. For more details, see
+the Simple lexical analysis documentation:
+\fIhttp://docs.python.org/3/library/shlex.html\fR. Note that if you source
+files, they need to be in the same shlex syntax for portage to read them.
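+.br
+.I Example (illustrative):
+.nf
+CFLAGS="\-O2 \-pipe"
+CXXFLAGS="${CFLAGS}"
+# pull in a hypothetical local overrides file
+source /etc/portage/make.conf.local
+.fi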
+.br
Portage will check the currently\-defined environment variables
first for any settings. If no environment settings are found,
Portage then checks the make.conf files. Both /etc/make.conf and
@@ -13,7 +23,7 @@ Portage then checks the make.conf files. Both /etc/make.conf and
If no setting is found in the make.conf files, Portage checks
make.globals. If no
setting is found there, the profile's default setting is grabbed
-from /etc/make.profile/make.defaults. Please note that all user
+from /etc/portage/make.profile/make.defaults. Please note that all user
settings should be made in the environment or in the make.conf
files, which are intended to be customized by the user.
.br
@@ -37,7 +47,7 @@ Defaults to the value of $CHOST.
\fBACCEPT_KEYWORDS\fR = \fI[space delimited list of KEYWORDS]\fR
Enable testing of ebuilds that have not yet been deemed 'stable'. Users
of the 'x86' architecture would set this to '~x86' while ppc users would
-set this to '~ppc'. This is an incremental variable. Only define a
+set this to '~ppc'. This is an incremental variable. Only define a
~arch.
.br
Defaults to the value of $ARCH.
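+.br
+.I Example (for an ~amd64 testing system):
+.nf
+ACCEPT_KEYWORDS="~amd64"
+.fi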
@@ -55,12 +65,12 @@ Defaults to the value of * -@EULA.
.br
.I Examples:
.nf
-# Accept any license
-ACCEPT_LICENSE="*"
-# Accept any license except the "public-domain" license
-ACCEPT_LICENSE="* -public-domain"
-# Only accept licenses in the FSF-APPROVED license group
-ACCEPT_LICENSE="-* @FSF-APPROVED"
+# Only accept licenses in the FREE license group (i.e. Free Software)
+ACCEPT_LICENSE="-* @FREE"
+# As before, but exclude the "Artistic" license
+ACCEPT_LICENSE="-* @FREE -Artistic"
+# Accept any license except those in the EULA license group (default)
+ACCEPT_LICENSE="* -@EULA"
.fi
.TP
\fBACCEPT_PROPERTIES\fR = \fI[space delimited list of properties]\fR
@@ -68,7 +78,7 @@ This variable is used to mask packages based on PROPERTIES restrictions.
In addition to property names, the \fI*\fR and \fI-*\fR wildcard tokens are
also supported. This variable can be temporarily overridden using the
\fB\-\-accept\-properties\fR option of \fBemerge\fR(1).
-See \fBebuild\fR(5) for more information about PROPERTIES.
+See \fBebuild\fR(5) for more information about PROPERTIES.
.br
Defaults to the value of *.
.br
@@ -80,6 +90,23 @@ ACCEPT_PROPERTIES="*"
ACCEPT_PROPERTIES="* -interactive"
.fi
.TP
+\fBACCEPT_RESTRICT\fR = \fI[space delimited list of RESTRICT tokens]\fR
+This variable is used to mask packages based on RESTRICT tokens.
+In addition to RESTRICT tokens, the \fI*\fR and \fI-*\fR wildcard tokens are
+also supported. This variable can be temporarily overridden using the
+\fB\-\-accept\-restrict\fR option of \fBemerge\fR(1).
+See \fBebuild\fR(5) for more information about RESTRICT.
+.br
+Defaults to the value of *.
+.br
+.I Examples:
+.nf
+# Accept any restrict tokens
+ACCEPT_RESTRICT="*"
+# Accept any tokens except "bindist"
+ACCEPT_RESTRICT="* -bindist"
+.fi
+.TP
.B CBUILD
This variable is passed by the \fIebuild scripts\fR to the \fIconfigure\fR
as \fI\-\-build=${CBUILD}\fR only if it is defined. Do not set this yourself
@@ -92,7 +119,7 @@ man page for more information.
Defaults to /var/tmp/ccache
.TP
\fBCCACHE_SIZE\fR = \fI"size"\fR
-This controls the space use limitations for ccache. The default is 2 gigabytes
+This controls the space use limitations for ccache. The default is 2 gigabytes
('2G'). Sizes are specified with 'G', 'M', or 'K'.
.TP
.B CFLAGS CXXFLAGS
@@ -137,13 +164,14 @@ automatically have /* appended to them.
Defaults to "/lib/modules/* *.py[co]".
.TP
\fBCONFIG_PROTECT\fR = \fI[space delimited list of files and/or directories]\fR
-All files and/or directories that are defined here will have "config file protection"
-enabled for them. See the \fBCONFIGURATION FILES\fR section
+All files and/or directories that are defined here will have "config file
+protection" enabled for them. See the \fBCONFIGURATION FILES\fR section
of \fBemerge\fR(1) for more information.
.TP
-\fBCONFIG_PROTECT_MASK\fR = \fI[space delimited list of files and/or directories]\fR
-All files and/or directories that are defined here will have "config file protection"
-disabled for them. See the \fBCONFIGURATION FILES\fR section
+\fBCONFIG_PROTECT_MASK\fR = \fI[space delimited list of files and/or \
+directories]\fR
+All files and/or directories that are defined here will have "config file
+protection" disabled for them. See the \fBCONFIGURATION FILES\fR section
of \fBemerge\fR(1) for more information.
.TP
.B CTARGET
@@ -166,6 +194,10 @@ See the \fBPORTDIR\fR documentation for more information.
.br
Defaults to /usr/portage/distfiles.
.TP
+.B DCO_SIGNED_OFF_BY
+This variable may contain a name and email address which will be used by
+\fBrepoman\fR(1) to add a Signed\-off\-by line to each commit message.
+.TP
.B DOC_SYMLINKS_DIR
If this variable contains a directory then symlinks to html documentation will
be installed into it.
@@ -175,9 +207,9 @@ Defines whether or not to ignore audible beeps when displaying important
informational messages. This variable is unset by default.
.TP
.B EMERGE_DEFAULT_OPTS
-Options to append to the end of the \fBemerge\fR(1) command line on every invocation.
-These options will not be appended to the command line if \-\-ignore\-default\-opts
-is specified.
+Options to append to the end of the \fBemerge\fR(1) command line on every
+invocation. These options will not be appended to the command line if
+\-\-ignore\-default\-opts is specified.
.TP
.B EMERGE_LOG_DIR
Controls the location of emerge.log and emerge-fetch.log.
@@ -207,7 +239,7 @@ should not be disabled by default.
.RS
.TP
.B assume\-digests
-When commiting work to cvs with \fBrepoman\fR(1), assume that all existing
+When committing work to cvs with \fBrepoman\fR(1), assume that all existing
SRC_URI digests are correct. This feature also affects digest generation via
\fBebuild\fR(1) and \fBemerge\fR(1) (emerge generates digests only when the
\fIdigest\fR feature is enabled). Existing digests for files that do not exist
@@ -234,8 +266,8 @@ Enable a special progress indicator when \fBemerge\fR(1) is calculating
dependencies.
.TP
.B ccache
-Enable portage support for the ccache package. If the ccache dir is not
-present in the user's environment, then portage will default to
+Enable portage support for the ccache package. If the ccache dir is not
+present in the user's environment, then portage will default to
${PORTAGE_TMPDIR}/ccache.
\fBWarning\fR: This feature is known to cause numerous compilation failures.
@@ -245,6 +277,10 @@ like "File not recognized: File truncated"), try recompiling the application
with ccache disabled before reporting a bug. Unless you are doing development
work, do not enable ccache.
.TP
+.B cgroup
+Use a Linux control group to control processes spawned by ebuilds. This
+allows emerge to safely kill all subprocesses when an ebuild phase exits.
+.TP
.B clean\-logs
Enable automatic execution of the command specified by the
PORT_LOGDIR_CLEAN variable. The default PORT_LOGDIR_CLEAN setting will
@@ -268,6 +304,13 @@ space. Make sure you have built both binutils and gdb with USE=zlib
support for this to work. See \fBsplitdebug\fR for general split debug
information (upon which this feature depends).
.TP
+.B compress\-index
+If set, a compressed copy of the 'Packages' index file will be written.
+This feature is intended for Gentoo binhosts using certain webservers
+(such as, but not limited to, Nginx with the gzip_static module) to avoid
+redundant on\-the\-fly compression. The resulting file will be called
+\[aq]Packages.gz\[aq] and its modification time will match that of 'Packages'.
+.TP
.B config\-protect\-if\-modified
This causes the \fBCONFIG_PROTECT\fR behavior to be skipped for files
that have not been modified since they were installed. This feature is
@@ -315,7 +358,7 @@ Both the \fBebuild\fR(1) command and the \fInoclean\fR feature cause the
\fIfail\-clean\fR feature to be automatically disabled.
.TP
.B getbinpkg
-Force emerges to always try to fetch files from the \fIPORTAGE_BINHOST\fR. See
+Force emerges to always try to fetch files from the \fIPORTAGE_BINHOST\fR. See
\fBmake.conf\fR(5) for more information.
.TP
.B installsources
@@ -347,6 +390,10 @@ would otherwise be useless with prefix configurations. This brings
compatibility with the prefix branch of portage, which also supports EPREFIX
for all EAPIs (for obvious reasons).
.TP
+.B ipc\-sandbox
+Isolate the ebuild phase functions from the host IPC namespace. Supported
+only on Linux. Requires IPC namespace support in the kernel.
+.TP
.B lmirror
When \fImirror\fR is enabled in \fBFEATURES\fR, fetch files even
when \fImirror\fR is also in the \fBebuild\fR(5) \fBRESTRICT\fR variable.
@@ -354,18 +401,17 @@ Do \fBNOT\fR use \fIlmirror\fR for clients that need to override \fBRESTRICT\fR
when fetching from a local mirror, but instead use a "local" mirror setting
in \fI/etc/portage/mirrors\fR, as described in \fBportage\fR(5).
.TP
+.B merge\-sync
+After a package is merged or unmerged, sync relevant files to
+disk in order to avoid data\-loss in the event of a power failure.
+This feature is enabled by default.
+.TP
.B metadata\-transfer
Automatically perform a metadata transfer when `emerge \-\-sync` is run.
In versions of portage >=2.1.5, this feature is disabled by
default. When metadata\-transfer is disabled, metadata cache from the
-${PORTDIR}/metadata/cache/ directory will be used directly (if available)
-and eclasses in ${PORTDIR}/eclass/ must not be modified except by
-`emerge \-\-sync` operations since the cache validation mechanism
-will not recognize eclass modifications. Normally, this issue only
-pertains to users of the rsync tree since the cvs tree does not contain
-a metadata/cache/ directory. Users of the rsync tree who want to modify
-eclasses should use \fBPORTDIR_OVERLAY\fR in order for the cache
-validation mechanism to work correctly.
+${repository_location}/metadata/md5\-cache/ directory will be used directly
+(if available).
.TP
.B mirror
Fetch everything in \fBSRC_URI\fR regardless of \fBUSE\fR settings,
@@ -378,12 +424,18 @@ isn't a symlink to /usr/lib64. To find the bad packages, we have a
portage feature called \fImultilib\-strict\fR. It will prevent emerge
from putting 64bit libraries into anything other than (/usr)/lib64.
.TP
+.B network\-sandbox
+Isolate the ebuild phase functions from the host network interfaces.
+Supported only on Linux. Requires network namespace support in the kernel.
+.TP
.B news
Enable GLEP 42 news support. See
\fIhttp://www.gentoo.org/proj/en/glep/glep-0042.html\fR.
.TP
.B noauto
-When utilizing \fBebuild\fR(1), only run the function requested.
+When utilizing \fBebuild\fR(1), only run the function requested. Also, this
+forces the corresponding ebuild and eclasses to be sourced again for each
+phase, in order to account for modifications.
.TP
.B noclean
Do not delete the source and temporary files after the merge process.
@@ -413,11 +465,6 @@ Use finer\-grained locks when installing packages, allowing for greater
parallelization. For additional parallelization, disable
\fIebuild\-locks\fR.
.TP
-.B parse\-eapi\-ebuild\-head
-Parse \fBEAPI\fR from the head of the ebuild as specified in PMS section
-7.3.1, and treat non\-conformant ebuilds as invalid. This feature is
-enabled by default, and will soon become enabled unconditionally.
-.TP
.B prelink\-checksums
If \fBprelink\fR(8) is installed then use it to undo any prelinks on files
before computing checksums for merge and unmerge. This feature is
@@ -434,6 +481,9 @@ already prelinked files to be merged.
.B preserve\-libs
Preserve libraries when the sonames change during upgrade or downgrade.
Libraries are preserved only if consumers of those libraries are detected.
+Preserved libraries are automatically removed when there are no remaining
+consumers. Run `emerge @preserved\-rebuild` in order to rebuild all
+consumers of preserved libraries.
.TP
.B protect\-owned
This is identical to the \fIcollision\-protect\fR feature except that files
@@ -447,10 +497,10 @@ selectively disable this feature. It is recommended to leave either
since otherwise file collisions between packages may result in files being
overwritten or uninstalled at inappropriate times.
If \fIcollision\-protect\fR is enabled then it takes precedence over
-\fIprotect\-owned\fR.
+\fIprotect\-owned\fR.
.TP
.B python\-trace
-Output a verbose trace of python execution to stderr when a command's
+Output a verbose trace of python execution to stderr when a command's
\-\-debug option is enabled.
.TP
.B sandbox
@@ -460,15 +510,16 @@ Enable sandbox\-ing when running \fBemerge\fR(1) and \fBebuild\fR(1).
Enable SELinux sandbox\-ing. Do not toggle this \fBFEATURE\fR yourself.
.TP
.B sfperms
-Stands for Smart Filesystem Permissions. Before merging packages to the
-live filesystem, automatically search for and set permissions on setuid
-and setgid files. Files that are setuid have the group and other read
-bits removed while files that are setgid have the other read bit removed.
+Stands for Smart Filesystem Permissions. Before merging packages to the
+live filesystem, automatically search for and set permissions on setuid
+and setgid files. Files that are setuid have the group and other read
+bits removed while files that are setgid have the other read bit removed.
See also \fIsuidctl\fR below.
.TP
.B sign
-When commiting work to cvs with \fBrepoman\fR(1), sign the Manifest with
-a GPG key. Read about the \fIPORTAGE_GPG_KEY\fR variable in \fBmake.conf\fR(5).
+When committing work to cvs with \fBrepoman\fR(1), sign the Manifest with
+a GPG key. Read about the \fIPORTAGE_GPG_KEY\fR variable in
+\fBmake.conf\fR(5).
.TP
.B skiprocheck
Skip write access checks on \fBDISTDIR\fR when fetching files. This is
@@ -483,20 +534,20 @@ incompatibility.
Store logs created by \fBPORTAGE_ELOG_SYSTEM="save"\fR in category
subdirectories of \fBPORT_LOGDIR/elog\fR, instead of using
\fBPORT_LOGDIR/elog\fR directly.
-.TP
+.TP
.B split\-log
Store build logs in category subdirectories of \fBPORT_LOGDIR/build\fR,
instead of using \fBPORT_LOGDIR\fR directly.
.TP
.B splitdebug
-Prior to stripping ELF etdyn and etexec files, the debugging info is
+Prior to stripping ELF etdyn and etexec files, the debugging info is
stored for later use by various debuggers. This feature is disabled by
\fBnostrip\fR. You should also consider setting \fBcompressdebug\fR so
the files don't suck up a lot of space. For installation of source code,
see \fBinstallsources\fR.
.TP
.B strict
-Have portage react strongly to conditions that have the potential to be
+Have portage react strongly to conditions that have the potential to be
dangerous (like missing or incorrect digests for ebuilds).
.TP
.B stricter
@@ -505,14 +556,17 @@ security provisions (for example textrels, executable stack). Read about
the \fIQA_STRICT_*\fR variables in \fBmake.conf\fR(5).
.TP
.B suidctl
-Before merging packages to the live filesystem, automatically strip setuid
+Before merging packages to the live filesystem, automatically strip setuid
bits from any file that is not listed in \fI/etc/portage/suidctl.conf\fR.
.TP
.B test
-Run package\-specific tests during each merge to help make sure
-the package compiled properly. See \fItest\fR in \fBebuild\fR(1)
+Run package\-specific tests during each merge to help make sure
+the package compiled properly. See \fItest\fR in \fBebuild\fR(1)
and \fIsrc_test()\fR in \fBebuild\fR(5). This feature implies the "test"
-\fBUSE\fR flag.
+\fBUSE\fR flag if it is a member of \fBIUSE\fR, either explicitly or
+implicitly (see \fBebuild\fR(5) for more information about \fBIUSE\fR).
+The "test" \fBUSE\fR flag is also automatically disabled when the
+"test" feature is disabled.
.TP
.B test\-fail\-continue
If "test" is enabled \fBFEATURES\fR and the test phase of an ebuild fails,
@@ -545,22 +599,28 @@ When portage is run as root, drop privileges to portage:portage during the
fetching of package sources.
.TP
.B userpriv
-Allow portage to drop root privileges and compile packages as
+Allow portage to drop root privileges and compile packages as
portage:portage without a sandbox (unless \fIusersandbox\fR is also used).
.TP
.B usersandbox
-Enable the sandbox in the compile phase, when running without root privs (\fIuserpriv\fR).
+Enable the sandbox in the compile phase, when running without root privs
+(\fIuserpriv\fR).
.TP
.B usersync
-Drop privileges to the owner of \fBPORTDIR\fR for \fBemerge(1) --sync\fR
-operations.
+Drop privileges to the owner of ${repository_location} for \fBemerge\fR(1)
+\fI\-\-sync\fR operations. Note that this feature assumes that all
+subdirectories of
+${repository_location} have the same ownership as ${repository_location} itself.
+It is the user's responsibility to ensure correct ownership, since otherwise
+Portage would have to waste time validating ownership for each and every sync
+operation.
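+.br
+If ownership needs fixing, something like the following may be used
+(path is illustrative):
+.nf
+chown \-R portage:portage /usr/portage
+.fi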
.TP
.B webrsync-gpg
Enable GPG verification when using \fIemerge\-webrsync\fR.
.TP
.B xattr
Preserve extended attributes (filesystem-stored metadata) when installing
-files (see \fBattr\fR(1)).
+files (see \fBattr\fR(1)). The \fBPORTAGE_XATTR_EXCLUDE\fR variable may be
+used to exclude specific attributes from being preserved.
.RE
.TP
.B FETCHCOMMAND
@@ -587,9 +647,9 @@ the \fIebuild scripts\fR. Merging 'mirrorselect' can help. Entries in this
variable that have no protocol and simply start with a '/' path separator may
be used to specify mounted filesystem mirrors.
.TP
-\fBhttp_proxy ftp_proxy\fR = \fI[protocol://host:port]\fR
-These vars are used if the sources must be downloaded from the
-internet by \fBwget\fR(1). They are only required if you use a
+\fBhttp_proxy ftp_proxy RSYNC_PROXY\fR = \fI[protocol://host:port]\fR
+These variables are used by network clients such as \fBwget\fR(1) and
+\fBrsync\fR(1). They are only required if you use a
proxy server for internet access.
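+.br
+.I Example (host and port are placeholders):
+.nf
+http_proxy="http://proxy.example.com:8080"
+RSYNC_PROXY="proxy.example.com:8080"
+.fi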
.TP
\fBINSTALL_MASK\fR = \fI[space delimited list of file names]\fR
@@ -615,7 +675,7 @@ enabled for these flags may be closed as INVALID.
.TP
.B MAKEOPTS
Use this variable if you want to use parallel make. For example, if you
-have a dual\-processor system, set this variable to "\-j2" or "\-j3" for
+have a dual\-processor system, set this variable to "\-j2" or "\-j3" for
enhanced build performance with many packages. Suggested settings are
between \fICPUs+1\fR and \fI2*CPUs+1\fR. In order to avoid
excess load, the \fB\-\-load\-average\fR option is recommended.
@@ -644,9 +704,9 @@ Defaults to /usr/portage/packages.
This variable defines the directory in which per\-ebuild logs are kept.
Logs are created only when this is set. They are stored as
${CATEGORY}:${PF}:YYYYMMDD\-HHMMSS.log in the directory specified. If the
-directory does not exist, it will be created automatically and group permissions
-will be applied to it. If the directory already exists, portage will not
-modify it's permissions.
+directory does not exist, it will be created automatically and group
+permissions will be applied to it. If the directory already exists, portage
+will not modify its permissions.
.TP
.B PORT_LOGDIR_CLEAN
This variable should contain a command for portage to call in order
@@ -658,16 +718,13 @@ unless \fBclean\-logs\fR is enabled in \fBFEATURES\fR.
\fBPORTAGE_BINHOST\fR = \fI[space delimited URI list]\fR
This is a list of hosts from which portage will grab prebuilt\-binary packages.
Each entry in the list must specify the full address of a directory
-serving tbz2's for your system. This is only used when running with
-the get binary pkg options are given to \fBemerge\fR. Review \fBemerge\fR(1)
-for more information. For versions of portage less that 2.1.6, this variable
-should point to the 'All' directory on the host that creates the binary
-packages and not to the root of the \fBPKGDIR\fR. Starting with portage 2.1.6,
-it should point to a directory containing a 'Packages' index file. If
-${PORTAGE_BINHOST}/Packages does not exist then portage will attempt to use
-the older protocol.
-.TP
-\fBPORTAGE_BINHOST_HEADER_URI\fR = \fI"ftp://login:pass@grp.mirror.site/pub/grp/i686/athlon\-xp/"\fR
+serving tbz2's for your system (this directory must contain a 'Packages' index
+file). This is only used when the get binary pkg options are given to
+\fBemerge\fR. Review \fBemerge\fR(1)
+for more information.
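+.br
+.I Example (URI is a placeholder):
+.nf
+PORTAGE_BINHOST="http://binhost.example.com/packages/amd64"
+.fi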
+.TP
+\fBPORTAGE_BINHOST_HEADER_URI\fR = \
+\fI"ftp://login:pass@grp.mirror.site/pub/grp/i686/athlon\-xp/"\fR
This variable only makes sense on a system that will serve as a binhost and
build packages for clients. It defines the URI header field for the package
index file which is located at ${PKGDIR}/Packages. Clients that have
@@ -680,6 +737,10 @@ setting as the base URI.
This variable contains options to be passed to the tar command for creation
of binary packages.
.TP
+.B PORTAGE_BINPKG_FORMAT
+This variable sets the default format used for binary packages. Possible
+values are tar and rpm, or both.
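+.br
+For illustration (assuming both formats are requested as a space\-separated
+list):
+.nf
+PORTAGE_BINPKG_FORMAT="tar rpm"
+.fi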
+.TP
\fBPORTAGE_BUNZIP2_COMMAND\fR = \fI[bunzip2 command string]\fR
This variable should contain a command that is suitable for portage to call
for bunzip2 extraction operations.
@@ -690,6 +751,23 @@ for bzip2 compression operations. \fBPORTAGE_BZIP2_COMMAND\fR will also be
called for extraction operation, with -d appended, unless the
\fBPORTAGE_BUNZIP2_COMMAND\fR variable is set.
.TP
+\fBPORTAGE_CHECKSUM_FILTER\fR = \fI[space delimited list of hash names]\fR
+This variable may be used to filter the hash functions that are used to
+verify integrity of files. Hash function names are case\-insensitive, and
+the \fI*\fR and \fI\-*\fR wildcard tokens are supported.
+.br
+Defaults to the value of *.
+.br
+.I Examples:
+.nf
+# Use all available hash functions
+PORTAGE_CHECKSUM_FILTER="*"
+# Use any function except whirlpool
+PORTAGE_CHECKSUM_FILTER="* \-whirlpool"
+# Only use sha256
+PORTAGE_CHECKSUM_FILTER="\-* sha256"
+.fi
+.TP
\fBPORTAGE_COMPRESS\fR = \fI"bzip2"\fR
This variable contains the command used to compress documentation during the
install phase.
@@ -794,7 +872,7 @@ Additional rsync options to be used by \fBemerge \-\-sync\fR.
Defaults to no value.
.TP
\fBPORTAGE_RSYNC_OPTS\fR = \fI[rsync options string]\fR
-Default rsync options to be used by \fBemerge \-\-sync\fR.
+Default rsync options to be used by \fBemerge \-\-sync\fR.
.br
\fBDon't change this unless you know exactly what you're doing!\fR
.br
@@ -810,6 +888,14 @@ addresses are exhausted.
.br
Defaults to -1.
.TP
+\fBPORTAGE_SSH_OPTS\fR = \fI[list of ssh options]\fR
+Additional ssh options to be used when portage executes ssh or sftp.
+This variable supports use of embedded quote characters to quote
+whitespace or special shell characters within arguments (embedded
+quotes must be escaped in make.conf settings).
+.br
+Defaults to no value.
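+.br
+For illustration, quoting an argument that contains whitespace (the key path
+is hypothetical; note the escaped embedded quotes):
+.nf
+PORTAGE_SSH_OPTS="\-o BatchMode=yes \-i \\"/path with spaces/id_rsa\\""
+.fi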
+.TP
\fBPORTAGE_SYNC_STALE\fR = \fI[NUMBER]\fR
Defines the number of days after the last `emerge \-\-sync` that a warning
message should be produced. A value of 0 will disable warnings.
@@ -820,6 +906,8 @@ Defaults to 30.
Defines the location of the temporary build directories.
.br
Defaults to /var/tmp.
+
+This should not be set to point anywhere under the location of any repository.
.TP
\fBPORTAGE_USERNAME\fR = \fI[user]\fR
Defines the username to use when executing in userpriv/etc... modes (i.e.
@@ -830,12 +918,18 @@ Defaults to portage.
\fBPORTAGE_WORKDIR_MODE\fR = \fI"0700"\fR
This variable controls permissions for \fIWORKDIR\fR (see \fBebuild\fR(5)).
.TP
+\fBPORTAGE_XATTR_EXCLUDE\fR = \fI[space delimited list of fnmatch patterns]\fR
+This variable may be used to exclude specific attributes from being preserved
+when \fBxattr\fR is in \fBFEATURES\fR.
+.br
+Defaults to "security.*" (security labels are special, see bug #461868).
+.TP
\fBPORTDIR\fR = \fI[path]\fR
-Defines the location of the Portage tree. This is the repository for all
-profile information as well as all ebuilds. If you change this, you must update
-your /etc/make.profile symlink accordingly.
+Defines the location of the main repository. This variable is deprecated in
+favor of settings in \fBrepos.conf\fR. If you change this, you must update
+your /etc/portage/make.profile symlink accordingly.
.br
-Defaults to /usr/portage.
+Defaults to /usr/portage.
.br
\fB***Warning***\fR
.br
@@ -843,13 +937,13 @@ Data stored inside \fBPORTDIR\fR is in peril of being overwritten or deleted by
the emerge \-\-sync command. The default value of
\fBPORTAGE_RSYNC_OPTS\fR will protect the default locations of
\fBDISTDIR\fR and \fBPKGDIR\fR, but users are warned that any other locations
-inside \fBPORTDIR\fR are not necessarily safe for data storage. You should not
-put other data (such as overlays) in your \fBPORTDIR\fB. Portage will walk
-directory structures and may arbitrary add invalid categories as packages.
+inside \fBPORTDIR\fR are not necessarily safe for data storage. You should not
+put other data (such as overlays) in your \fBPORTDIR\fR. Portage will walk
+directory structures and may arbitrarily add invalid categories as packages.
.TP
\fBPORTDIR_OVERLAY\fR = \fI"[path] [different\-path] [etc...]"\fR
-Defines the directories in which user made ebuilds may be stored and not
-overwriten when `emerge \-\-sync` is run. This is a space delimited list of
+Defines the locations of other repositories. This variable is deprecated in
+favor of settings in \fBrepos.conf\fR. It is a space\-delimited list of
directories.
.br
Defaults to no value.
@@ -870,10 +964,9 @@ settings from ebuilds. See also \fBebuild\fR(5).
Set this to cause portage to ignore any \fIQA_FLAGS_IGNORED\fR override
settings from ebuilds. See also \fBebuild\fR(5).
.TP
-\fBQA_STRICT_DT_HASH = \fI"set"\fR
-Set this to cause portage to ignore any \fIQA_DT_HASH\fR override
-settings from ebuilds. This variable is deprecated. Use
-\fIQA_STRICT_FLAGS_IGNORED\fR instead.
+\fBQA_STRICT_MULTILIB_PATHS = \fI"set"\fR
+Set this to cause portage to ignore any \fIQA_MULTILIB_PATHS\fR override
+settings from ebuilds. See also \fBebuild\fR(5).
.TP
\fBQA_STRICT_PRESTRIPPED = \fI"set"\fR
Set this to cause portage to ignore any \fIQA_PRESTRIPPED\fR override
@@ -887,25 +980,46 @@ be necessary in order to continue a partially downloaded file located at
\\${DISTDIR}/\\${FILE}.
.TP
\fBROOT\fR = \fI[path]\fR
-Use \fBROOT\fR to specify the target root filesystem to be used for merging
-packages or ebuilds. All \fBRDEPEND\fR and \fBPDEPEND\fR will be installed
-into \fBROOT\fR while all \fBDEPEND\fR will be still be installed into /.
-Typically, you should set this setting in the environment rather than in
-\fI/etc/make.conf\fR itself. It's commonly used for creating new build
-images. Make sure you use an absolute path.
+Use \fBROOT\fR to specify the target root filesystem to be used for merging
+packages or ebuilds.
+Typically, you should set this variable in the environment rather than in
+\fImake.conf\fR itself. It's commonly used for creating new build
+images. Make sure you use an absolute path. Refer to the
+\fBCross-compilation\fR section of \fBebuild\fR(5) for information about
+how dependencies are handled for \fBROOT\fR.
.br
Defaults to /.
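+.br
+For illustration, merging a package into a build image rooted at a
+hypothetical path:
+.nf
+ROOT=/mnt/build\-image emerge \-\-oneshot sys\-libs/zlib
+.fi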
.TP
\fBRPMDIR\fR = \fI[path]\fR
Defines the location where created RPM packages will be stored.
.br
-Defaults to ${PORTDIR}/rpm.
+Defaults to /usr/portage/rpm.
.TP
\fBSYNC\fR = \fI[RSYNC]\fR
Insert your preferred rsync mirror here. This rsync server
is used to sync the local portage tree when `emerge \-\-sync` is run.
-.br
+
+Note that the \fBSYNC\fR variable is now deprecated, and instead the
+sync\-type and sync\-uri attributes in repos.conf should be used. See
+\fBportage\fR(5) for more information.
+
Defaults to rsync://rsync.gentoo.org/gentoo\-portage
+.RS
+.TP
+.B Usage:
+(rsync|ssh)://[username@]hostname[:port]/(module|path)
+.TP
+.B Examples:
+rsync://private\-mirror.com/portage\-module
+.br
+rsync://rsync\-user@private\-mirror.com:873/gentoo\-portage
+.br
+ssh://ssh\-user@192.168.0.1:22/usr/portage
+.br
+ssh://ssh\-user@192.168.0.1:22/\\${HOME}/portage\-storage
+.TP
+Note: For the ssh:// scheme, key\-based authentication might be of interest.
+.RE
.TP
\fBUNINSTALL_IGNORE\fR = \fI[space delimited list of fnmatch patterns]\fR
This variable prevents uninstallation of files that match
@@ -920,15 +1034,6 @@ This variable contains options that control the build behavior of several
packages. More information in \fBebuild\fR(5). Possible USE values
can be found in \fI/usr/portage/profiles/use.desc\fR.
.TP
-\fBUSE_EXPAND\fR = \fI[space delimited list of variable names]\fR
-Any variable listed here will be used to augment USE by inserting a new flag
-for every value in that variable, so USE_EXPAND="FOO" and FOO="bar bla" results
-in USE="foo_bar foo_bla".
-.TP
-\fBUSE_EXPAND_HIDDEN\fR = \fI[space delimited list of variable names]\fR
-Names of \fBUSE_EXPAND\fR variables that should not be shown in the verbose merge
-list output of the \fBemerge\fR(1) command.
-.TP
\fBUSE_ORDER\fR = \fI"env:pkg:conf:defaults:pkginternal:repo:env.d"\fR
Determines the precedence of layers in the incremental stacking of the USE
variable. Precedence decreases from left to right such that env overrides
@@ -975,14 +1080,17 @@ Daniel Robbins <drobbins@gentoo.org>
Nicholas Jones <carpaski@gentoo.org>
Mike Frysinger <vapier@gentoo.org>
Saleem Abdulrasool <compnerd@gentoo.org>
+Arfrever Frehtes Taifersar Arahesis <arfrever@apache.org>
.fi
.SH "FILES"
.TP
\fB/etc/make.conf\fR and \fB/etc/portage/make.conf\fR
-Contains variables for the build\-process and overwrites those in make.defaults.
+Contains variables for the build\-process and overwrites those in
+make.defaults.
.TP
.B /usr/share/portage/config/make.globals
-Contains the default variables for the build\-process, you should edit \fI/etc/make.conf\fR instead.
+Contains the default variables for the build\-process, you should edit
+\fI/etc/portage/make.conf\fR instead.
.TP
.B /etc/portage/color.map
Contains variables customizing colors.
@@ -998,6 +1106,6 @@ Contains a list of all local USE variables.
.BR ebuild (1),
.BR ebuild (5)
.TP
-The \fI/usr/sbin/ebuild.sh\fR script.
+The \fI/usr/lib/portage/bin/ebuild.sh\fR script.
.TP
The helper apps in \fI/usr/lib/portage/bin\fR.
diff --git a/man/portage.5 b/man/portage.5
index ad84ff1a7..e399f0fca 100644
--- a/man/portage.5
+++ b/man/portage.5
@@ -1,26 +1,21 @@
-.TH "PORTAGE" "5" "Jun 2012" "Portage VERSION" "Portage"
+.TH "PORTAGE" "5" "Feb 2014" "Portage VERSION" "Portage"
.SH NAME
portage \- the heart of Gentoo
.SH "DESCRIPTION"
-The current portage code uses many different configuration files, most of which
-are unknown to users and normal developers. Here we will try to collect all
-the odds and ends so as to help users more effectively utilize portage. This
+The current portage code uses many different configuration files, most of which
+are unknown to users and normal developers. Here we will try to collect all
+the odds and ends so as to help users more effectively utilize portage. This
is a reference only for files which do not already have a man page.
-All files in the make.profile directory may be tweaked via parent profiles
-when using cascading profiles. For more info, please see
+All files in the make.profile directory may be tweaked via parent profiles
+when using cascading profiles. For more info, please see
http://www.gentoo.org/proj/en/releng/docs/cascading-profiles.xml
.IP Note:
-If you are looking for information on how to emerge something, please see
+If you are looking for information on how to emerge something, please see
.BR emerge (1).
.SH "SYNOPSIS"
.TP
-.BR /etc/
-.nf
-.BR make.conf (5)
-.fi
-.TP
-\fB/etc/make.profile/\fR or \fB/etc/portage/make.profile/\fR
+\fB/etc/portage/make.profile/\fR or \fB/etc/make.profile/\fR
site\-specific overrides go in \fB/etc/portage/profile/\fR
.nf
deprecated
@@ -36,10 +31,14 @@ package.unmask
package.use
package.use.force
package.use.mask
+package.use.stable.force
+package.use.stable.mask
parent
profile.bashrc
use.force
use.mask
+use.stable.mask
+use.stable.force
virtuals
.fi
.TP
@@ -49,10 +48,11 @@ bashrc
categories
color.map
license_groups
-make.conf
+.BR make.conf (5)
mirrors
modules
package.accept_keywords
+package.accept_restrict
package.env
package.keywords
package.license
@@ -67,7 +67,7 @@ repos.conf
package-specific bashrc files
.TP
.BR /etc/portage/profile/
-site-specific overrides of \fB/etc/make.profile/\fR
+site-specific overrides of \fB/etc/portage/make.profile/\fR
.TP
.BR /etc/portage/sets/
user\-defined package sets
@@ -90,6 +90,8 @@ package.unmask
package.use
package.use.force
package.use.mask
+package.use.stable.force
+package.use.stable.mask
profiles.desc
repo_name
thirdpartymirrors
@@ -97,11 +99,14 @@ use.desc
use.force
use.local.desc
use.mask
+use.stable.mask
+use.stable.force
.fi
.TP
.BR /usr/share/portage/config/
.nf
make.globals
+repos.conf
.fi
.TP
.BR /var/cache/edb/
@@ -117,8 +122,8 @@ world
world_sets
.fi
.SH "GLOSSARY"
-In the following sections, some terminology may be foreign to you or used
-with meaning specific to Portage. Please see the referenced manpages for
+In the following sections, some terminology may be foreign to you or used
+with meaning specific to Portage. Please see the referenced manpages for
more detailed explanations.
.RS
.TP
@@ -127,7 +132,7 @@ An atom is either of the form category/package or consists of an operator
followed by category/package followed by a hyphen and a version specification.
An atom might be suffixed by a slot specification.
.br
-More reading:
+More reading:
.BR ebuild (5)
.B Extended Atom Syntax
@@ -141,8 +146,7 @@ configuration files and command line arguments for programs such as
Atoms with repository constraints have a '::' separator appended to the
right side, followed by a repository name. Each repository name should
correspond to the value of a \fBrepo_name\fR entry from one of the
-repositories that is configured via the \fBPORTDIR\fR or
-\fBPORTDIR_OVERLAY\fR variables (see \fBmake.conf\fR(5)).
+repositories that are configured in the \fBrepos.conf\fR file.
.I Examples:
.nf
@@ -165,6 +169,8 @@ next to each other.
# match anything with a version containing 9999, which can be used in
# package.mask to prevent emerge --autounmask from selecting live ebuilds
=*/*-*9999*
+# match anything with a version containing _beta
+=*/*-*_beta*
# match anything from the 'sys\-apps' category
sys\-apps/*
# match packages named 'zlib' from any category
@@ -181,38 +187,31 @@ net\-*/*
.B KEYWORD
Each architecture has a unique KEYWORD.
.br
-More reading:
+More reading:
.BR ebuild (5)
.TP
.B virtual
-A DEPEND atom that is part of the "virtual" category. They are used
-when different packages can satisfy a dependency and only one of them is
+A DEPEND atom that is part of the "virtual" category. They are used
+when different packages can satisfy a dependency and only one of them is
needed.
.br
-More reading:
+More reading:
.BR ebuild (5)
.RE
.SH "SPECIFIC FILE DESCRIPTIONS"
.TP
-.BR /etc/
-.RS
-.TP
-.BR make.conf
-The global custom settings for Portage. See \fBmake.conf\fR(5).
-.RE
-.TP
-\fB/etc/make.profile/\fR or \fB/etc/portage/make.profile/\fR
-This is usually just a symlink to the correct profile in
-\fB/usr/portage/profiles/\fR. Since it is part of the portage tree, it
-may easily be updated/regenerated by running `emerge \-\-sync`. It defines
-what a profile is (usually arch specific stuff). If you need a custom
-profile, then you should make your own \fB/etc/make.profile/\fR
-directory and populate it. However, if you just wish to override some
+\fB/etc/portage/make.profile/\fR or \fB/etc/make.profile/\fR
+This is usually just a symlink to the correct profile in
+\fB/usr/portage/profiles/\fR. Since it is part of the portage tree, it
+may easily be updated/regenerated by running `emerge \-\-sync`. It defines
+what a profile is (usually arch specific stuff). If you need a custom
+profile, then you should make your own \fBmake.profile\fR
+directory and populate it. However, if you just wish to override some
settings, use \fB/etc/portage/profile/\fR (it supports all of the same file
-types that \fB/etc/make.profile/\fR does, except parent). Do NOT edit the
-settings in \fB/etc/make.profile/\fR because they WILL be lost with the next
-`emerge \-\-sync`. If both \fB/etc/make.profile/\fR and
-\fB/etc/portage/make.profile/\fR exist, then \fB/etc/make.profile/\fR
+types that \fBmake.profile\fR does, except parent). Do NOT edit the
+settings in \fBmake.profile\fR because they WILL be lost with the next
+`emerge \-\-sync`. If both \fB/etc/portage/make.profile/\fR and
+\fB/etc/make.profile/\fR exist, then \fB/etc/portage/make.profile/\fR
will be preferred.
Any file in this directory, directories of other profiles or top-level
@@ -224,33 +223,39 @@ portage-2.1.6.7, and it is not included in PMS at this time.
.I Example:
.nf
-${PORTDIR}/profiles/package.mask/removals
-${PORTDIR}/profiles/package.mask/testing
+${repository_location}/profiles/package.mask/removals
+${repository_location}/profiles/package.mask/testing
.fi
.RS
.TP
.BR deprecated
-The existence of this file marks a profile as deprecated, meaning it is
-not supported by Gentoo anymore. The first line must be the profile to which
-users are encouraged to upgrade, optionally followed by some instructions
+The existence of this file marks a profile as deprecated, meaning it is
+not supported by Gentoo anymore. The first line must be the profile to which
+users are encouraged to upgrade, optionally followed by some instructions
explaining how they can upgrade.
.I Example:
.nf
default-linux/x86/2005.0
# emerge -n '>=sys-apps/portage-2.0.51'
-# rm -f /etc/make.profile
-# ln -s /usr/portage/profiles/default-linux/alpha/2005.0 /etc/make.profile
+# rm -f /etc/portage/make.profile
+# ln -s /usr/portage/profiles/default-linux/alpha/2005.0 \
+/etc/portage/make.profile
.fi
.TP
.BR eapi
The first line of this file specifies the \fBEAPI\fR to which files in the
same directory conform. See \fBebuild\fR(5) for information about \fBEAPI\fR
-and related features.
+and related features. Beginning with \fBEAPI 5\fR, new USE
+configuration files are supported: use.stable.mask,
+use.stable.force, package.use.stable.mask and
+package.use.stable.force. These files behave similarly to
+previously supported USE configuration files, except that they
+only influence packages that are merged due to a stable keyword.
.TP
.BR make.defaults
-The profile default settings for Portage. The general format is described
-in \fBmake.conf\fR(5). The \fImake.defaults\fR for your profile defines a
+The profile default settings for Portage. The general format is described
+in \fBmake.conf\fR(5). The \fImake.defaults\fR for your profile defines a
few specific variables too:
.PD 0
@@ -259,21 +264,66 @@ few specific variables too:
.BR ARCH
Architecture type (x86/ppc/hppa/etc...).
.TP
+\fBIUSE_IMPLICIT\fR = \fI[space delimited list of USE flags]\fR
+Defines implicit \fBIUSE\fR for ebuilds using \fBEAPI 5\fR or
+later. Flags that come from \fBUSE_EXPAND\fR or
+\fBUSE_EXPAND_UNPREFIXED\fR variables do not belong in
+\fBIUSE_IMPLICIT\fR, since \fBUSE_EXPAND_VALUES_*\fR variables
+are used to define implicit \fBIUSE\fR for those flags. See
+\fBebuild\fR(5) for more information about \fBIUSE\fR.
+.TP
.B USERLAND = \fI"GNU"\fR
Support BSD/cygwin/etc...
.TP
+\fBUSE_EXPAND\fR = \fI[space delimited list of variable names]\fR
+Any variable listed here will be used to augment USE by inserting a new flag
+for every value in that variable, so USE_EXPAND="FOO" and FOO="bar bla" results
+in USE="foo_bar foo_bla".
+.TP
+\fBUSE_EXPAND_HIDDEN\fR = \fI[space delimited list of variable names]\fR
+Names of \fBUSE_EXPAND\fR variables that should not be shown in the verbose
+merge list output of the \fBemerge\fR(1) command.
+.TP
+\fBUSE_EXPAND_IMPLICIT\fR = \fI[space delimited list of variable names]\fR
+Defines \fBUSE_EXPAND\fR and \fBUSE_EXPAND_UNPREFIXED\fR
+variables for which the corresponding USE flags may have
+implicit \fBIUSE\fR for ebuilds using \fBEAPI 5\fR or later.
+.TP
+\fBUSE_EXPAND_UNPREFIXED\fR = \fI[space delimited list of variable names]\fR
+Any variable listed here will be used to augment USE by
+inserting a new flag for every value in that variable, so
+USE_EXPAND_UNPREFIXED="FOO" and FOO="bar bla" results in
+USE="bar bla".
+.TP
+\fBUSE_EXPAND_VALUES_ARCH\fR = \fI[space delimited list of ARCH values]\fR
+Defines ARCH values used to generate implicit
+\fBIUSE\fR for ebuilds using \fBEAPI 5\fR or later.
+.TP
+\fBUSE_EXPAND_VALUES_ELIBC\fR = \fI[space delimited list of ELIBC values]\fR
+Defines ELIBC values used to generate implicit
+\fBIUSE\fR for ebuilds using \fBEAPI 5\fR or later.
+.TP
+\fBUSE_EXPAND_VALUES_KERNEL\fR = \fI[space delimited list of KERNEL values]\fR
+Defines KERNEL values used to generate implicit
+\fBIUSE\fR for ebuilds using \fBEAPI 5\fR or later.
+.TP
+\fBUSE_EXPAND_VALUES_USERLAND\fR = \fI[space delimited list of USERLAND \
+values]\fR
+Defines USERLAND values used to generate implicit
+\fBIUSE\fR for ebuilds using \fBEAPI 5\fR or later.
+.TP
.B ELIBC = \fI"glibc"\fR
Support uClibc/BSD libc/etc...
.TP
.B PROFILE_ONLY_VARIABLES = \fI"ARCH"\fR
-Prevent critical variables from being changed by the user in make.conf
+Prevent critical variables from being changed by the user in make.conf
or the env.
.TP
.BR PROFILE_ARCH
-Distinguish machines classes that have the same \fBARCH\fR. All sparc
+Distinguish machine classes that have the same \fBARCH\fR. All sparc
machines have ARCH=sparc but set this to either 'sparc32' or 'sparc64'.
.TP
-.BR STAGE1_USE
+.BR BOOTSTRAP_USE
Special USE flags which may be needed when bootstrapping from stage1 to stage2.
.RE
.PD 1
@@ -289,8 +339,8 @@ Provides the list of packages that compose the special \fIsystem\fR set.
\- atoms without * only appear for legacy reasons
.fi
.I Note:
-In a cascading profile setup, you can remove packages in children
-profiles which were added by parent profiles by prefixing the atom with
+In a cascading profile setup, you can remove packages in children
+profiles which were added by parent profiles by prefixing the atom with
a '\-'.
.I Example:
@@ -305,16 +355,16 @@ a '\-'.
.fi
.TP
.BR packages.build
-A list of packages (one per line) that make up a stage1 tarball. Really only
+A list of packages (one per line) that make up a stage1 tarball. Really only
useful for stage builders.
.TP
.BR package.provided
-A list of packages (one per line) that portage should assume have been
+A list of packages (one per line) that portage should assume have been
provided. Useful for porting to non-Linux systems. Basically, it's a
list that replaces the \fBemerge \-\-inject\fR syntax.
-For example, if you manage your own copy of a 2.6 kernel, then you can
-tell portage that 'sys-kernel/development-sources-2.6.7' is already taken
+For example, if you manage your own copy of a 2.6 kernel, then you can
+tell portage that 'sys-kernel/development-sources-2.6.7' is already taken
care of and it should get off your back about it.
Portage will not attempt to update a package that is listed here unless
@@ -324,7 +374,9 @@ entries may cause installed packages satisfying equivalent dependencies
to be removed by \fBemerge\fR(1) \fB\-\-depclean\fR actions (see the
\fBACTIONS\fR section of the \fBemerge\fR(1) man page for more information).
-Virtual packages (virtual/*) should not be specified in package.provided.
+Virtual packages (virtual/*) should not be specified in package.provided,
+since virtual packages themselves do not provide any files, and
+package.provided is intended to represent packages that do provide files.
Depending on the type of virtual, it may be necessary to add an entry to the
virtuals file and/or add a package that satisfies a virtual to
package.provided.
@@ -349,12 +401,12 @@ x11-libs/qt-3.3.0
x11-base/xorg-x11-6.8
.fi
.TP
-.BR package.use.force
+\fBpackage.use.force\fR and \fBpackage.use.stable.force\fR
Per\-package USE flag forcing.
.I Note:
-In a cascading profile setup, you can remove USE flags in children
-profiles which were added by parent profiles by prefixing the flag with
+In a cascading profile setup, you can remove USE flags in children
+profiles which were added by parent profiles by prefixing the flag with
a '\-'.
.I Format:
@@ -371,12 +423,12 @@ a '\-'.
x11\-libs/qt \-mysql
.fi
.TP
-.BR package.use.mask
+\fBpackage.use.mask\fR and \fBpackage.use.stable.mask\fR
Per\-package USE flag masks.
.I Note:
-In a cascading profile setup, you can remove USE flags in children
-profiles which were added by parent profiles by prefixing the flag with
+In a cascading profile setup, you can remove USE flags in children
+profiles which were added by parent profiles by prefixing the flag with
a '\-'.
.I Format:
@@ -394,23 +446,31 @@ x11\-libs/qt \-mysql
.fi
.TP
.BR parent
-This contains a path to the parent profile. It may be either relative or
-absolute. The paths will be relative to the location of the profile. Most
-commonly this file contains '..' to indicate the directory above. Utilized
-only in cascading profiles.
+This contains paths to the parent profiles (one per line). They may be either
+relative (to the location of the profile) or absolute. Most commonly this file
+contains '..' to indicate the directory above. Utilized only in cascading
+profiles.
+
+When multiple parent profiles are specified, they are inherited in order from
+the first line to the last.
+
+If \fBlayout.conf\fR is new enough, you can also use the <repo>:<path>
+syntax. The <repo> is the same string as is stored in the \fBrepo_name\fR
+file (or omitted to refer to the current repo), and <path> is a subdir starting
+at profiles/.
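+.br
+For illustration, a parent file mixing a plain relative path with
+repo\-qualified paths:
+.nf
+\&..
+gentoo:targets/desktop
+:targets/desktop
+.fi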
.TP
.BR profile.bashrc
If needed, this file can be used to set up a special environment for ebuilds,
different from the standard root environment. The syntax is the same as for
any other bash script.
.TP
-.BR use.force
+\fBuse.force\fR and \fBuse.stable.force\fR
Some USE flags don't make sense to disable under certain conditions. Here we
list forced flags.
.I Note:
-In a cascading profile setup, you can remove USE flags in children
-profiles which were added by parent profiles by prefixing the flag with
+In a cascading profile setup, you can remove USE flags in children
+profiles which were added by parent profiles by prefixing the flag with
a '\-'.
.I Format:
@@ -419,14 +479,14 @@ a '\-'.
\- one USE flag per line
.fi
.TP
-.BR use.mask
-Some USE flags don't make sense on some archs (for example altivec on
-non\-ppc or mmx on non\-x86), or haven't yet been tested. Here we list
+\fBuse.mask\fR and \fBuse.stable.mask\fR
+Some USE flags don't make sense on some archs (for example altivec on
+non\-ppc or mmx on non\-x86), or haven't yet been tested. Here we list
the masked ones.
.I Note:
-In a cascading profile setup, you can remove USE flags in children
-profiles which were added by parent profiles by prefixing the flag with
+In a cascading profile setup, you can remove USE flags in children
+profiles which were added by parent profiles by prefixing the flag with
a '\-'.
.I Format:
@@ -444,12 +504,13 @@ doc
.fi
.TP
.BR virtuals
-This controls what packages will provide a virtual by default. For example,
-if a package needs to send e\-mail, it will need virtual/mta. In the absence
-of a package that provides virtual/mta (like qmail, sendmail, postfix, etc...),
-portage will look here to see what package to use. In this case, Gentoo uses
-net\-mail/ssmtp as the default (as defined in the virtuals file) because it's
-the package that does the very bare minimum to send e\-mail.
+The virtuals file controls default preferences for virtuals that
+are defined via the \fBPROVIDE\fR ebuild variable (see
+\fBebuild\fR(5)). Since Gentoo now uses \fBGLEP 37\fR virtuals
+instead of \fBPROVIDE\fR virtuals, the virtuals file is
+irrelevant for all Gentoo ebuilds. However, it is still possible
+for third\-parties to distribute ebuilds that make use of
+\fBPROVIDE\fR.
.I Format:
.nf
@@ -467,10 +528,10 @@ virtual/aspell\-dict app\-dicts/aspell\-en
.RE
.TP
.BR /etc/portage/
-Any file in this directory that begins with "package." can be more than just a
-flat file. If it is a directory, then all the files in that directory will be
-sorted in ascending alphabetical order by file name and summed together as if
-it were a single file.
+Any file in this directory that begins with "package.", as well as repos.conf,
+can be more than just a flat file. If it is a directory, then all the files in
+that directory will be sorted in ascending alphabetical order by file name and
+summed together as if it were a single file.
.I Example:
.nf
@@ -488,9 +549,8 @@ any other bash script.
Additional package-specific bashrc files can be created in /etc/portage/env.
.TP
.BR categories
-A simple list of valid categories that may be used in /usr/portage,
-PORTDIR_OVERLAY, and PKGDIR (see \fBmake.conf\fR(5)). This allows for custom
-categories to be created.
+A simple list of valid categories that may be used in repositories and PKGDIR
+(see \fBmake.conf\fR(5)). This allows for custom categories to be created.
.I Format:
.nf
@@ -508,14 +568,13 @@ Contains variables customizing colors. See \fBcolor.map\fR(5).
.TP
.BR make.conf
The global custom settings for Portage. See \fBmake.conf\fR(5).
-If present, this file will override settings from /etc/make.conf.
.TP
.BR mirrors
-Whenever portage encounters a mirror:// style URI it will look up the actual
-hosts here. If the mirror set is not found here, it will check the global
-mirrors file at /usr/portage/profiles/thirdpartymirrors. You may also set a
-special mirror type called "local". This list of mirrors will be checked
-before GENTOO_MIRRORS and will be used even if the package has
+Whenever portage encounters a mirror:// style URI it will look up the actual
+hosts here. If the mirror set is not found here, it will check the global
+mirrors file at /usr/portage/profiles/thirdpartymirrors. You may also set a
+special mirror type called "local". This list of mirrors will be checked
+before GENTOO_MIRRORS and will be used even if the package has
RESTRICT="mirror" or RESTRICT="fetch".
.I Format:
@@ -550,15 +609,15 @@ After changing the portdbapi.auxdbmodule setting, it may be necessary to
transfer or regenerate metadata cache. Users of the rsync tree need to
run `emerge \-\-metadata` if they have enabled FEATURES="metadata-transfer"
in \fBmake.conf\fR(5). In order to regenerate metadata for repositories
-listed in \fBPORTDIR_OVERLAY\fR or a cvs tree, run `emerge \-\-regen`
+not distributing pregenerated metadata cache, run `emerge \-\-regen`
(see \fBemerge\fR(1)). If you use something like the sqlite module and want
to keep all metadata in that format alone (useful for querying), enable
FEATURES="metadata-transfer" in \fBmake.conf\fR(5).
.TP
\fBpackage.accept_keywords\fR and \fBpackage.keywords\fR
-Per\-package ACCEPT_KEYWORDS. Useful for mixing unstable packages in with a normally
-stable system or vice versa. This will allow ACCEPT_KEYWORDS to be augmented
-for a single package. If both \fBpackage.accept_keywords\fR and
+Per\-package ACCEPT_KEYWORDS. Useful for mixing unstable packages in with a
+normally stable system or vice versa. This will allow ACCEPT_KEYWORDS to be
+augmented for a single package. If both \fBpackage.accept_keywords\fR and
\fBpackage.keywords\fR are present, both of them will be used, and values
from \fBpackage.accept_keywords\fR will override values from
\fBpackage.keywords\fR. The \fBpackage.accept_keywords\fR file is
@@ -593,22 +652,37 @@ three special tokens:
.fi
.I Additional Note:
-If you encounter the \fB-*\fR KEYWORD, this indicates that the package is known
-to be broken on all systems which are not otherwise listed in KEYWORDS. For
+If you encounter the \fB-*\fR KEYWORD, this indicates that the package is known
+to be broken on all systems which are not otherwise listed in KEYWORDS. For
example, a binary only package which is built for x86 will look like:
games-fps/quake3-demo-1.11.ebuild:KEYWORDS="-* x86"
-If you wish to accept this package anyways, then use one of the other keywords in your
-package.accept_keywords like this:
+If you wish to accept this package anyway, then use one of the other keywords
+in your package.accept_keywords like this:
games-fps/quake3-demo x86
.TP
+.BR package.accept_restrict
+This will allow ACCEPT_RESTRICT (see \fBmake.conf\fR(5)) to be augmented for a
+single package.
+
+.I Format:
+.nf
+\- comment lines begin with # (no inline comments)
+\- one DEPEND atom per line followed by additional RESTRICT tokens
+.fi
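+
+For illustration (the atom is hypothetical; bindist is a common RESTRICT
+token):
+.nf
+sys\-libs/example\-lib bindist
+.fi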
+.TP
.BR package.env
Per\-package environment variable settings. Entries refer to
environment files that are placed in the \fB/etc/portage/env/\fR
-directory and have the same format as \fBmake.conf\fR(5).
+directory and have the same format as \fBmake.conf\fR(5). Note that these
+files are interpreted much earlier than the package\-specific \fIbashrc\fR
+files which are described in a later section about \fB/etc/portage/env/\fR.
+Beginners should be careful to recognize the difference between these two types
+of files. When environment variable settings are all that's needed,
+\fBpackage.env\fR is the recommended approach to use.
.I Format:
.nf
@@ -618,13 +692,15 @@ directory and have the same format as \fBmake.conf\fR(5).
.I Example:
.nf
-# use environment variables from /etc/portage/env/glibc.conf for the glibc package
+# use environment variables from /etc/portage/env/glibc.conf for the glibc \
+package
sys\-libs/glibc glibc.conf
.fi
.TP
.BR package.license
-This will allow ACCEPT_LICENSE to be augmented for a single package.
+This will allow ACCEPT_LICENSE (see \fBmake.conf\fR(5)) to be augmented for a
+single package.
.I Format:
.nf
@@ -652,7 +728,8 @@ versions earlier than 1.0.4496. No problem!
.fi
.TP
.BR package.properties
-This will allow ACCEPT_PROPERTIES to be augmented for a single package.
+This will allow ACCEPT_PROPERTIES (see \fBmake.conf\fR(5)) to be augmented for
+a single package.
.I Format:
.nf
@@ -661,15 +738,15 @@ This will allow ACCEPT_PROPERTIES to be augmented for a single package.
.fi
.TP
.BR package.unmask
-Just like package.mask above, except here you list packages you want to
-unmask. Useful for overriding the global package.mask file (see
-above). Note that this does not override packages that are masked via
+Just like package.mask above, except here you list packages you want to
+unmask. Useful for overriding the global package.mask file (see
+above). Note that this does not override packages that are masked via
KEYWORDS.
.TP
.BR package.use
-Per\-package USE flags. Useful for tracking local USE flags or for
-enabling USE flags for certain packages only. Perhaps you develop GTK
-and thus you want documentation for it, but you don't want
+Per\-package USE flags. Useful for tracking local USE flags or for
+enabling USE flags for certain packages only. Perhaps you develop GTK
+and thus you want documentation for it, but you don't want
documentation for QT. Easy as pie my friend!
.I Format:
@@ -687,30 +764,153 @@ x11\-libs/qt \-mysql
.fi
.TP
.BR repos.conf
-Specifies \fIsite\-specific\fR repository configuration information. Note that
-configuration settings which are specified here do not apply to tools
-such as \fBrepoman\fR(1) and \fBegencache\fR(1), since operations
-performed by these tools
-are inherently \fBnot\fR \fIsite\-specific\fR. \fBWARNING:\fR Use of
-\fBrepos.conf\fR is generally not recommended since resulting changes in
-eclass inheritance (especially due to \fBeclass\-overrides\fR) may trigger
-performance issues under some circumstances (see \fBbug #124041\fR). When
-using \fBeclass\-overrides\fR, due to bug #276264, you must ensure that
-your portage tree does not contain a metadata/cache/ directory. If that
-directory exists then you should remove it entirely, and set
-PORTAGE_RSYNC_EXTRA_OPTS="\-\-exclude=/metadata/cache" in
-make.conf in order to exclude the metadata/cache/ directory during
-\fBemerge\fR(1) \-\-sync operations.
+Specifies \fIsite\-specific\fR repository configuration information.
+.br
+Configuration specified in \fBrepos.conf\fR can be overridden by the
+\fBPORTAGE_REPOSITORIES\fR environment variable, which has the same format as
+\fBrepos.conf\fR.
+
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- configuration of each repository is specified in a section starting with \
+"[${repository_name}]"
+\- attributes are specified in "${attribute} = ${value}" format
+.fi
+
+.I Attributes supported in DEFAULT section:
+.RS
+.RS
+.TP
+.B main\-repo
+Specifies the main repository.
+.TP
+.B eclass\-overrides
+Makes all repositories inherit eclasses from the specified repositories.
+.br
+Setting this attribute is generally not recommended since resulting changes
+in eclass inheritance may trigger performance issues due to invalidation
+of metadata cache.
+.br
+When 'force = eclass\-overrides' attribute is not set, \fBegencache\fR(1),
+\fBemirrordist\fR(1) and \fBrepoman\fR(1) ignore this attribute,
+since operations performed by these tools are inherently
+\fBnot\fR \fIsite\-specific\fR.
+.TP
+.B force
+Specifies names of attributes which should be unconditionally respected by
+\fBegencache\fR(1), \fBemirrordist\fR(1) and \fBrepoman\fR(1).
+.br
+Valid values: aliases, eclass\-overrides, masters
+.RE
+
+.I Attributes supported in sections of repositories:
+.RS
+.TP
+.B aliases
+Specifies aliases of the given repository.
+.br
+Setting this attribute is generally not recommended since resulting changes
+in eclass inheritance may trigger performance issues due to invalidation
+of metadata cache.
+.br
+When 'force = aliases' attribute is not set, \fBegencache\fR(1),
+\fBemirrordist\fR(1) and \fBrepoman\fR(1) ignore this attribute,
+since operations performed by these tools are inherently
+\fBnot\fR \fIsite\-specific\fR.
+.TP
+.B eclass\-overrides
+Makes the given repository inherit eclasses from the specified repositories.
+.br
+Setting this attribute is generally not recommended since resulting changes
+in eclass inheritance may trigger performance issues due to invalidation
+of metadata cache.
+.br
+When 'force = eclass\-overrides' attribute is not set, \fBegencache\fR(1),
+\fBemirrordist\fR(1) and \fBrepoman\fR(1) ignore this attribute,
+since operations performed by these tools are inherently
+\fBnot\fR \fIsite\-specific\fR.
+.TP
+.B force
+Specifies names of attributes which should be unconditionally respected by
+\fBegencache\fR(1), \fBemirrordist\fR(1) and \fBrepoman\fR(1).
+.br
+Valid values: aliases, eclass\-overrides, masters
+.TP
+.B location
+Specifies the location of the given repository.
+.TP
+.B masters
+Specifies the master repositories of the given repository.
+.br
+Setting this attribute is generally not recommended since resulting changes
+in eclass inheritance may trigger performance issues due to invalidation
+of metadata cache.
+.br
+When 'force = masters' attribute is not set, \fBegencache\fR(1),
+\fBemirrordist\fR(1) and \fBrepoman\fR(1) ignore this attribute,
+since operations performed by these tools are inherently
+\fBnot\fR \fIsite\-specific\fR.
+.TP
+.B priority
+Specifies the priority of the given repository.
+.TP
+.B sync\-cvs\-repo
+Specifies the CVS repository, used when sync\-type is set to cvs.
+.TP
+.B sync\-type
+Specifies the type of synchronization performed by `emerge \-\-sync`.
+.br
+Valid non\-empty values: cvs, git, rsync
+.br
+This attribute can be set to an empty value to disable synchronization of the
+given repository. An empty value is the default.
+.TP
+.B sync\-uri
+Specifies the URI of the repository used for synchronization performed by
+`emerge \-\-sync`.
+.br
+This attribute can be set to an empty value to disable synchronization of the
+given repository. An empty value is the default.
+.RS
+.TP
+Syntax:
+cvs: [cvs://]:access_method:[username@]hostname[:port]:/path
+.br
+git: (git|git+ssh|http|https)://[username@]hostname[:port]/path
+.br
+rsync: (rsync|ssh)://[username@]hostname[:port]/(module|path)
+.TP
+Examples:
+.RS
+rsync://private\-mirror.com/portage\-module
+.br
+rsync://rsync\-user@private\-mirror.com:873/gentoo\-portage
+.br
+ssh://ssh\-user@192.168.0.1:22/usr/portage
+.br
+ssh://ssh\-user@192.168.0.1:22/\\${HOME}/portage\-storage
+.RE
+.TP
+Note: For the ssh:// scheme, key\-based authentication might be of interest.
+.RE
+.RE
.I Example:
.nf
[DEFAULT]
+# make gentoo the main repository, which makes it the default master
+# repository for repositories that do not specify masters
+main\-repo = gentoo
# make all repositories inherit eclasses from the java\-overlay and
# java\-experimental repositories, with eclasses from java\-experimental
# taking precedence over those from java\-overlay
eclass\-overrides = java\-overlay java\-experimental
[gentoo]
+# repos with higher priorities are preferred when ebuilds with equal versions
+# are found in multiple repos (see the `emerge \-\-info \-\-verbose` repo
+# display for a listing of repos and their corresponding priorities).
+priority = 9999
# disable all eclass overrides for ebuilds from the gentoo repository
eclass\-overrides =
# when processing metadata/layout.conf from other repositories, substitute
@@ -728,8 +928,16 @@ masters = gentoo kde
# those master repos won't be required as dependencies (the user must
# ensure that any required dependencies such as eclasses are satisfied)
masters =
+
+# Repository 'gentoo' synchronized using CVS
+[gentoo]
+location = /usr/portage
+sync\-type = cvs
+sync\-uri = :pserver:anonymous@anoncvs.gentoo.org:/var/cvsroot
+sync\-cvs\-repo = gentoo\-x86
.fi
.RE
+.RE
.TP
.BR /etc/portage/env/
.RS
@@ -739,20 +947,9 @@ needed, then \fB/etc/portage/package.env\fR should be used instead of the
bashrc approach that is described here. Also note that special variables
such as \fBFEATURES\fR and \fBINSTALL_MASK\fR will not produce the intended
results if they are set in bashrc, and therefore
-\fB/etc/portage/package.env\fR should be used instead.
-
-set_unless_changed and unset_unless_changed functions can be used to set or
-unset given variables only if these variable have not been set to values
-different than values set in make.conf. This functionality can be useful for
-temporary overriding of these variables during emerge invocation. Variables
-set without using set_unless_changed will unconditionally override variables
-set during emerge invocation.
-
-.I Syntax:
-.nf
-set_unless_changed VARIABLE=VALUE
-unset_unless_changed VALUE
-.fi
+\fB/etc/portage/package.env\fR should be used instead. Lastly, note that these
+files are interpreted much later than the portage environment file
+\fBpackage.env\fR.
Portage will source all of these bashrc files after \fB/etc/portage/bashrc\fR
in the following order:
@@ -771,8 +968,10 @@ in the following order:
.RS
For each file in this directory, a package set is created with its name
corresponding to the name of the file. Each file should contain a list
-of package atoms, one per line. When referencing package sets in
-\fBemerge\fR(1) arguments, the set name is prefixed with \fB@\fR.
+of package atoms and nested package sets, one per line. When a package
+set is referenced as an \fBemerge\fR(1) argument or when it is
+referenced as a nested package set (inside of another package set), the
+set name is prefixed with \fB@\fR.
Also see \fB/var/lib/portage/world_sets\fR and the \fBemerge\fR(1)
\fB\-\-list\-sets\fR option.
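+
+For illustration, a hypothetical set file \fI/etc/portage/sets/office\fR
+containing one atom and one nested set:
+.nf
+app\-office/libreoffice
+@fonts
+.fi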
@@ -782,49 +981,127 @@ Also see \fB/var/lib/portage/world_sets\fR and the \fBemerge\fR(1)
.RS
.TP
.BR layout.conf
-Specifies information about the repository layout. A
-"masters" attribute is supported, which is used to specify names of
-repositories which satisfy dependencies on eclasses and/or ebuilds. Each
-repository name should correspond the value of a \fBrepo_name\fR entry
-from one of the repositories that is configured via the \fBPORTDIR\fR or
-\fBPORTDIR_OVERLAY\fR variables (see \fBmake.conf\fR(5)). Repositories listed
-toward the right of the \fBmasters\fR list take precedence over those listed
-toward the left of the list. An "aliases" attribute is also supported, which
-behaves like an "aliases" attribute in \fBrepos.conf\fR. \fISite-specific\fR
-overrides to \fBlayout.conf\fR settings may be specified in
-\fB/etc/portage/repos.conf\fR. Settings in \fBrepos.conf\fR take
-precedence over settings in \fBlayout.conf\fR, except tools such as
-\fBrepoman\fR(1) and \fBegencache\fR(1) will entirely ignore
+Specifies information about the repository layout.
+\fISite-specific\fR overrides to \fBlayout.conf\fR settings may be specified in
+\fB/etc/portage/repos.conf\fR.
+Settings in \fBrepos.conf\fR take precedence over settings in
+\fBlayout.conf\fR, except tools such as \fBrepoman\fR(1) and \fBegencache\fR(1)
+ignore "aliases", "eclass-overrides" and "masters" attributes set in
\fBrepos.conf\fR since their operations are inherently \fBnot\fR
\fIsite\-specific\fR.
+.I Format:
+.nf
+\- comments begin with # (no inline comments)
+\- attributes are specified in "${attribute} = ${value}" format
+.fi
+
+.I Supported attributes:
+.RS
+.RS
+.TP
+.BR aliases
+Behaves like an "aliases" attribute in \fBrepos.conf\fR.
+.TP
+.BR eapis\-banned
+List of EAPIs which are not allowed in this repo.
+.TP
+.BR eapis\-deprecated
+List of EAPIs which are allowed but generate warnings when used.
+.TP
+.BR masters
+Names of repositories which satisfy dependencies on eclasses and from which
+settings specified in various repository\-level files (\fBpackage.mask\fR,
+\fBpackage.use.mask\fR, \fBuse.mask\fR etc.) are inherited. Each repository
+name should correspond to the value of a \fBrepo_name\fR entry from one of
+the repositories that is configured in \fBrepos.conf\fR file. Repositories
+listed toward the right of the \fBmasters\fR list take precedence over those
+listed toward the left of the list.
+.TP
+.BR repo\-name " = <value of profiles/repo_name>"
+The name of this repository (overrides profiles/repo_name if it exists).
+.TP
+.BR sign\-commits " = [true|" false "]"
+Boolean value whether we should sign commits in this repo.
+.TP
+.BR sign\-manifests " = [" true "|false]"
+Boolean value whether we should sign Manifest files in this repo.
+.TP
+.BR thin\-manifests " = [true|" false "]"
+Boolean value whether Manifest files contain only DIST entries.
+.TP
+.BR use\-manifests " = [" strict "|true|false]"
+How Manifest files get used. Possible values are "strict" (require an entry
+for every file), "true" (if an entry exists for a file, enforce it), or "false"
+(don't check Manifest files at all).
+.TP
+.BR manifest\-hashes
+List of hashes to generate/check in Manifest files. Valid hashes depend on the
+current version of portage; see the portage.const.MANIFEST2_HASH_FUNCTIONS
+constant for the current list.
+.TP
+.BR update\-changelog " = [true|" false "]"
+The default setting for repoman's \-\-echangelog option.
+.TP
+.BR cache\-formats " = [pms] [md5-dict]"
+The cache formats supported in the metadata tree. There is the old "pms" format
+and the newer/faster "md5-dict" format. The default is to detect which cache
+directories are present.
+.TP
+.BR profile\-formats " = [pms|portage-1|portage-2]"
+Control functionality available to profiles in this repo such as which files
+may be dirs, or the syntax available in parent files. Use "portage-2" if you're
+unsure. The default is "portage-1-compat" mode which is meant to be compatible
+with old profiles, but is not allowed to be opted into directly.
+.RE
+.RE
+
+.RS
.I Example:
.nf
+# Specify the repository name (overriding profiles/repo_name).
+repo\-name = foo-overlay
+
# eclasses provided by java-overlay take precedence over identically named
# eclasses that are provided by gentoo
masters = gentoo java-overlay
+
# indicate that this repo can be used as a substitute for foo-overlay
aliases = foo-overlay
+
+# indicate that ebuilds with the specified EAPIs are banned
+eapis\-banned = 0 1
+
+# indicate that ebuilds with the specified EAPIs are deprecated
+eapis\-deprecated = 2 3
+
# sign commits in this repo, which requires Git >=1.7.9, and
# key configured by `git config user.signingkey key_id`
sign\-commits = true
-# do not sign manifests in this repo
+
+# do not sign Manifest files in this repo
sign\-manifests = false
-# thin\-manifests only contain DIST entries
+
+# Manifest files only contain DIST entries
thin\-manifests = true
+
# indicate that this repo requires manifests for each package, and is
# considered a failure if a manifest file is missing/incorrect
use\-manifests = strict
+
# customize the set of hashes generated for Manifest entries
manifest\-hashes = SHA256 SHA512 WHIRLPOOL
+
# indicate that this repo enables repoman's --echangelog=y option automatically
update\-changelog = true
+
# indicate that this repo contains both md5-dict and pms cache formats,
# which may be generated by egencache(1)
cache\-formats = md5-dict pms
+
# indicate that this repo contains profiles that may use directories for
-# package.mask, package.provided, package.use, package.use.mask,
-# package.use.force, use.mask and use.force.
+# package.mask, package.provided, package.use, package.use.force,
+# package.use.mask, package.use.stable.force, package.use.stable.mask,
+# use.force, use.mask, use.stable.force, and use.stable.mask.
# profile\-formats = portage-1
# indicate that paths such as 'gentoo:targets/desktop' or ':targets/desktop' in
# profile parent files can be used to express paths relative to the root
@@ -833,9 +1110,10 @@ cache\-formats = md5-dict pms
profile\-formats = portage-2
.fi
.RE
+.RE
.TP
.BR /usr/portage/profiles/
-Global Gentoo settings that are controlled by the developers. To override
+Global Gentoo settings that are controlled by the developers. To override
these settings, you can use the files in \fB/etc/portage/\fR.
.RS
.TP
@@ -855,8 +1133,8 @@ sparc
.fi
.TP
.BR categories
-A simple list of valid categories that may be used in /usr/portage,
-PORTDIR_OVERLAY, and PKGDIR (see \fBmake.conf\fR(5)).
+A simple list of valid categories that may be used in repositories and PKGDIR
+(see \fBmake.conf\fR(5)).
.I Format:
.nf
@@ -925,10 +1203,10 @@ net-analyzer/netcat -*
.fi
.TP
.BR package.mask
-This contains a list of DEPEND atoms for packages that should not be installed
-in any profile. Useful for adding the latest KDE betas and making sure no
-one accidentally upgrades to them. Also useful for quickly masking specific
-versions due to security issues. ALWAYS include a comment explaining WHY the
+This contains a list of DEPEND atoms for packages that should not be installed
+in any profile. Useful for adding the latest KDE betas and making sure no
+one accidentally upgrades to them. Also useful for quickly masking specific
+versions due to security issues. ALWAYS include a comment explaining WHY the
package has been masked and WHO is doing the masking.
.I Format:
@@ -948,7 +1226,7 @@ package has been masked and WHO is doing the masking.
.fi
.TP
.BR profiles.desc
-List all the current stable and development profiles. If a profile is listed
+List all the current stable and development profiles. If a profile is listed
here, then it will be checked by repoman.
.I Format:
.nf
@@ -970,10 +1248,11 @@ x86-linux prefix/linux/x86 exp
.BR repo_name
The first line of the file should define a unique repository name. The name
may contain any of the characters [A\-Za\-z0\-9_\-]. It must not begin with a
-hyphen.
+hyphen. If the repo\-name attribute is specified in layout.conf, then that
+setting will take precedence.
.TP
.BR thirdpartymirrors
-Controls the mapping of mirror:// style URIs to actual lists of
+Controls the mapping of mirror:// style URIs to actual lists of
mirrors. Keeps us from overloading a single server.
.I Format:
@@ -984,15 +1263,17 @@ mirrors. Keeps us from overloading a single server.
.I Example:
.nf
-sourceforge http://aleron.dl.sourceforge.net/sourceforge http://unc.dl.sourceforge.net/sourceforge
+sourceforge http://aleron.dl.sourceforge.net/sourceforge \
+http://unc.dl.sourceforge.net/sourceforge
-gentoo http://distro.ibiblio.org/pub/linux/distributions/gentoo/distfiles/ ftp://ftp.gtlib.cc.gatech.edu/pub/gentoo/distfiles
+gentoo http://distro.ibiblio.org/pub/linux/distributions/gentoo/distfiles/ \
+ftp://ftp.gtlib.cc.gatech.edu/pub/gentoo/distfiles
kernel http://www.kernel.org/pub http://www.us.kernel.org/pub
.fi
.TP
.BR use.desc
-All global USE flags must be listed here with a description of what they do.
+All global USE flags must be listed here with a description of what they do.
.I Format:
.nf
@@ -1008,7 +1289,7 @@ doc \- Adds extra documentation
.fi
.TP
.BR use.local.desc
-All local USE flags are listed here along with the package and a
+All local USE flags are listed here along with the package and a
description. This file is automatically generated from the
metadata.xml files that are included with each individual package.
Refer to GLEP 56 for further information:
@@ -1030,10 +1311,15 @@ games\-emulation/xmess:net \- Adds network support
.RS
.TP
.BR make.globals
-The global default settings for Portage. This comes from the portage package
-itself. Settings in \fBmake.conf\fR or \fBpackage.env\fR
-override values here. The format
-is described extensivly in \fBmake.conf\fR(5).
+The global default settings for Portage. This comes from the portage package
+itself. Settings in \fBmake.conf\fR or \fBpackage.env\fR override values set
+here. The format is described extensively in \fBmake.conf\fR(5).
+.TP
+.BR repos.conf
+The default configuration of repositories for Portage. This comes from
+the portage package itself. Settings in \fB/etc/portage/repos.conf\fR
+override values set here. The format is described extensively in section
+for \fB/etc/portage/repos.conf\fR.
.RE
.TP
.BR /var/cache/edb/
@@ -1077,12 +1363,12 @@ directories have been modified since being installed. Files which have not
been modified will automatically be unmerged.
.TP
.BR world
-Every time you emerge a package, the package that you requested is
-recorded here. Then when you run `emerge world \-up`, the list of
-packages is read from this file. Note that this does not mean that the
-packages that were installed as dependencies are listed here. For
-example, if you run `emerge mod_wsgi` and you do not have apache
-already, then "www\-apache/mod_wsgi" is recorded in the world file but
+Every time you emerge a package, the package that you requested is
+recorded here. Then when you run `emerge world \-up`, the list of
+packages is read from this file. Note that this does not mean that the
+packages that were installed as dependencies are listed here. For
+example, if you run `emerge mod_wsgi` and you do not have apache
+already, then "www\-apache/mod_wsgi" is recorded in the world file but
"www\-servers/apache" is not. For more information, review \fBemerge\fR(1).
.I Format:
@@ -1114,7 +1400,7 @@ Please report bugs via http://bugs.gentoo.org/
Marius Mauch <genone@gentoo.org>
Mike Frysinger <vapier@gentoo.org>
Drake Wyrm <wyrm@haell.com>
-Arfrever Frehtes Taifersar Arahesis <arfrever@gentoo.org>
+Arfrever Frehtes Taifersar Arahesis <arfrever@apache.org>
.fi
.SH "SEE ALSO"
.BR emerge (1),
diff --git a/man/quickpkg.1 b/man/quickpkg.1
index 738940063..a3f6165e4 100644
--- a/man/quickpkg.1
+++ b/man/quickpkg.1
@@ -1,22 +1,22 @@
-.TH "QUICKPKG" "1" "Mar 2010" "Portage VERSION" "Portage"
+.TH "QUICKPKG" "1" "Dec 2012" "Portage VERSION" "Portage"
.SH NAME
quickpkg \- creates portage packages
.SH SYNOPSIS
-.B quickpkg <list of packages or package\-sets>
+.B quickpkg [options] <list of packages or package\-sets>
.SH DESCRIPTION
.I quickpkg
can be utilized to quickly create a package for portage by
utilizing the files already on your filesystem. This package
-then can be emerged on any system. To review syntax for
+then can be emerged on any system. To review syntax for
emerging binary packages, review \fBemerge\fR(1). The upside
-of this process is that you don't have to wait for the package
-to unpack, configure, compile, and install before you can have
-the package ready to go. The downside is that the package will
-contain the files that exist on your filesystem even if they have
+of this process is that you don't have to wait for the package
+to unpack, configure, compile, and install before you can have
+the package ready to go. The downside is that the package will
+contain the files that exist on your filesystem even if they have been
modified since they were first installed.
.br
-The packages, after being created, will be placed in \fBPKGDIR\fR.
-This variable is defined in \fBmake.conf\fR(5) and defaults to
+The packages, after being created, will be placed in \fBPKGDIR\fR.
+This variable is defined in \fBmake.conf\fR(5) and defaults to
/usr/portage/packages.
.SH OPTIONS
.TP
@@ -28,6 +28,20 @@ The second form is a portage depend atom or a portage package
set. The atom or set is of the same form that you would give
\fBemerge\fR if you wanted to emerge something.
See \fBebuild\fR(5) for full definition.
+.TP
+.BR "\-\-ignore\-default\-opts"
+Causes the \fIQUICKPKG_DEFAULT_OPTS\fR environment variable to be ignored.
+.TP
+.BR "\-\-include\-config < y | n >"
+Include all files protected by CONFIG_PROTECT (as a security precaution,
+default is 'n').
+.TP
+.BR "\-\-include\-unmodified\-config < y | n >"
+Include files protected by CONFIG_PROTECT that have not been modified
+since installation (as a security precaution, default is 'n').
+.TP
+.BR \-\-umask=UMASK
+The umask used during package creation (default is 0077).
.SH "EXAMPLES"
.B quickpkg
/var/db/pkg/dev-python/pyogg-1.1
@@ -52,7 +66,7 @@ Mike Frysinger <vapier@gentoo.org> (revamped version)
.fi
.SH "FILES"
.TP
-.B /etc/make.conf
+.B /etc/portage/make.conf
The \fBPKGDIR\fR variable is defined here.
.SH "SEE ALSO"
.BR ebuild (5),
diff --git a/man/repoman.1 b/man/repoman.1
index b8c0f48e3..a78f94e90 100644
--- a/man/repoman.1
+++ b/man/repoman.1
@@ -1,6 +1,7 @@
-.TH "REPOMAN" "1" "June 2012" "Portage VERSION" "Portage"
+.TH "REPOMAN" "1" "Aug 2013" "Portage VERSION" "Portage"
.SH NAME
-repoman \- Gentoo's program to enforce a minimal level of quality assurance in packages added to the portage tree
+repoman \- Gentoo's program to enforce a minimal level of quality assurance in
+packages added to the portage tree
.SH SYNOPSIS
\fBrepoman\fR [\fIoption\fR] [\fImode\fR]
.SH DESCRIPTION
@@ -9,7 +10,8 @@ repoman \- Gentoo's program to enforce a minimal level of quality assurance in p
.BR repoman
checks the quality of ebuild repositories.
-Note: \fBrepoman commit\fR only works \fIinside local\fR cvs, git, or subversion repositories.
+Note: \fBrepoman commit\fR only works \fIinside local\fR cvs, git, or
+subversion repositories.
.SH OPTIONS
.TP
\fB-a\fR, \fB--ask\fR
@@ -63,6 +65,10 @@ can be enabled by default for a particular repository by setting
"update\-changelog = true" in metadata/layout.conf (see
\fBportage(5)\fR).
.TP
+\fB\-\-experimental\-inherit=<y|n>\fR
+Enable experimental inherit.missing checks which may misbehave when the
+internal eclass database becomes outdated.
+.TP
\fB\-\-if\-modified=<y|n>\fR
Only check packages that have uncommitted modifications
.TP
@@ -75,9 +81,16 @@ Do not use the \fIREPOMAN_DEFAULT_OPTS\fR environment variable.
\fB\-I\fR, \fB\-\-ignore\-masked\fR
Ignore masked packages (not allowed with commit mode)
.TP
+.BR "\-\-include\-arches " ARCHES
+A space separated list of arches used to filter the selection of
+profiles for dependency checks.
+.TP
\fB\-d\fR, \fB\-\-include\-dev\fR
Include dev profiles in dependency checks.
.TP
+\fB\-e <y|n>\fR, \fB\-\-include\-exp\-profiles=<y|n>\fR
+Include exp profiles in dependency checks.
+.TP
\fB\-\-unmatched\-removal\fR
Enable strict checking of package.mask and package.unmask files for
unmatched removal atoms.
@@ -124,24 +137,6 @@ Scan directory tree for QA issues; if OK, commit via cvs
.B CVS/Entries.IO_error
Attempting to commit, and an IO error was encountered while accessing the Entries file
.TP
-.B DEPEND.bad
-User-visible ebuilds with bad DEPEND settings (matched against *visible* ebuilds)
-.TP
-.B DEPEND.badindev
-User-visible ebuilds with bad DEPEND settings (matched against *visible* ebuilds) in developing arch
-.TP
-.B DEPEND.badmasked
-Masked ebuilds with bad DEPEND settings (matched against *all* ebuilds)
-.TP
-.B DEPEND.badmaskedindev
-Masked ebuilds with bad DEPEND settings (matched against *all* ebuilds) in developing arch
-.TP
-.B DEPEND.badtilde
-DEPEND uses the ~ dep operator with a non-zero revision part, which is useless (the revision is ignored)
-.TP
-.B DEPEND.syntax
-Syntax error in DEPEND (usually an extra/missing space/parenthesis)
-.TP
.B DESCRIPTION.missing
Ebuilds that have a missing or empty DESCRIPTION variable
.TP
@@ -165,19 +160,19 @@ Ebuilds that have a missing or empty HOMEPAGE variable
Virtuals that have a non-empty HOMEPAGE variable
.TP
.B IUSE.invalid
-This ebuild has a variable in IUSE that is not in the use.desc or its metadata.xml file
+This ebuild has a variable in IUSE that is not in the use.desc or its
+metadata.xml file
.TP
.B IUSE.missing
-This ebuild has a USE conditional which references a flag that is not listed in IUSE
-.TP
-.B IUSE.undefined
-This ebuild does not define IUSE (style guideline says to define IUSE even when empty)
+This ebuild has a USE conditional which references a flag that is not listed in
+IUSE
.TP
.B KEYWORDS.dropped
Ebuilds that appear to have dropped KEYWORDS for some arch
.TP
.B KEYWORDS.invalid
-This ebuild contains KEYWORDS that are not listed in profiles/arch.list or for which no valid profile was found
+This ebuild contains KEYWORDS that are not listed in profiles/arch.list or for
+which no valid profile was found
.TP
.B KEYWORDS.missing
Ebuilds that have a missing or empty KEYWORDS variable
@@ -188,6 +183,9 @@ Ebuilds that have been added directly with stable KEYWORDS
.B KEYWORDS.stupid
Ebuilds that use KEYWORDS=-* instead of package.mask
.TP
+.B LICENSE.deprecated
+This ebuild is listing a deprecated license.
+.TP
.B LICENSE.invalid
This ebuild is listing a license that doesn't exist in portage's license/ dir.
.TP
@@ -201,51 +199,19 @@ Syntax error in LICENSE (usually an extra/missing space/parenthesis)
Virtuals that have a non-empty LICENSE variable
.TP
.B LIVEVCS.stable
-Ebuild is a live ebuild (cvs, git, darcs, svn, etc) checkout with stable keywords.
+Ebuild is a live ebuild (cvs, git, darcs, svn, etc) checkout with stable
+keywords.
.TP
.B LIVEVCS.unmasked
Ebuild is a live ebuild (cvs, git, darcs, svn, etc) checkout but has keywords
and is not masked in the global package.mask.
.TP
-.B PDEPEND.bad
-User-visible ebuilds with bad PDEPEND settings (matched against *visible* ebuilds)
-.TP
-.B PDEPEND.badindev
-User-visible ebuilds with bad PDEPEND settings (matched against *visible* ebuilds) in developing arch
-.TP
-.B PDEPEND.badmasked
-Masked ebuilds with PDEPEND settings (matched against *all* ebuilds)
-.TP
-.B PDEPEND.badmaskedindev
-Masked ebuilds with PDEPEND settings (matched against *all* ebuilds) in developing arch
-.TP
-.B PDEPEND.badtilde
-PDEPEND uses the ~ dep operator with a non-zero revision part, which is useless (the revision is ignored)
-.TP
.B PDEPEND.suspect
PDEPEND contains a package that usually only belongs in DEPEND
.TP
-.B PDEPEND.syntax
-Syntax error in PDEPEND (usually an extra/missing space/parenthesis)
-.TP
.B PROVIDE.syntax
Syntax error in PROVIDE (usually an extra/missing space/parenthesis)
.TP
-.B RDEPEND.bad
-User-visible ebuilds with bad RDEPEND settings (matched against *visible* ebuilds)
-.TP
-.B RDEPEND.badindev
-User-visible ebuilds with bad RDEPEND settings (matched against *visible* ebuilds) in developing arch
-.TP
-.B RDEPEND.badmasked
-Masked ebuilds with RDEPEND settings (matched against *all* ebuilds)
-.TP
-.B RDEPEND.badmaskedindev
-Masked ebuilds with RDEPEND settings (matched against *all* ebuilds) in developing arch
-.TP
-.B RDEPEND.badtilde
-RDEPEND uses the ~ dep operator with a non-zero revision part, which is useless (the revision is ignored)
-.TP
.B RDEPEND.implicit
RDEPEND is unset in the ebuild which triggers implicit RDEPEND=$DEPEND
assignment (prior to EAPI 4)
@@ -253,9 +219,6 @@ assignment (prior to EAPI 4)
.B RDEPEND.suspect
RDEPEND contains a package that usually only belongs in DEPEND
.TP
-.B RDEPEND.syntax
-Syntax error in RDEPEND (usually an extra/missing space/parenthesis)
-.TP
.B PROPERTIES.syntax
Syntax error in PROPERTIES (usually an extra/missing space/parenthesis)
.TP
@@ -276,6 +239,28 @@ Missing ChangeLog files
.B changelog.notadded
ChangeLogs that exist but have not been added to cvs
.TP
+.B dependency.bad
+User-visible ebuilds with unsatisfied dependencies (matched against *visible*
+ebuilds)
+.TP
+.B dependency.badindev
+User-visible ebuilds with unsatisfied dependencies (matched against *visible*
+ebuilds) in developing arch
+.TP
+.B dependency.badmasked
+Masked ebuilds with unsatisfied dependencies (matched against *all* ebuilds)
+.TP
+.B dependency.badmaskedindev
+Masked ebuilds with unsatisfied dependencies (matched against *all* ebuilds) in
+developing arch
+.TP
+.B dependency.badtilde
+Uses the ~ dep operator with a non-zero revision part, which is useless (the
+revision is ignored)
+.TP
+.B dependency.syntax
+Syntax error in dependency string (usually an extra/missing space/parenthesis)
+.TP
.B dependency.unknown
Ebuild has a dependency that refers to an unknown package (which may be
valid if it is a blocker for a renamed/removed package, or is an
@@ -294,10 +279,12 @@ Some files listed in the Manifest aren't referenced in SRC_URI
This ebuild has a malformed header
.TP
.B ebuild.invalidname
-Ebuild files with a non-parseable or syntactically incorrect name (or using 2.1 versioning extensions)
+Ebuild files with a non-parseable or syntactically incorrect name (or using 2.1
+versioning extensions)
.TP
.B ebuild.majorsyn
-This ebuild has a major syntax error that may cause the ebuild to fail partially or fully
+This ebuild has a major syntax error that may cause the ebuild to fail
+partially or fully
.TP
.B ebuild.minorsyn
This ebuild has a minor syntax error that contravenes gentoo coding style
@@ -318,14 +305,15 @@ A simple sourcing of the ebuild produces output; this breaks ebuild policy.
PATCHES variable should be a bash array to ensure white space safety
.TP
.B ebuild.syntax
-Error generating cache entry for ebuild; typically caused by ebuild syntax error
-or digest verification failure.
+Error generating cache entry for ebuild; typically caused by ebuild syntax
+error or digest verification failure.
.TP
.B file.UTF8
File is not UTF8 compliant
.TP
.B file.executable
-Ebuilds, digests, metadata.xml, Manifest, and ChangeLog do not need the executable bit
+Ebuilds, digests, metadata.xml, Manifest, and ChangeLog do not need the
+executable bit
.TP
.B file.name
File/dir name must be composed of only the following chars: a-zA-Z0-9._-+:
@@ -358,8 +346,19 @@ Missing metadata.xml files
.B metadata.warning
Warnings in metadata.xml files
.TP
+.B repo.eapi.banned
+The ebuild uses an EAPI which is banned by the repository's
+metadata/layout.conf settings.
+.TP
+.B repo.eapi.deprecated
+The ebuild uses an EAPI which is deprecated by the repository's
+metadata/layout.conf settings.
+.TP
+.B IUSE.rubydeprecated
+The ebuild sets a ruby interpreter in USE_RUBY that is no longer available as
+a ruby target
+.TP
.B portage.internal
-The ebuild uses an internal Portage function
+The ebuild uses an internal Portage function or variable
.TP
.B upstream.workaround
The ebuild works around an upstream bug, an upstream bug should be filed and
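A sketch combining the options introduced in this hunk with an ordinary run; 'full' is repoman's exhaustive mode, and the arch list is illustrative:

    # run all checks against amd64/x86 profiles only, including exp profiles
    repoman --include-arches "amd64 x86" --include-exp-profiles=y full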
diff --git a/man/ru/color.map.5 b/man/ru/color.map.5
new file mode 100644
index 000000000..f7e65e339
--- /dev/null
+++ b/man/ru/color.map.5
@@ -0,0 +1,217 @@
+.TH "COLOR.MAP" "5" "Jul 2013" "Portage VERSION" "Portage"
+.SH "НАЗВАНИЕ"
+color.map \- пользовательские настройки цвета в Portage
+.SH "ПАРАМЕТРЫ"
+.B /etc/portage/color.map
+.SH "ОПИСАНИЕ"
+Указанный файл содержит переменные, определяющие классы цвета, которые
+использует Portage. Проверяя настройки цвета, Portage в первую очередь
+обращается к нему. Если тот или иной класс цвета не определен в
+\fB/etc/portage/color.map\fR, Portage использует внутренние значения,
+принятые по умолчанию.
+.SH "СИНТАКСИС"
+\fBПЕРЕМЕННАЯ\fR = \fI[атрибуты или коды ansi, через пробел]\fR
+.TP
+\fBАТРИБУТ\fR = \fI[атрибуты или коды ansi, через пробел]\fR
+.SH "ПЕРЕМЕННЫЕ"
+.TP
+\fBNORMAL\fR = \fI"normal"\fR
+Определяет цвет, используемый для некоторых слов, встречающихся в контекстах,
+отличных от перечисленных ниже.
+.TP
+\fBBAD\fR = \fI"red"\fR
+Определяет цвет, используемый для некоторых слов, встречающихся в отрицательном
+контексте.
+.TP
+\fBBRACKET\fR = \fI"blue"\fR
+Определяет цвет, используемый для скобок.
+.TP
+\fBGOOD\fR = \fI"green"\fR
+Определяет цвет, используемый для некоторых слов, встречающихся в положительном
+контексте.
+.TP
+\fBHILITE\fR = \fI"teal"\fR
+Определяет цвет, используемый для выделения слов.
+.TP
+\fBINFORM\fR = \fI"darkgreen"\fR
+Определяет цвет, используемый для информационных сообщений.
+.TP
+\fBMERGE_LIST_PROGRESS\fR = \fI"yellow"\fR
+Определяет цвет, используемый для чисел, отображающих ход установки.
+.TP
+\fBPKG_BLOCKER\fR = \fI"red"\fR
+Определяет цвет, используемый для пакетов, создающих неразрешенный конфликт.
+.TP
+\fBPKG_BLOCKER_SATISFIED\fR = \fI"darkblue"\fR
+Определяет цвет, используемый для пакетов, создававших конфликт, который
+затем был разрешен.
+.TP
+\fBPKG_MERGE\fR = \fI"darkgreen"\fR
+Определяет цвет, используемый для пакетов, которые будут установлены.
+.TP
+\fBPKG_MERGE_SYSTEM\fR = \fI"darkgreen"\fR
+Определяет цвет, используемый для system-пакетов, которые будут установлены.
+.TP
+\fBPKG_MERGE_WORLD\fR = \fI"green"\fR
+Определяет цвет, используемый для world-пакетов, которые будут установлены.
+.TP
+\fBPKG_BINARY_MERGE\fR = \fI"purple"\fR
+Определяет цвет, используемый для пакетов, которые будут установлены в бинарной версии.
+.TP
+\fBPKG_BINARY_MERGE_SYSTEM\fR = \fI"purple"\fR
+Определяет цвет, используемый для system-пакетов, которые будут установлены в
+бинарной версии.
+.TP
+\fBPKG_BINARY_MERGE_WORLD\fR = \fI"fuchsia"\fR
+Определяет цвет, используемый для world-пакетов, которые будут установлены в
+бинарной версии.
+.TP
+\fBPKG_NOMERGE\fR = \fI"darkblue"\fR
+Определяет цвет, используемый для имен пакетов, которые не будут установлены.
+.TP
+\fBPKG_NOMERGE_SYSTEM\fR = \fI"darkblue"\fR
+Определяет цвет, используемый для имен system-пакетов, которые не будут установлены.
+.TP
+\fBPKG_NOMERGE_WORLD\fR = \fI"blue"\fR
+Определяет цвет, используемый для имен world-пакетов, которые не будут установлены.
+.TP
+\fBPKG_UNINSTALL\fR = \fI"red"\fR
+Определяет цвет, используемый для имен пакетов, которые должны быть удалены для
+разрешения конфликтов.
+.TP
+\fBPROMPT_CHOICE_DEFAULT\fR = \fI"green"\fR
+Определяет цвет, используемый для предлагаемого на выбор значения по умолчанию.
+.TP
+\fBPROMPT_CHOICE_OTHER\fR = \fI"red"\fR
+Определяет цвет, используемый для предлагаемого на выбор значения не по умолчанию.
+.TP
+\fBSECURITY_WARN\fR = \fI"red"\fR
+Определяет цвет, используемый для предупреждений о безопасности.
+.TP
+\fBUNMERGE_WARN\fR = \fI"red"\fR
+Определяет цвет, используемый для предупреждений об удалении пакета.
+.TP
+\fBWARN\fR = \fI"yellow"\fR
+Определяет цвет, используемый для предупреждений.
+.SH "ДОПУСТИМЫЕ АТРИБУТЫ"
+.TP
+.B Цвет текста
+.RS
+.TP
+.B black - черный
+.TP
+.B darkgray - темно-серый
+.TP
+.B darkred - темно-красный
+.TP
+.B red - красный
+.TP
+.B darkgreen - темно-зеленый
+.TP
+.B green - зеленый
+.TP
+.B brown - коричневый
+.TP
+.B yellow - желтый
+.TP
+.B darkyellow - темно-желтый
+.TP
+.B darkblue - темно-синий
+.TP
+.B blue - синий
+.TP
+.B purple - фиолетовый
+.TP
+.B fuchsia - лиловый
+.TP
+.B teal - серо-зеленый
+.TP
+\fBturquoise\fR = \fBdarkteal\fR - \fBбирюзовый\fR
+.TP
+.B lightgray - светло-серый
+.TP
+.B white - белый
+.RE
+.TP
+.B Цвет фона
+.RS
+.TP
+.B bg_black - черный фон
+.TP
+.B bg_darkred - темно-красный фон
+.TP
+.B bg_darkgreen - темно-зеленый фон
+.TP
+\fBbg_brown\fR = \fBbg_darkyellow\fR - \fBкоричневый фон\fR
+.TP
+.B bg_darkblue - темно-синий фон
+.TP
+.B bg_purple - фиолетовый фон
+.TP
+.B bg_teal - серо-зеленый фон
+.TP
+.B bg_lightgray - светло-серый фон
+.RE
+.TP
+.B Другие атрибуты
+.RS
+.TP
+.B normal - обычный
+.TP
+.B no\-attr - без атрибутов
+.TP
+.B reset - переопределить
+.TP
+.B bold - жирный
+.TP
+.B faint - бледный
+.TP
+.B standout - выделение
+.TP
+.B no\-standout - без выделения
+.TP
+.B underline - с подчеркиванием
+.TP
+.B no\-underline - без подчеркивания
+.TP
+.B blink - мигающий
+.TP
+.B no\-blink - без мигания
+.TP
+.B overline - с надчеркиванием
+.TP
+.B no\-overline - без надчеркивания
+.TP
+.B reverse - негатив
+.TP
+.B no\-reverse - не отражать цвет
+.TP
+.B invisible - невидимый
+.RE
+.SH "БАГТРЕКЕР"
+Об обнаруженных ошибках сообщайте на http://bugs.gentoo.org/
+.SH "АВТОРЫ"
+.nf
+Arfrever Frehtes Taifersar Arahesis <arfrever@apache.org>
+.fi
+.SH "ФАЙЛЫ"
+.TP
+.B /etc/portage/color.map
+Содержит переменные, используемые для пользовательской настройки цветного вывода.
+.TP
+.B /etc/portage/make.conf
+Содержит другие переменные.
+.SH "СМ. ТАКЖЕ"
+.BR console_codes (4),
+.BR make.conf (5),
+.BR portage (5),
+.BR emerge (1),
+.BR ebuild (1),
+.BR ebuild (5)
+.TP
+Модуль \fIPython /usr/lib/portage/pym/portage/output.py\fR.
+.SH "ПЕРЕВОД"
+.nf
+\fRПереводчик\fR - Елена Гаврилова <e.vl.gavrilova@yandex.ru>
+\fRРедактор\fR - Романов Владимир <blueboar2@gmail.com>
+.fi
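A minimal /etc/portage/color.map sketch matching the syntax documented above; the variable names are the documented ones, the chosen values are only an example:

    PKG_NOMERGE = "darkblue"
    WARN = "yellow bold"
    BAD = "red"

Any class left out falls back to Portage's built-in default, as the description says.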
diff --git a/man/ru/dispatch-conf.1 b/man/ru/dispatch-conf.1
new file mode 100644
index 000000000..a511e97e0
--- /dev/null
+++ b/man/ru/dispatch-conf.1
@@ -0,0 +1,100 @@
+.TH "DISPATCH-CONF" "1" "Jan 2011" "Portage VERSION" "Portage"
+.SH "НАЗВАНИЕ"
+dispatch\-conf \- безопасное обновление конфигурационных файлов после
+установки новых пакетов
+.SH "СИНТАКСИС"
+.B dispatch\-conf
+.SH "ОПИСАНИЕ"
+Утилиту \fIdispatch\-conf\fR следует запускать после установки новых
+пакетов для проверки конфигурационных файлов на обновления. Если
+новый конфигурационный файл попытается затереть текущий,
+\fIdispatch\-conf\fR предложит пользователю самому решить, каким
+образом разрешить эту ситуацию. Среди достоинств \fIdispatch\-conf\fR \-
+легкость отката изменений (изменения конфигурационных файлов сохраняются
+с помощью либо патчей, либо RCS) и возможность автоматического
+обновления тех файлов, которые пользователь не изменял, и тех, которые
+отличаются от текущей версии только CVS-мусором или пробелом.
+
+\fIdispatch\-conf\fR проверит на обновления все каталоги, указанные в
+переменной \fICONFIG_PROTECT\fR. Также, программа \fIdispatch\-conf\fR
+автоматически обновит все файлы конфигурации, найденные в
+\fICONFIG_PROTECT_MASK\fR. Подробнее см. в \fBmake.conf\fR(5).
+.SH "ОПЦИИ"
+.TP
+Нет.
+.SH "ИСПОЛЬЗОВАНИЕ"
+\fIdispatch\-conf\fR следует запускать от пользователя root, поскольку
+владельцем файлов, с которыми работает утилита, как правило, является
+именно пользователь root. Перед первым запуском \fIdispatch\-conf\fR
+необходимо отредактировать настройки в файле \fB/etc/dispatch\-conf.conf\fR
+и создать каталог архивов, указанный в \fB/etc/dispatch\-conf.conf\fR.
+Все изменения конфигурационных файлов сохраняются в каталоге архивов \-
+либо как патчи, либо с помощью RCS, благодаря чему довольно просто
+вернуться к предыдущей версии.
+
+Всякий раз, когда \fIdispatch\-conf\fR обнаруживает конфигурационный файл,
+который был обновлен, пользователю дается возможность выбрать один из
+следующих вариантов, чтобы решить, что делать с предлагаемым обновлением:
+.TP
+.B u
+Обновить (заменить) текущий конфигурационный файл новым и продолжить.
+.TP
+.B z
+Затереть (удалить) новый конфигурационный файл и продолжить.
+.TP
+.B n
+Пропустить и перейти к следующему конфигурационному файлу, не удаляя ни
+исходную версию, ни файлы, защищенные \fICONFIG_PROTECT\fR.
+.TP
+.B e
+Редактировать новый конфигурационный файл в редакторе текста,
+определенном переменной \fIEDITOR\fR.
+.TP
+.B m
+В интерактивном режиме произвести слияние текущего и нового конфигурационных файлов.
+.TP
+.B l
+Просмотреть различия между текущим и новым конфигурационными файлами.
+.TP
+.B t
+Переключаться между текущим и новым конфигурационными файлами
+(в конечном итоге потребуется установить конечную версию, нажав \fBu\fR).
+.TP
+.B h
+Вывести справку.
+.TP
+.B q
+Выйти из \fIdispatch\-conf\fR.
+.SH "ПРАВА НА ФАЙЛЫ"
+\fBВНИМАНИЕ\fR: Если \fB/etc/dispatch\-conf.conf\fR сконфигурирован
+для использования \fBrcs\fR(1), права на чтение и исполнение
+архивированных файлов могут быть унаследованы от первой проверки
+рабочего файла, как описано в man\-руководстве \fBci\fR(1). Это
+означает, что даже если права доступа к рабочему файлу изменились,
+прежние права, действовавшие при первой проверке, могут быть
+возвращены. Согласно руководству \fBci\fR(1), пользователи могут
+управлять доступом к RCS\-файлам, изменив права на доступ к
+каталогу, в котором они лежат.
+.SH "БАГТРЕКЕР"
+Об обнаруженных ошибках сообщайте на http://bugs.gentoo.org/
+.SH "АВТОРЫ"
+.nf
+Jeremy Wohl
+Karl Trygve Kalleberg <karltk@gentoo.org>
+Mike Frysinger <vapier@gentoo.org>
+Grant Goodyear <g2boojum@gentoo.org>
+.fi
+.SH "ФАЙЛЫ"
+.TP
+.B /etc/dispatch\-conf.conf
+Здесь хранятся настройки конфигурации для \fIdispatch\-conf\fR.
+.SH "СМОТРИ ТАКЖЕ"
+.BR make.conf (5),
+.BR ci (1),
+.BR etc-update (1),
+.BR rcs (1)
+.SH "ПЕРЕВОД"
+.nf
+Переводчик: Елена Гаврилова <e.vl.gavrilova@yandex.ru>
+Редактор: Романов Владимир <blueboar2@gmail.com>
+.fi
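The page tells the user to edit /etc/dispatch-conf.conf and create the archive directory before the first run; a minimal sketch with the key names from the shipped default file (values illustrative):

    # excerpt of /etc/dispatch-conf.conf
    archive-dir=${EPREFIX}/etc/config-archive
    use-rcs=no    # yes keeps history in RCS instead of stored diffs

    # create the archive directory before the first run
    mkdir -p /etc/config-archive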
diff --git a/man/ru/ebuild.1 b/man/ru/ebuild.1
new file mode 100644
index 000000000..98d4b5ab5
--- /dev/null
+++ b/man/ru/ebuild.1
@@ -0,0 +1,249 @@
+.TH "EBUILD" "1" "Jan 2014" "Portage VERSION" "Portage"
+.SH "НАЗВАНИЕ"
+ebuild \- низкоуровневый интерфейс системы Portage
+.SH "СИНТАКСИС"
+.B ebuild
+.I файл команда [команда]\fR...
+.SH "ОПИСАНИЕ"
+Программа ebuild представляет собой низкоуровневый интерфейс
+системы Portage. Она обеспечивает возможность непосредственного
+взаимодействия со сценарием ebuild при помощи специальных
+подкоманд или групп команд, выполняемых в контексте данного файла,
+и функций. Утилита ebuild принимает в качестве аргументов
+ebuild-сценарий и одну или более команд, подвергает сценарий
+синтаксическому анализу и выполняет указанные команды. Имеются
+отдельные команды для загрузки исходных файлов, их распаковки,
+компиляции, установки объектных файлов во временный каталог image,
+установки образа в локальную файловую систему, создания архива
+пакета из образа и т.д.
+.SH "ФАЙЛ"
+Должен быть рабочим ebuild-сценарием. Подробнее смотри в руководстве
+по \fBebuild\fR(5).
+.SH "КОМАНДЫ"
+По умолчанию portage выполняет все функции по порядку вплоть до
+указанной, кроме функций, которые уже были вызваны в предыдущих
+вызовах ebuild. Например, если вы дадите команду \fBcompile\fR, то
+вызовете тем самым и предшествующие ей функции (такие как
+\fBsetup\fR и \fBunpack\fR), если они не были запущены в
+предыдущем запуске ebuild. Если вы хотите быть уверенным, что
+все они были выполнены, вам нужно сначала использовать команду
+\fBclean\fR. Если вы хотите, чтобы запускалась только одна команда,
+вам следует добавить опцию \fInoauto\fR к значению переменной
+окружения \fBFEATURES\fR. Подробнее смотри в справке по \fBmake.conf\fR(5).
+
+.TP
+.BR help
+Выводит справочную информацию о программе в сжатом изложении,
+а также целый ряд сведений о пакете.
+.TP
+.BR setup
+Запускает все действия по настройке данного пакета, в том числе
+специфические системные тесты.
+.TP
+.BR clean
+Очищает временный каталог сборки, созданный Portage специально для
+этого файла ebuild. Временный каталог сборки обычно содержит
+извлеченный из архива исходный код, а также, возможно, так называемый
+установочный образ (все файлы, которые будут установлены в локальную
+файловую систему или сохранены в пакете). Расположение каталога
+сборки определяется значением переменной PORTAGE_TMPDIR. Чтобы узнать
+ее текущее значение, выполните \fIemerge \-\-info\fR. О том, как
+переопределить эту переменную, смотри \fBmake.conf\fR(5).
+
+Примечание: Portage удаляет практически все данные, оставшиеся после
+успешной установки пакета, за исключением тех случаев, когда в
+переменной FEATURES явно указано 'noclean'. Если вы добавите noclean
+в значение FEATURES, очень скоро большой объем дискового пространства
+будет занят ненужными файлами. Не рекомендуется пользоваться этим
+режимом постоянно, а лишь в том случае, если исходники пакетов
+потребуются вам после установки. Впрочем, возможно и ручное удаление
+этих файлов: для этого следует выполнить \fIrm \-rf /var/tmp/portage\fR.
+.TP
+.BR fetch
+Проверяет, все ли источники данных, фигурирующие в SRC_URI, доступны
+в каталоге DISTDIR (подробнее см. в \fBmake.conf\fR(5)) и имеют
+верную контрольную сумму. Если исходные коды недоступны, будет
+предпринята попытка загрузить их с серверов, адреса которых указаны
+в SRC_URI. Если для того или иного файла имеется несколько адресов
+загрузки, Portage проверит каждый из них и выберет тот сервер,
+который ближе. (Точность этого выбора на данный момент не
+гарантируется.) В первую очередь всегда обрабатываются зеркала Gentoo
+Linux, содержащиеся в переменной GENTOO_MIRRORS. Если по какой-либо
+причине контрольная сумма текущих или только что загруженных исходных
+кодов не совпадает с контрольной суммой, записанной в файле
+files/digest\-[пакет]\-[версия\-ревизия], выводится предупреждение, и
+программа ebuild завершает работу с кодом ошибки 1.
+.TP
+.BR digest
+В настоящее время \- эквивалент команды \fImanifest\fR.
+.TP
+.BR manifest
+Обновляет Manifest\-файл пакета. В результате создаются контрольные суммы
+для всех файлов, обнаруженных в одном каталоге с обрабатываемым файлом
+ebuild, а также содержимое вложенных каталогов подкаталога files.
+При этом контрольные суммы генерируются и для всех файлов, перечисленных
+в SRC_URI для каждого файла ebuild. Подробнее о поведении данной команды,
+см. в разделе о смысле значения \fIassume\-digests\fR переменной
+\fBFEATURES\fR справочного руководства по \fBmake.conf\fR(5). Если вы
+не хотите, чтобы дайджесты принимались неявно, см. опцию \fB\-\-force\fR.
+.TP
+.BR unpack
+Извлекает исходные коды в подкаталог \fIкаталога сборки\fR (BUILD_PREFIX),
+вызывая функцию \fIsrc_unpack()\fR внутри файла ebuild. Если функция
+src_unpack() не определена, для распаковки всех файлов, перечисленных в
+SRC_URI, будет использована стандартная src_unpack(). Как правило,
+исходники распаковываются в каталог ${BUILD_PREFIX}/[пакет]\-[версия-ревизия]/work.
+Обращаться к нему можно с помощью переменной ${WORKDIR}.
+
+Создавая файл ebuild самостоятельно, убедитесь, что переменная S
+(каталог исходных файлов), определенная в начале ebuild-сценария, указывает
+на каталог, в котором действительно содержатся распакованные исходные коды.
+По умолчанию он определяется как ${WORKDIR}/${P}, поэтому, как правило,
+ничего не требуется исправлять. Функция src_unpack() также отвечает за
+наложение патчей перед компиляцией пакетов.
+.TP
+.BR prepare
+Подготавливает извлеченные из архива исходные коды, вызывая функцию
+\fIsrc_prepare()\fR, определенную в ebuild-файле. При запуске src_prepare()
+текущим рабочим каталогом становится ${S}. Данная функция поддерживается,
+начиная с \fBEAPI 2\fR.
+.TP
+.BR configure
+Производит конфигурирование распакованных исходных кодов, вызывая функцию
+\fIsrc_configure()\fR, определенную в ebuild-файле. При запуске src_configure()
+текущим рабочим каталогом становится ${S}. Данная функция поддерживается
+начиная с \fBEAPI 2\fR.
+.TP
+.BR compile
+Компилирует распакованные исходные коды, вызывая функцию \fIsrc_compile()\fR,
+определенную в ebuild-файле. При запуске src_compile() текущим рабочим
+каталогом становится ${S}. По завершении работы src_compile() исходные
+коды должны быть полностью скомпилированы.
+.TP
+.BR test
+Выполняет специальные тесты для отдельных пакетов, проверяя сборку.
+.TP
+.BR preinst
+Выполняет специальные действия для отдельных пакетов, которые
+требуется произвести до установки пакета в текущую файловую систему.
+.TP
+.BR install
+Устанавливает пакет во временный \fIкаталог установки\fR, вызывая
+функцию \fIsrc_install()\fR. По завершении каталог установки в
+(${BUILD_PREFIX}/[пакет]\-[версия\-ревизия]/image) будет содержать
+все файлы, которые должны быть либо установлены в текущую файловую
+систему, либо включены в бинарный пакет.
+.TP
+.BR postinst
+Выполняет специальные действия для отдельных пакетов, которые
+требуется произвести после установки пакета в текущую файловую
+систему. Как правило, при этом выводятся полезные сообщения.
+.TP
+.BR qmerge
+Эта функция устанавливает все файлы в \fIкаталоге установки\fR на
+текущую файловую систему. Это производится следующим образом:
+сначала запускается функция \fIpkg_preinst()\fR (если она существует).
+Затем все файлы устанавливаются в файловую систему, а их
+контрольные суммы записываются в
+\fI/var/db/pkg/${CATEGORY}/${PN}-${PVR}/CONTENTS\fR. Наконец, по
+завершении установки всех файлов выполняется функция
+\fIpkg_postinst()\fR (если она существует).
+.TP
+.BR merge
+Обычно для установки файла ebuild, необходимо последовательно выполнить
+следующие действия: \fIfetch\fR, \fIunpack\fR, \fIcompile\fR,
+\fIinstall\fR и \fIqmerge\fR. Если вам нужно только установить
+файл ebuild, вы можете использовать данную команду: она сама выполнит
+все перечисленные операции и остановится в процессе выполнения только
+в том случае, если какая-либо функция отрабатывает с ошибкой.
+.TP
+.BR unmerge
+Эта команда сначала вызывает функцию \fIpkg_prerm()\fR (если она существует).
+Затем она удаляет все файлы из текущих файловых систем, файл содержимого
+пакета для которых имеет верную контрольную сумму и время изменения.
+Все пустые каталоги удаляются вместе с вложенными. Наконец, команда
+запускает функцию \fIpkg_postrm()\fR (если она существует). Можно сначала
+установить новую версию пакета, а затем удалить прежнюю - собственно,
+именно в этом заключается рекомендуемый метод обновления.
+.TP
+.BR prerm
+Запускает для определенного пакета действия, которые необходимо выполнить
+до удаления пакета из файловой системы. См. также \fIunmerge\fR.
+.TP
+.BR postrm
+Запускает для определенного пакета действия, которые необходимо выполнить
+после удаления пакета из файловой системы. См. также \fIunmerge\fR.
+.TP
+.BR config
+Запускает для определенного пакета действия, которые необходимо выполнить
+до начала установки. Как правило, это настройка конфигурационных файлов
+или другие настроечные действия, которые пользователь может захотеть
+выполнить.
+.TP
+.BR package
+Эта команда очень напоминает \fImerge\fR, за исключением того, что после
+загрузки, распаковки, компиляции и установки создается .tbz2-архив
+бинарного пакета, который затем сохраняется в каталоге \fBPKGDIR\fR
+(см. \fBmake.conf\fR(5)).
+.TP
+.BR rpm
+Собирает RPM\-пакет RedHat из файлов во временном \fIкаталоге установки\fR.
+На данный момент сведения о зависимостях файла ebuild не включаются в RPM.
+.SH "ОПЦИИ"
+.TP
+.BR "\-\-debug"
+Запустить bash с опцией \-x, в результате чего стандартный вывод будет
+включать подробную отладочную информацию.
+.TP
+.BR "\-\-color < y | n >"
+Включить или отключить цветное отображение. Эта опция переопределяет
+значение переменной \fINOCOLOR\fR (см. \fBmake.conf\fR(5)) и может быть
+использована для принудительного назначения цвета в том случае, если
+стандартный вывод - не терминал (по умолчанию цвет включен только в том
+случае, если стандартный вывод - терминал).
+.TP
+.BR "\-\-force"
+При использовании в связке с командой digest или manifest данная опция
+принудительно генерирует новые дайджесты для всех файлов исходного кода,
+относящихся к данному файлу ebuild. Если в каталоге ${DISTDIR} требуемых
+исходников нет, они будут автоматически загружены.
+.TP
+.BR "\-\-ignore\-default\-opts"
+Не использовать переменную окружения \fIEBUILD_DEFAULT_OPTS\fR.
+.TP
+.BR "\-\-skip\-manifest"
+Пропустить проверку Manifest-файлов.
+.SH "БАГТРЕКЕР"
+Об обнаруженных ошибках сообщайте на http://bugs.gentoo.org/
+.SH "АВТОРЫ"
+.nf
+Achim Gottinger <achim@gentoo.org>
+Daniel Robbins <drobbins@gentoo.org>
+Nicholas Jones <carpaski@gentoo.org>
+Mike Frysinger <vapier@gentoo.org>
+.fi
+.SH "ФАЙЛЫ"
+.TP
+.B /etc/portage/make.conf
+Содержит переменные сборки, имеющие приоритет перед значениями,
+указанными в файле make.globals.
+.TP
+.B /etc/portage/color.map
+Содержит переменные, позволяющие назначать пользовательские настройки
+цветного вывода.
+.SH "СМОТРИ ТАКЖЕ"
+.BR emerge (1),
+.BR ebuild (5),
+.BR make.conf (5),
+.BR color.map (5)
+.TP
+Сценарий \fI/usr/lib/portage/bin/ebuild.sh\fR.
+.TP
+Вспомогательные приложения в \fI/usr/lib/portage/bin\fR.
+
+.SH "ПЕРЕВОД"
+.nf
+Переводчик: Елена Гаврилова <e.vl.gavrilova@yandex.ru>
+Правка и обновление: Романов Владимир <blueboar2@gmail.com>
+Переведенная версия соответствует английской версии от 2013-07-31
+.fi
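The phase ordering described above (each command implies the ones before it, unless FEATURES=noauto is set) is easiest to see by driving one ebuild manually; the path and package name are hypothetical:

    cd /usr/portage/app-misc/foo
    ebuild foo-1.0.ebuild clean           # start from a clean build directory
    ebuild foo-1.0.ebuild compile         # implies fetch, unpack, prepare, configure
    ebuild foo-1.0.ebuild install qmerge  # image dir -> live filesystem
    # or the all-in-one equivalent:
    ebuild foo-1.0.ebuild merge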
diff --git a/man/ru/env-update.1 b/man/ru/env-update.1
new file mode 100644
index 000000000..9e0775e86
--- /dev/null
+++ b/man/ru/env-update.1
@@ -0,0 +1,35 @@
+.TH "ENV-UPDATE" "1" "Aug 2008" "Portage VERSION" "Portage"
+.SH "НАЗВАНИЕ"
+env\-update \- автоматическое обновление настроек окружения
+.SH "СИНТАКСИС"
+\fBenv\-update\fR \fI[опции]\fR
+.SH "ОПИСАНИЕ"
+.B env\-update
+читает файлы в каталоге \fI/etc/env.d\fR и автоматически генерирует
+\fI/etc/profile.env\fR и \fI/etc/ld.so.conf\fR. Затем для обновления
+\fI/etc/ld.so.cache\fR запускается \fBldconfig\fR(8). \fBemerge\fR(1)
+автоматически вызывает \fBenv-update\fR после каждой установки пакета.
+Если же вы вносите изменения в \fI/etc/env.d\fR, вам следует
+самостоятельно выполнить \fBenv-update\fR, чтобы внесенные
+изменения вступили в силу. Обратите внимание, что это повлияет
+только на последующие операции. Чтобы изменения отразились на уже
+запущенных процессах, вероятно, понадобится выполнить
+\fIsource /etc/profile\fR.
+.SH "ОПЦИИ"
+.TP
+.B \-\-no\-ldconfig
+Не запускать \fBldconfig\fR (и, тем самым, опустить пересборку
+кэша \fIld.so.cache\fR и т.д.).
+.SH "БАГТРЕКЕР"
+Об обнаруженных ошибках сообщайте на http://bugs.gentoo.org/
+.SH "АВТОРЫ"
+Daniel Robbins <drobbins@gentoo.org>
+.SH "СМОТРИ ТАКЖЕ"
+.BR emerge (1),
+.BR ldconfig (8)
+
+.SH "ПЕРЕВОД"
+.nf
+Переводчик: Елена Гаврилова <e.vl.gavrilova@yandex.ru>
+Правка и обновление: Романов Владимир <blueboar2@gmail.com>
+.fi
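A sketch of the /etc/env.d round trip the page describes; the file name 99local and the variable are hypothetical:

    echo 'MYTOOL_HOME="/opt/mytool"' > /etc/env.d/99local
    env-update            # regenerates /etc/profile.env and /etc/ld.so.conf
    source /etc/profile   # pick the change up in the current shell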
diff --git a/man/ru/etc-update.1 b/man/ru/etc-update.1
new file mode 100644
index 000000000..f799317f3
--- /dev/null
+++ b/man/ru/etc-update.1
@@ -0,0 +1,63 @@
+.TH "ETC-UPDATE" "1" "Mar 2012" "Portage VERSION" "Portage"
+.SH "НАЗВАНИЕ"
+etc\-update \- обработка изменений конфигурационных файлов
+.SH "СИНТАКСИС"
+.BR etc\-update
+[\fIопции\fR] [\fI--automode <режим>\fR] [\fIпути для сканирования\fR]
+.SH "ОПИСАНИЕ"
+Утилиту \fIetc\-update\fR следует запускать после установки
+новых пакетов для проверки предлагаемых обновлений
+конфигурационных файлов. Если новый конфигурационный файл
+может перезаписать имеющийся, \fIetc\-update\fR спросит
+у пользователя, как с ним поступить.
+.PP
+\fIetc\-update\fR проверяет все каталоги, заданные в командной
+строке. Если никаких путей не задано, тогда будет использована
+переменная \fICONFIG_PROTECT\fR. Все конфигурационные файлы,
+которые будут найдены в переменной \fICONFIG_PROTECT_MASK\fR,
+будут обновлены программой \fIetc\-update\fR автоматически.
+Подробнее об этом смотри в справке по \fBmake.conf\fR(5).
+.PP
+\fIetc\-update\fR учитывает переменные \fIPORTAGE_CONFIGROOT\fR
+и \fIEROOT\fR при использовании ранее означенных переменных
+(\fICONFIG_PROTECT\fR и \fICONFIG_PROTECT_MASK\fR).
+.SH "ОПЦИИ"
+.TP
+.BR \-d ", " \-\-debug
+Запускает оболочку со включенным режимом отладки.
+.TP
+.BR \-h ", " \-\-help
+Неожиданно, показывает помощь.
+.TP
+.BR \-p ", " \-\-preen
+Автоматически применить тривиальные изменения и выйти.
+.TP
+.BR \-v ", " \-\-verbose
+Показывать в процессе работы настройки и информацию о
+важных решениях.
+.TP
+.BR "\-\-automode <режим>"
+Выбрать один из автоматических режимов работы. Разрешенные
+режимы работы это \-3, \-5, \-7, \-9. Для более подробной
+информации смотри текст, выдаваемый опцией \fI\-\-help\fR.
+.SH "БАГТРЕКЕР"
+Об обнаруженных ошибках сообщайте на http://bugs.gentoo.org/
+.SH "АВТОРЫ"
+.nf
+Jochem Kossen and Leo Lipelis
+Karl Trygve Kalleberg <karltk@gentoo.org>
+Mike Frysinger <vapier@gentoo.org>
+.fi
+.SH "ФАЙЛЫ"
+.TP
+.B /etc/etc-update.conf
+Здесь хранятся настройки \fIetc-update\fR.
+.SH "СМОТРИ ТАКЖЕ"
+.BR dispatch-conf (1),
+.BR make.conf (5)
+
+.SH "ПЕРЕВОД"
+.nf
+Переводчик: Елена Гаврилова <e.vl.gavrilova@yandex.ru>
+Правка и обновление: Романов Владимир <blueboar2@gmail.com>
+.fi
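A sketch of the non-interactive paths mentioned above; the exact mode semantics are the ones printed by --help:

    etc-update -p              # --preen: apply trivial changes and exit
    etc-update --automode -5   # one of the documented automatic modes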
diff --git a/man/ru/fixpackages.1 b/man/ru/fixpackages.1
new file mode 100644
index 000000000..41eb2843d
--- /dev/null
+++ b/man/ru/fixpackages.1
@@ -0,0 +1,22 @@
+.TH "FIXPACKAGES" "1" "Dec 2011" "Portage VERSION" "Portage"
+.SH "НАЗВАНИЕ"
+fixpackages \- выполняет переносы пакетов при обновлениях
+для всех пакетов
+.SH "СИНТАКСИС"
+\fBfixpackages\fR
+.SH "ОПИСАНИЕ"
+Программа fixpackages выполняет переносы пакетов при обновлениях
+для конфигурационных файлов, установленных пакетов и двоичных
+пакетов.
+.SH "БАГТРЕКЕР"
+Об обнаруженных ошибках сообщайте на http://bugs.gentoo.org/
+.SH "АВТОРЫ"
+Zac Medico <zmedico@gentoo.org>
+.SH "СМОТРИ ТАКЖЕ"
+.BR emaint (1),
+.BR emerge (1)
+
+.SH "ПЕРЕВОД"
+.nf
+Переводчик: Романов Владимир <blueboar2@gmail.com>
+.fi
diff --git a/man/xpak.5 b/man/xpak.5
index 0b5b87495..536810db5 100644
--- a/man/xpak.5
+++ b/man/xpak.5
@@ -11,7 +11,8 @@ The following conventions cover all occurrences in this documentation
.IP Integer
All offsets/lengths are big endian unsigned 32bit integers
.IP String
-All strings are ASCII encoded, and not NUL terminated (quotes are for illustration only)
+All strings are ASCII encoded, and not NUL terminated (quotes are for
+illustration only)
.IP Values
The actual values of the individual xpak entries are stored as Strings
.P
@@ -46,7 +47,7 @@ String \fI"STOP"\fR.
|<xpak_offset>|
<tar>|<---xpak---->|<xpak_offset>"STOP"
-Here you see the \fItar\fR archive, the attached \fIxpak\fR blob, the
+Here you see the \fItar\fR archive, the attached \fIxpak\fR blob, the
\fIxpak_offset\fR and the string \fI"STOP"\fR at the end. This metadata
is not considered "part" of the \fIxpak\fR, but rather part of the binpkg.
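The layout above is enough to carve the xpak blob out of a binpkg by hand. A sketch assuming GNU coreutils (od with --endian support, coreutils >= 8.23) and an illustrative file name:

    f=pyogg-1.1.tbz2
    size=$(stat -c %s "$f")
    # the final 8 bytes are <xpak_offset> (big-endian u32) followed by "STOP"
    off=$(tail -c 8 "$f" | head -c 4 | od -An -tu4 --endian=big | tr -d ' ')
    # the xpak blob sits immediately before those 8 bytes and is $off bytes long
    dd if="$f" bs=1 skip=$((size - 8 - off)) count="$off" of=metadata.xpak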
diff --git a/misc/emerge-delta-webrsync b/misc/emerge-delta-webrsync
new file mode 100755
index 000000000..96564af8e
--- /dev/null
+++ b/misc/emerge-delta-webrsync
@@ -0,0 +1,809 @@
+#!/bin/bash
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author: Brian Harring <ferringb@gentoo.org>, karltk@gentoo.org originally.
+# Rewritten from the old, Perl-based emerge-webrsync script
+
+#
+# gpg key import
+# KEY_ID=0x96D8BF6D
+# gpg --homedir /etc/portage/gnupg --keyserver subkeys.pgp.net --recv-keys $KEY_ID
+# gpg --homedir /etc/portage/gnupg --edit-key $KEY_ID trust
+#
+
+argv0=$0
+
+# Only echo if not in verbose mode
+nvecho() { [[ ${do_verbose} -eq 0 ]] && echo "$@" ; }
+# warning echos
+wecho() { echo "${argv0##*/}: warning: $*" 1>&2 ; }
+# error echos
+eecho() { echo "${argv0##*/}: error: $*" 1>&2 ; }
+
+
+#-------------------
+#initialization
+#------------------
+
+# Use portageq from the same directory/prefix as the current script, so
+# that we don't have to rely on PATH including the current EPREFIX.
+scriptpath=${BASH_SOURCE[0]}
+if [ -x "${scriptpath%/*}/portageq" ]; then
+ portageq=${scriptpath%/*}/portageq
+elif type -P portageq > /dev/null ; then
+ portageq=portageq
+else
+ eecho "could not find 'portageq'; aborting"
+ exit 1
+fi
+eval "$("${portageq}" envvar -v DISTDIR EPREFIX FEATURES \
+ FETCHCOMMAND GENTOO_MIRRORS \
+ PORTAGE_BIN_PATH PORTAGE_CONFIGROOT PORTAGE_GPG_DIR \
+ PORTAGE_NICENESS PORTAGE_REPOSITORIES PORTAGE_RSYNC_EXTRA_OPTS \
+ PORTAGE_RSYNC_OPTS PORTAGE_TMPDIR \
+ USERLAND http_proxy ftp_proxy)"
+export http_proxy ftp_proxy
+
+source "${PORTAGE_BIN_PATH}"/isolated-functions.sh || exit
+
+repo_name=gentoo
+repo_location=$(__repo_attr "${repo_name}" location)
+if [[ -z ${repo_location} ]]; then
+ eecho "Repository '${repo_name}' not found"
+ exit 1
+fi
+
+if [ -z "$NICENESS_PULLED" ]; then
+ if [ -n "${PORTAGE_NICENESS}" ]; then
+ export NICENESS_PULLED=asdf
+ exec nice -n "${PORTAGE_NICENESS}" "$0" "$@"
+ echo "failed setting PORTAGE_NICENESS to '$PORTAGE_NICENESS', disabling"
+ fi
+fi
+
+STATE_DIR="${EPREFIX}/var/delta-webrsync/"
+
+# hack. bug 92224
+if [ "${FETCHCOMMAND/getdelta.sh}" != "${FETCHCOMMAND}" ]; then
+ # evil evil evil evil
+ eval "$(grep "^FETCHCOMMAND=" "${EPREFIX}/usr/share/portage/config/make.globals")"
+fi
+
+unset f
+unset IFS
+
+do_verbose=0
+MUST_SYNC='1'
+unset PUKE_HELP
+for x in $*; do
+ case "${x}" in
+ -q|--quiet)
+ PORTAGE_QUIET=1
+ continue
+ ;;
+ esac
+ if [[ $x == "-u" ]]; then
+ MUST_SYNC=''
+ elif [[ $x == "-k" ]]; then
+ KEEP_OLDIES='asdf'
+ elif [[ $x == "-h" ]]; then
+ PUKE_HELP=1
+ elif [[ $x == "-v" ]]; then
+ do_verbose=1
+ else
+ PUKE_HELP=1
+ echo "$x isn't a valid arg. bailing."
+ fi
+ if [[ -n $PUKE_HELP ]]; then
+ echo "-u for upgrade; sync only if new snapshots are found"
+ echo "-k for keep; keep old tree snapshots around"
+ exit -1
+ fi
+done
+
+if [[ ! -d $STATE_DIR ]]; then
+ echo "$STATE_DIR doesn't exist. don't have the ability to compensate for compressor differences without it!"
+ exit -2
+fi
+
+if has webrsync-gpg ${FEATURES} ; then
+ WEBSYNC_VERIFY_SIGNATURE=1
+else
+ WEBSYNC_VERIFY_SIGNATURE=0
+fi
+if [ ${WEBSYNC_VERIFY_SIGNATURE} != 0 -a -z "${PORTAGE_GPG_DIR}" ]; then
+ eecho "please set PORTAGE_GPG_DIR in make.conf"
+ exit 1
+fi
+
+[[ -d ${repo_location} ]] || mkdir -p "${repo_location}"
+if [[ ! -w ${repo_location} ]] ; then
+ eecho "Repository '${repo_name}' is not writable: ${repo_location}"
+ exit 1
+fi
+
+[[ -d ${DISTDIR} ]] || mkdir -p "${DISTDIR}"
+if [[ ! -w ${DISTDIR} ]] ; then
+ eecho "DISTDIR is not writable: ${DISTDIR}"
+ exit 1
+fi
+
+[[ -d ${PORTAGE_TMPDIR}/portage ]] || mkdir -p "${PORTAGE_TMPDIR}/portage"
+TMPDIR=$(mktemp -d "${PORTAGE_TMPDIR}/portage/delta-webrsync-XXXXXX")
+if [[ ! -w ${TMPDIR} ]] ; then
+ eecho "TMPDIR is not writable: ${TMPDIR}"
+ exit 1
+fi
+
+cd "$DISTDIR"
+
+found=0
+
+if type -p md5sum > /dev/null; then
+ md5_com='md5sum -c "${MD5_LOC}" &> /dev/null'
+elif type -p md5 > /dev/null; then
+ md5_com='[ "$(md5 -q ${FILE})" == "$(cut -d \ -f 1 ${MD5_LOC})" ]'
+else
+ echo "warning, unable to do md5 verification of the snapshot!"
+ echo "no suitable md5/md5sum binary was found!"
+ md5_com='true'
+fi
+
+#---------------
+#funcs
+#---------------
+
+cleanse_state_dir() {
+ [[ ${STATE_DIR:-/} != '/' ]] && rm -f "${STATE_DIR}"/* &> /dev/null
+}
+
+do_tar() {
+ local file=$1; shift
+ local decompressor
+ case ${file} in
+ *.xz) decompressor="xzcat" ;;
+ *.bz2) decompressor="bzcat" ;;
+ *.gz) decompressor="zcat" ;;
+ *) decompressor="cat" ;;
+ esac
+ ${decompressor} "${file}" | tar "$@"
+ _pipestatus=${PIPESTATUS[*]}
+ [[ ${_pipestatus// /} -eq 0 ]]
+}
+
+get_utc_date_in_seconds() {
+ date -u +"%s"
+}
+
+get_date_part() {
+ local utc_time_in_secs="$1"
+ local part="$2"
+
+ if [[ ${USERLAND} == BSD ]] ; then
+ date -r ${utc_time_in_secs} -u +"${part}"
+ else
+ date -d @${utc_time_in_secs} -u +"${part}"
+ fi
+}
+
+get_utc_second_from_string() {
+ local s="$1"
+ if [[ ${USERLAND} == BSD ]] ; then
+ # Specify zeros for the least significant digits, or else those
+ # digits are inherited from the current system clock time.
+ date -juf "%Y%m%d%H%M.%S" "${s}0000.00" +"%s"
+ else
+ date -d "${s:0:4}-${s:4:2}-${s:6:2}" -u +"%s"
+ fi
+}
+
+get_portage_timestamp() {
+ local portage_current_timestamp=0
+
+ if [ -f "${repo_location}/metadata/timestamp.x" ]; then
+ portage_current_timestamp=$(cut -f 1 -d " " "${repo_location}/metadata/timestamp.x" )
+ fi
+
+ echo "${portage_current_timestamp}"
+}
+
+increment_date() {
+ local s="$1" inc="$2"
+ if [[ ${USERLAND} == BSD ]] ; then
+ # Specify zeros for the least significant digits, or else those
+ # digits are inherited from the current system clock time.
+ date -v${inc}d -juf "%Y%m%d%H%M.%S" "${s}0000.00" +"%Y%m%d"
+ else
+ date -d "${s:0:4}-${s:4:2}-${s:6:2} ${inc} day" -u +"%Y%m%d"
+ fi
+}
+
+
+fetch_file() {
+ local URI="$1"
+ local FILE="$2"
+ local opts
+
+ if [ "${FETCHCOMMAND/wget/}" != "${FETCHCOMMAND}" ]; then
+ opts="--continue $(nvecho -q)"
+ elif [ "${FETCHCOMMAND/curl/}" != "${FETCHCOMMAND}" ]; then
+ opts="--continue-at - $(nvecho -s -f)"
+ else
+ rm -f "${DISTDIR}/${FILE}"
+ fi
+
+ __vecho "Fetching file ${FILE} ..."
+ # already set DISTDIR=
+ eval "${FETCHCOMMAND} ${opts}"
+ if [[ $? -eq 0 && -s ${DISTDIR}/${FILE} ]] ; then
+ return 0
+ else
+ rm -f "${DISTDIR}/${FILE}"
+ return 1
+ fi
+}
+
+check_file_digest() {
+ local digest="$1"
+ local file="$2"
+ local r=1
+
+ __vecho "Checking digest ..."
+
+ if type -P md5sum > /dev/null; then
+ local md5sum_output=$(md5sum "${file}")
+ local digest_content=$(< "${digest}")
+ [ "${md5sum_output%%[[:space:]]*}" = "${digest_content%%[[:space:]]*}" ] && r=0
+ elif type -P md5 > /dev/null; then
+ [ "$(md5 -q "${file}")" == "$(cut -d ' ' -f 1 "${digest}")" ] && r=0
+ else
+ eecho "cannot check digest: no suitable md5/md5sum binaries found"
+ fi
+
+ return "${r}"
+}
+
+check_file_signature() {
+ local signature="$1"
+ local file="$2"
+ local r=1
+
+ if [[ ${WEBSYNC_VERIFY_SIGNATURE} != 0 ]] ; then
+
+ __vecho "Checking signature ..."
+
+ if type -P gpg > /dev/null; then
+ gpg --homedir "${PORTAGE_GPG_DIR}" --verify "$signature" "$file" && r=0
+ else
+ eecho "cannot check signature: gpg binary not found"
+ exit 1
+ fi
+ else
+ r=0
+ fi
+
+ return "${r}"
+}
+
+get_snapshot_timestamp() {
+ local file="$1"
+
+ do_tar "${file}" --to-stdout -xf - portage/metadata/timestamp.x | cut -f 1 -d " "
+}
+
+sync_local() {
+ local file="$1"
+
+ __vecho "Syncing local tree ..."
+
+ local ownership="portage:portage"
+ if has usersync ${FEATURES} ; then
+ case "${USERLAND}" in
+ BSD)
+ ownership=$(stat -f '%Su:%Sg' "${repo_location}")
+ ;;
+ *)
+ ownership=$(stat -c '%U:%G' "${repo_location}")
+ ;;
+ esac
+ fi
+
+ if type -P tarsync > /dev/null ; then
+ local chown_opts="-o ${ownership%:*} -g ${ownership#*:}"
+ chown ${ownership} "${repo_location}" > /dev/null 2>&1 || chown_opts=""
+ if ! tarsync $(__vecho -v) -s 1 ${chown_opts} \
+ -e /distfiles -e /packages -e /local "${file}" "${repo_location}"; then
+ eecho "tarsync failed; tarball is corrupt? (${file})"
+ return 1
+ fi
+ else
+ if ! do_tar "${file}" xf - -C "${TMPDIR}" ; then
+ eecho "tar failed to extract the image. tarball is corrupt? (${file})"
+ rm -fr "${TMPDIR}"/portage
+ return 1
+ fi
+
+ local rsync_opts="${PORTAGE_RSYNC_OPTS} ${PORTAGE_RSYNC_EXTRA_OPTS}"
+ if chown ${ownership} "${TMPDIR}"/portage > /dev/null 2>&1; then
+ chown -R ${ownership} "${TMPDIR}"/portage
+ rsync_opts+=" --owner --group"
+ fi
+ cd "${TMPDIR}"/portage
+ rsync ${rsync_opts} . "${repo_location%%/}"
+ cd "${DISTDIR}"
+
+ __vecho "Cleaning up ..."
+ rm -fr "${TMPDIR}"
+ fi
+
+ if has metadata-transfer ${FEATURES} ; then
+ __vecho "Updating cache ..."
+ "${PORTAGE_BIN_PATH}/emerge" --metadata
+ fi
+ local post_sync=${PORTAGE_CONFIGROOT}etc/portage/bin/post_sync
+ [ -x "${post_sync}" ] && "${post_sync}"
+ # --quiet suppresses output if there are no relevant news items
+ has news ${FEATURES} && "${PORTAGE_BIN_PATH}/emerge" --check-news --quiet
+ return 0
+}
+
+do_snapshot() {
+ local ignore_timestamp="$1"
+ local date="$2"
+
+ local r=1
+
+ local base_file="portage-${date}.tar"
+
+ local have_files=0
+ local mirror
+
+ local compressions=""
+ type -P bzcat > /dev/null && compressions="${compressions} bz2"
+
+ if [[ -z ${compressions} ]] ; then
+ eecho "unable to locate any decompressors (xzcat or bzcat or zcat)"
+ exit 1
+ fi
+
+ for mirror in ${GENTOO_MIRRORS} ; do
+
+ mirror=${mirror%/}
+ __vecho "Trying to retrieve ${date} snapshot from ${mirror} ..."
+
+ for compression in ${compressions} ; do
+ local file="portage-${date}.tar.${compression}"
+ local digest="${file}.md5sum"
+ local signature="${file}.gpgsig"
+
+ if [ -s "${DISTDIR}/${file}" -a -s "${DISTDIR}/${digest}" -a -s "${DISTDIR}/${signature}" ] ; then
+ check_file_digest "${DISTDIR}/${digest}" "${DISTDIR}/${file}" && \
+ check_file_signature "${DISTDIR}/${signature}" "${DISTDIR}/${file}" && \
+ have_files=1
+ fi
+
+ if [ ${have_files} -eq 0 ] ; then
+ fetch_file "${mirror}/snapshots/${digest}" "${digest}" && \
+ fetch_file "${mirror}/snapshots/${signature}" "${signature}" && \
+ fetch_file "${mirror}/snapshots/${file}" "${file}" && \
+ check_file_digest "${DISTDIR}/${digest}" "${DISTDIR}/${file}" && \
+ check_file_signature "${DISTDIR}/${signature}" "${DISTDIR}/${file}" && \
+ have_files=1
+ fi
+
+ #
+ # If timestamp is invalid
+ # we want to try and retrieve
+ # from a different mirror
+ #
+ if [ ${have_files} -eq 1 ]; then
+
+ __vecho "Getting snapshot timestamp ..."
+ local snapshot_timestamp=$(get_snapshot_timestamp "${DISTDIR}/${file}")
+
+ if [ ${ignore_timestamp} == 0 ]; then
+ if [ ${snapshot_timestamp} -lt $(get_portage_timestamp) ]; then
+ wecho "portage is newer than snapshot"
+ have_files=0
+ fi
+ else
+ local utc_seconds=$(get_utc_second_from_string "${date}")
+
+ #
+ # Check that this snapshot
+ # is what it claims to be ...
+ #
+ if [ ${snapshot_timestamp} -lt ${utc_seconds} ] || \
+ [ ${snapshot_timestamp} -gt $((${utc_seconds}+ 2*86400)) ]; then
+
+ wecho "snapshot timestamp is not in acceptable period"
+ have_files=0
+ fi
+ fi
+ fi
+
+ if [ ${have_files} -eq 1 ]; then
+ break
+ else
+ #
+ # Remove files and use a different mirror
+ #
+ rm -f "${DISTDIR}/${file}" "${DISTDIR}/${digest}" "${DISTDIR}/${signature}"
+ fi
+ done
+
+ [ ${have_files} -eq 1 ] && break
+ done
+
+ if [ ${have_files} -eq 1 ]; then
+ sync_local "${DISTDIR}/${file}" && r=0
+ else
+ __vecho "${date} snapshot was not found"
+ fi
+
+ return "${r}"
+}
+
+do_latest_snapshot() {
+ local attempts=0
+ local r=1
+
+ __vecho "Fetching most recent snapshot ..."
+
+ # The snapshot for a given day is generated at 00:45 UTC on the following
+ # day, so the current day's snapshot (going by UTC time) hasn't been
+ # generated yet. Therefore, always start by looking for the previous day's
+ # snapshot (for attempts=1, subtract 1 day from the current UTC time).
+
+ # Timestamps that differ by less than 2 hours
+ # are considered to be approximately equal.
+ local min_time_diff=$(( 2 * 60 * 60 ))
+
+ local existing_timestamp=$(get_portage_timestamp)
+ local timestamp_difference
+ local timestamp_problem
+ local approx_snapshot_time
+ local start_time=$(get_utc_date_in_seconds)
+ local start_hour=$(get_date_part ${start_time} "%H")
+
+ # Daily snapshots are created at 00:45 and are not
+ # available until after 01:00. Don't waste time trying
+ # to fetch a snapshot before it's been created.
+ if [ ${start_hour} -lt 1 ] ; then
+ (( start_time -= 86400 ))
+ fi
+ local snapshot_date=$(get_date_part ${start_time} "%Y%m%d")
+ local snapshot_date_seconds=$(get_utc_second_from_string ${snapshot_date})
+
+ while (( ${attempts} < 40 )) ; do
+ (( attempts++ ))
+ (( snapshot_date_seconds -= 86400 ))
+ # snapshots are created at 00:45
+ (( approx_snapshot_time = snapshot_date_seconds + 86400 + 2700 ))
+ (( timestamp_difference = existing_timestamp - approx_snapshot_time ))
+ [ ${timestamp_difference} -lt 0 ] && (( timestamp_difference = -1 * timestamp_difference ))
+ snapshot_date=$(get_date_part ${snapshot_date_seconds} "%Y%m%d")
+
+ timestamp_problem=""
+ if [ ${timestamp_difference} -eq 0 ]; then
+ timestamp_problem="is identical to"
+ elif [ ${timestamp_difference} -lt ${min_time_diff} ]; then
+ timestamp_problem="is possibly identical to"
+ elif [ ${approx_snapshot_time} -lt ${existing_timestamp} ] ; then
+ timestamp_problem="is newer than"
+ fi
+
+ if [ -n "${timestamp_problem}" ]; then
+ ewarn "Latest snapshot date: ${snapshot_date}"
+ ewarn
+ ewarn "Approximate snapshot timestamp: ${approx_snapshot_time}"
+ ewarn " Current local timestamp: ${existing_timestamp}"
+ ewarn
+ echo -e "The current local timestamp" \
+ "${timestamp_problem} the" \
+ "timestamp of the latest" \
+ "snapshot. In order to force sync," \
+ "use the --revert option or remove" \
+ "the timestamp file located at" \
+ "'${repo_location}/metadata/timestamp.x'." | fmt -w 70 | \
+ while read -r line ; do
+ ewarn "${line}"
+ done
+ r=0
+ break
+ fi
+
+ if do_snapshot 0 "${snapshot_date}"; then
+ r=0
+ break;
+ fi
+ done
+
+ return "${r}"
+}
+
+fetch_from_mirrors() {
+ local i URI FILE MIRRORS
+ if [[ "$#" == 3 ]]; then
+ MIRRORS="${3}"
+ else
+ MIRRORS=$GENTOO_MIRRORS
+ fi
+ FILE="$2"
+ for i in $MIRRORS ; do
+ URI="${i%/}/${1#/}"
+ fetch_file "${URI}" "${FILE}" && return 0
+ done
+ return 1
+}
+
+verify_md5_file() {
+ local FILE MD5_LOC
+ FILE="$1"
+ if [[ $# == 2 ]]; then
+ MD5_LOC="$2"
+ else
+ MD5_LOC="$(pwd)/$1.md5sum"
+ fi
+ check_file_digest "${MD5_LOC}" "${FILE}"
+}
+
+#--------------------
+#inline actual script
+#--------------------
+
+if ! type -p patcher &> /dev/null; then
+ echo "!!!"
+ echo "!!! cannot find patcher, did you emerge dev-util/diffball?"
+ echo "!!! lack of patcher == have to do full fetch"
+ echo "!!!"
+ sleep 10
+ if do_latest_snapshot; then
+ rm -fr "${TMPDIR}"
+ cleanse_state_dir
+ exit 0
+ fi
+ exit 1
+fi
+
+echo "Looking for available base versions for a delta"
+
+#note we're already in distdir
+
+unset base_version
+# portage-snapshots in reverse order.
+# icky.
+unset dfile
+potentials="$(ls -1 portage-2[[:digit:]][[:digit:]][[:digit:]][[:digit:]][[:digit:]][[:digit:]][[:digit:]].tar.bz2 ${STATE_DIR}/portage-2[[:digit:]][[:digit:]][[:digit:]][[:digit:]][[:digit:]][[:digit:]][[:digit:]].tar.bz2 2> /dev/null | sed -e 's:^.*/::' | sort -r)"
+for basef in ${potentials}; do
+ chksum=''
+ found="dar"
+ if [ -e "${STATE_DIR}/${basef}.md5sum" ]; then
+ chksum="${STATE_DIR}/${basef}.md5sum"
+ elif [ -e "${basef}.md5sum" ]; then
+ chksum="${DISTDIR}/${basef}.md5sum"
+ else
+ echo "attempting to get md5sum for $basef"
+ if ! fetch_from_mirrors "/snapshots/${basef}.md5sum" "${basef}.md5sum"; then
+ echo "can't get md5 for ${basef}"
+ continue
+ fi
+ chksum="${basef}.md5sum"
+ fi
+ if [ -e "${basef}" ]; then
+ dfile="${DISTDIR}/${basef}"
+ else
+ dfile="${STATE_DIR}/${basef}"
+ fi
+ if ! verify_md5_file "${dfile}" "${chksum}"; then
+ echo "found a stale snapshot. cleansing"
+ rm -f "${dfile}" &> /dev/null
+ rm -f "${chksum}.md5sum" &> /dev/null
+ dar=""
+ else
+ base_version="${basef}"
+ break
+ fi
+done
+
+#by this point, we either have a base_version, or we don't.
+if [[ -z ${base_version} ]]; then
+ echo "no base found. resorting to pulling a full version"
+ if do_latest_snapshot; then
+ rm -fr "${TMPDIR}"
+ cleanse_state_dir
+ exit 0
+ fi
+ exit 1
+fi
+
+#we have a md5 verified base. now we get the patch.
+
+base_date="${base_version%.tar.bz2}"
+base_date="${base_date#portage-}"
+# we now have yyyymmdd
+
+patches=''
+echo "fetching patches"
+fetched='asdf'
+while [[ -n ${fetched} ]]; do
+ next_day=$(increment_date ${base_date} +1)
+ # if we can't get a *single* patch or md5, even one missing, do full.
+ p="snapshot-${base_date}-${next_day}.patch.bz2"
+ if [[ ! -e ${p}.md5sum ]] && ! fetch_from_mirrors "/snapshots/deltas/${p}.md5sum" "${p}.md5sum"; then
+ echo "failed fetching ${p}.md5sum"
+ fetched=''
+ break
+ fi
+ fetch="yes"
+ if [[ -e ${p} ]]; then
+ if ! verify_md5_file "${p}"; then
+ rm -f "${p}" &> /dev/null
+ else
+ fetch=""
+ fi
+ fi
+ if [[ -n $fetch ]]; then
+ if ! fetch_from_mirrors "/snapshots/deltas/${p}" "${p}"; then
+ echo "failed fetching ${p}"
+ fetched=''
+ fi
+ fi
+ if [[ -z ${fetched} ]]; then
+ break
+ fi
+ if ! verify_md5_file "${p}"; then
+ echo "md5 failed on ${p}"
+ fetched=''
+ break
+ fi
+ patches="${patches} ${p}"
+ base_date="${next_day}"
+done
+final_date=${base_date}
+
+if [[ -z $patches ]]; then
+ echo "no patches found? up to date?"
+ if [[ -n $MUST_SYNC ]]; then
+ echo "syncing with existing file"
+ if [[ ${WEBSYNC_VERIFY_SIGNATURE} == 1 &&
+ ! -e ${DISTDIR}/portage-${base_date}.tar.bz2.gpgsig ]] && \
+ ! fetch_from_mirrors "/snapshots/portage-${base_date}.tar.bz2.gpgsig" "portage-${base_date}.tar.bz2.gpgsig" ; then
+ eecho "Couldn't fetch portage-${base_date}.tar.bz2.gpgsig"
+ exit 5
+ fi
+ if [[ ${WEBSYNC_VERIFY_SIGNATURE} == 1 ]] ; then
+ check_file_signature "${DISTDIR}/portage-${base_date}.tar.bz2.gpgsig" "${dfile}" || exit 1
+ fi
+ sync_local "${dfile}" && rm -fr "${TMPDIR}"
+ else
+ rm -fr "${TMPDIR}"
+ fi
+ exit $?
+fi
+
+unset got_umd5
+#grab the md5 for later usage.
+if [[ ! -e portage-${final_date}.tar.bz2.md5sum ]] && ! fetch_from_mirrors "/snapshots/portage-${final_date}.tar.bz2.md5sum" "portage-${final_date}.tar.bz2.md5sum"; then
+ echo "warning... couldn't grab the md5sum for ${final_date}. which is odd"
+ echo "thus, bailing (sorry)"
+ exit 5
+else
+ if [[ ! -e portage-${final_date}.tar.bz2.umd5sum ]] && ! fetch_from_mirrors "/snapshots/portage-${final_date}.tar.bz2.umd5sum" "portage-${final_date}.tar.bz2.umd5sum"; then
+ if ! fetch_from_mirrors "/snapshots/portage-${final_date}.tar.bz2.umd5sum" "portage-${final_date}.tar.bz2.umd5sum"; then
+ echo "couldn't grab umd5sum (uncompressed md5sum) for ${final_date}."
+ echo "can't compensate for bzip2 version differences iow."
+ else
+ got_umd5=1
+ fi
+ else
+ got_umd5=1
+ fi
+fi
+
+if [[ ${WEBSYNC_VERIFY_SIGNATURE} == 1 && ! -e portage-${final_date}.tar.bz2.gpgsig ]] && \
+ ! fetch_from_mirrors "/snapshots/portage-${final_date}.tar.bz2.gpgsig" "portage-${final_date}.tar.bz2.gpgsig" ; then
+ echo "warning... couldn't grab the gpgsig for ${final_date}. which is odd"
+ echo "thus, bailing (sorry)"
+ exit 5
+fi
+
+# got our patches.
+if ! patcher -v "${dfile}" ${patches} "${TMPDIR}/portage-${final_date}.tar"; then
+ echo "reconstruction failed (contact the author with the error from the reconstructor please)"
+ rm -f "${TMPDIR}/portage-${final_date}.tar"
+ if do_latest_snapshot; then
+ rm -fr "${TMPDIR}"
+ cleanse_state_dir
+ exit 0
+ fi
+ exit 1
+fi
+verified=0
+if [[ -n $got_umd5 ]]; then
+ echo "verifying uncompressed md5"
+ if ! verify_md5_file "${TMPDIR}/portage-${final_date}.tar" "${DISTDIR}/portage-${final_date}.tar.bz2.umd5sum"; then
+ echo "uncompressed verification failed. This means either you found a bug in diffball, or something odd is going on"
+ echo "with upstream patch generation"
+ echo "trying md5sum next, which probably will fail."
+ else
+ verified="1"
+ fi
+fi
+
+unset need_last_sync
+if [ "$verified" == "1" ]; then
+ need_last_sync="dar"
+ if [[ ${WEBSYNC_VERIFY_SIGNATURE} == 1 ]] ; then
+ # BUG: Signature verification will fail if the local bzip2
+ # program does not produce output that is perfectly identical
+ # to the bzip2 program used to compress the signed tar file.
+ echo "recompressing ..."
+ bzip2 -vk9 "${TMPDIR}/portage-${final_date}.tar"
+ check_file_signature "${DISTDIR}/portage-${final_date}.tar.bz2.gpgsig" "${TMPDIR}/portage-${final_date}.tar.bz2" || exit 1
+ else
+ echo "recompressing. (backgrounding)"
+ bzip2 -vk9 "${TMPDIR}/portage-${final_date}.tar" &
+ fi
+
+ echo "beginning update to the tree"
+ sync_local "${TMPDIR}/portage-${final_date}.tar"
+ echo "doing final md5 stuff"
+ wait
+ # bzip2 is finished now.
+ rm -f "${TMPDIR}/portage-${final_date}.tar"
+else
+ echo "recompressing."
+	bzip2 -v9 "${TMPDIR}/portage-${final_date}.tar"
+fi
+
+echo "verifying generated tarball"
+
+if ! verify_md5_file "${TMPDIR}/portage-${final_date}.tar.bz2" "${DISTDIR}/portage-${final_date}.tar.bz2.md5sum"; then
+	if [[ ${verified} != "1" ]]; then
+ echo "couldn't verify the generated tarball. bug, most likely."
+ exit 5
+ fi
+ # hokay. md5 doesn't agree with umd5. bzip2 issue in effect.
+	echo "compressed md5 differs, but the uncompressed md5 checks out; in other words, a bzip2 version incompatibility"
+	echo "saving the md5"
+ if type -p md5sum &> /dev/null; then
+ md5sum "${TMPDIR}/portage-${final_date}.tar.bz2" | sed -e "s:${TMPDIR}/\?::" > \
+ "${STATE_DIR}/portage-${final_date}.tar.bz2.md5sum"
+ elif type -p md5 &> /dev/null; then
+ echo "$(md5 -q "${TMPDIR}/portage-${final_date}.tar.bz2") portage-${final_date}.tar.bz2" > \
+ "${STATE_DIR}/portage-${final_date}.tar.bz2.md5sum"
+ else
+		echo "couldn't find either md5 or md5sum; something is broken (bailing, sorry)"
+ exit 7
+ fi
+ mv "${DISTDIR}/portage-${final_date}.tar.bz2.umd5sum" "${TMPDIR}/portage-${final_date}.tar.bz2" "${STATE_DIR}/"
+ dfile="${STATE_DIR}/portage-${final_date}.tar.bz2"
+else
+ dfile="${DISTDIR}/portage-${final_date}.tar.bz2"
+ mv "${TMPDIR}/portage-${final_date}.tar.bz2" "${DISTDIR}/"
+fi
+
+if [ -z "${need_last_sync}" ]; then
+ if [[ ${WEBSYNC_VERIFY_SIGNATURE} == 1 ]] ; then
+ check_file_signature "${DISTDIR}/portage-${final_date}.tar.bz2.gpgsig" "${dfile}" || exit 1
+ fi
+ echo "beginning update to the tree"
+ sync_local "${dfile}"
+fi
+
+for x in ${patches} ; do
+ rm -f "${DISTDIR}/${x}"{,.md5sum}
+done
+
+if [[ -z $KEEP_OLDIES ]]; then
+ echo "cleansing"
+ for x in $potentials; do
+ echo "removing ${x}"
+ rm -f "${DISTDIR}/${x}"{,.md5sum,.umd5sum,.gpgsig} &> /dev/null
+ rm -f "${STATE_DIR}/${x}"{,.md5sum,.umd5sum} &> /dev/null
+ done
+fi
+rm -rf "${TMPDIR}"
+echo "done."
+
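
The delta loop above reconstructs the newest snapshot by chaining one-day patches: starting from the date of the local snapshot, it fetches and md5-verifies snapshot-<day>-<day+1>.patch.bz2 until a fetch or checksum fails, then hands the accumulated list to patcher. A minimal sketch of that chaining in Python (illustrative only; have_patch is a hypothetical callback that fetches and verifies one delta, and real snapshot names use YYYYMMDD dates):

    from datetime import date, timedelta

    def build_patch_chain(base, have_patch):
        # Collect consecutive daily patch names until one is unavailable;
        # an empty result means the caller falls back to a full fetch.
        chain = []
        while True:
            nxt = base + timedelta(days=1)
            name = "snapshot-%s-%s.patch.bz2" % (base.isoformat(), nxt.isoformat())
            if not have_patch(name):
                break
            chain.append(name)
            base = nxt
        return chain
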
diff --git a/mkrelease.sh b/mkrelease.sh
index 87bb4bf50..f9f75644e 100755
--- a/mkrelease.sh
+++ b/mkrelease.sh
@@ -1,4 +1,6 @@
#!/bin/bash
+# Copyright 2008-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
RELEASE_BUILDDIR=${RELEASE_BUILDDIR:-/var/tmp/portage-release}
SOURCE_DIR=${RELEASE_BUILDDIR}/checkout
@@ -6,21 +8,27 @@ BRANCH=${BRANCH:-master}
USE_TAG=false
CHANGELOG_REVISION=
UPLOAD_LOCATION=
+RUNTESTS=false
+USER=
+
+usage() {
+ echo "Usage: ${0##*/} [--changelog-rev <tree-ish>] [-t|--tag] [-u|--upload <location>] [--user <username>] [--runtests] <version>"
+ exit ${1:-0}
+}
die() {
- echo $@
- echo "Usage: ${0##*/} [--changelog-rev <tree-ish>] [-t|--tag] [-u|--upload <location>] <version>"
- exit 1
+ printf 'error: %s\n' "$*"
+ usage 1
}
-ARGS=$(getopt -o tu: --long changelog-rev:,tag,upload: \
- -n ${0##*/} -- "$@")
+ARGS=$(getopt -o htu: --long help,changelog-rev:,runtests,tag,upload:,user: \
+ -n "${0##*/}" -- "$@")
[ $? != 0 ] && die "initialization error"
eval set -- "${ARGS}"
while true; do
- case "$1" in
+ case $1 in
--changelog-rev)
CHANGELOG_REVISION=$2
shift 2
@@ -30,9 +38,20 @@ while true; do
shift
;;
-u|--upload)
- UPLOAD_LOCATION=${2}
+ UPLOAD_LOCATION=$2
shift 2
;;
+ --user)
+			USER="${2}@"
+ shift 2
+ ;;
+ -h|--help)
+ usage
+ ;;
+ --runtests)
+ RUNTESTS=true
+ shift
+ ;;
--)
shift
break
@@ -43,56 +62,62 @@ while true; do
esac
done
-[ -z "$1" ] && die "Need version argument"
-[ -n "${1/[0-9]*}" ] && die "Invalid version argument"
+[ $# != 1 ] && die "Need version argument"
+[[ -n ${1/[0-9]*} ]] && die "Invalid version argument"
-VERSION=${1}
+VERSION=$1
RELEASE=portage-${VERSION}
RELEASE_DIR=${RELEASE_BUILDDIR}/${RELEASE}
RELEASE_TARBALL="${RELEASE_BUILDDIR}/${RELEASE}.tar.bz2"
-TREE_ISH=$BRANCH
-if [[ $USE_TAG = true ]] ; then
- TREE_ISH=v$VERSION
+TREE_ISH=${BRANCH}
+if [[ ${USE_TAG} == "true" ]] ; then
+ TREE_ISH="v${VERSION}"
fi
echo ">>> Cleaning working directories ${RELEASE_DIR} ${SOURCE_DIR}"
rm -rf "${RELEASE_DIR}" "${SOURCE_DIR}" || die "directory cleanup failed"
mkdir -p "${RELEASE_DIR}" || die "directory creation failed"
-mkdir -p "$SOURCE_DIR" || die "mkdir failed"
+mkdir -p "${SOURCE_DIR}" || die "mkdir failed"
echo ">>> Starting GIT archive"
-git archive --format=tar $TREE_ISH | \
- tar -xf - -C "$SOURCE_DIR" || die "git archive failed"
+git archive --format=tar ${TREE_ISH} | \
+ tar -xf - -C "${SOURCE_DIR}" || die "git archive failed"
echo ">>> Building release tree"
-cp -a "${SOURCE_DIR}/"{bin,cnf,doc,man,pym} "${RELEASE_DIR}/" || die "directory copy failed"
-cp "${SOURCE_DIR}/"{DEVELOPING,LICENSE,Makefile,NEWS,RELEASE-NOTES,TEST-NOTES} \
+cp -a "${SOURCE_DIR}/"{bin,cnf,doc,man,misc,pym} "${RELEASE_DIR}/" || die "directory copy failed"
+cp "${SOURCE_DIR}/"{.portage_not_installed,DEVELOPING,LICENSE,Makefile,NEWS,README,RELEASE-NOTES,TEST-NOTES} \
"${RELEASE_DIR}/" || die "file copy failed"
-rm -rf "$SOURCE_DIR" || die "directory cleanup failed"
+if [[ ${RUNTESTS} == "true" ]] ; then
+ pushd "${SOURCE_DIR}" >/dev/null
+ ./runtests.sh --python-versions=supported || die "tests failed"
+ popd >/dev/null
+fi
+
+rm -rf "${SOURCE_DIR}" || die "directory cleanup failed"
echo ">>> Setting portage.VERSION"
-sed -e "s/^VERSION=.*/VERSION=\"${VERSION}\"/" \
+sed -e "s/^VERSION = .*/VERSION = \"${VERSION}\"/" \
-i "${RELEASE_DIR}/pym/portage/__init__.py" || \
die "Failed to patch portage.VERSION"
echo ">>> Creating Changelog"
git_log_opts=""
-if [ -n "$CHANGELOG_REVISION" ] ; then
- git_log_opts+=" $CHANGELOG_REVISION^..$TREE_ISH"
+if [[ -n ${CHANGELOG_REVISION} ]] ; then
+ git_log_opts+=" ${CHANGELOG_REVISION}^..${TREE_ISH}"
else
- git_log_opts+=" $TREE_ISH"
+ git_log_opts+=" ${TREE_ISH}"
fi
skip_next=false
-git log $git_log_opts | fmt -w 80 -p " " | while read -r ; do
- if [[ $skip_next = true ]] ; then
+git log ${git_log_opts} | fmt -w 80 -p " " | while read -r ; do
+ if [[ ${skip_next} == "true" ]] ; then
skip_next=false
- elif [[ $REPLY = " svn path="* ]] ; then
+ elif [[ ${REPLY} == " svn path="* ]] ; then
skip_next=true
else
- echo "$REPLY"
+ echo "${REPLY}"
fi
-done > "$RELEASE_DIR/ChangeLog" || die "ChangeLog creation failed"
+done > "${RELEASE_DIR}/ChangeLog" || die "ChangeLog creation failed"
cd "${RELEASE_BUILDDIR}"
@@ -101,16 +126,16 @@ tar --owner portage --group portage -cjf "${RELEASE_TARBALL}" "${RELEASE}" || \
die "tarball creation failed"
DISTDIR=$(portageq distdir)
-if [ -n "${DISTDIR}" -a -d "${DISTDIR}" -a -w "${DISTDIR}" ]; then
+if [[ -n ${DISTDIR} && -d ${DISTDIR} && -w ${DISTDIR} ]] ; then
echo ">>> Copying release tarball into ${DISTDIR}"
cp "${RELEASE_TARBALL}" "${DISTDIR}"/ || echo "!!! tarball copy failed"
fi
-if [ -n "${UPLOAD_LOCATION}" ]; then
- echo ">>> Uploading ${RELEASE_TARBALL} to ${UPLOAD_LOCATION}"
- scp "${RELEASE_TARBALL}" "dev.gentoo.org:${UPLOAD_LOCATION}" || die "upload failed"
+if [[ -n ${UPLOAD_LOCATION} ]] ; then
+ echo ">>> Uploading ${RELEASE_TARBALL} to ${USER}dev.gentoo.org:${UPLOAD_LOCATION}"
+ scp "${RELEASE_TARBALL}" "${USER}dev.gentoo.org:${UPLOAD_LOCATION}" || die "upload failed"
else
- echo "${RELEASE_TARBALL} created"
+ du -h "${RELEASE_TARBALL}"
fi
exit 0
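
With the new options, a tagged release build that runs the test suite and uploads as a specific developer looks like this (hypothetical values): ./mkrelease.sh --changelog-rev v2.2.9 -t --runtests --user larry -u public_html 2.2.10. The --user value is prefixed to dev.gentoo.org to form the scp target.
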
diff --git a/pym/_emerge/AbstractDepPriority.py b/pym/_emerge/AbstractDepPriority.py
index 94f26efc5..1fcd04345 100644
--- a/pym/_emerge/AbstractDepPriority.py
+++ b/pym/_emerge/AbstractDepPriority.py
@@ -1,11 +1,12 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import copy
from portage.util.SlotObject import SlotObject
class AbstractDepPriority(SlotObject):
- __slots__ = ("buildtime", "runtime", "runtime_post")
+ __slots__ = ("buildtime", "buildtime_slot_op",
+ "runtime", "runtime_post", "runtime_slot_op")
def __lt__(self, other):
return self.__int__() < other
diff --git a/pym/_emerge/AbstractEbuildProcess.py b/pym/_emerge/AbstractEbuildProcess.py
index c7b8f83ca..31127f474 100644
--- a/pym/_emerge/AbstractEbuildProcess.py
+++ b/pym/_emerge/AbstractEbuildProcess.py
@@ -1,8 +1,10 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import io
+import platform
import stat
+import subprocess
import textwrap
from _emerge.SpawnProcess import SpawnProcess
from _emerge.EbuildBuildDir import EbuildBuildDir
@@ -20,8 +22,10 @@ class AbstractEbuildProcess(SpawnProcess):
__slots__ = ('phase', 'settings',) + \
('_build_dir', '_ipc_daemon', '_exit_command', '_exit_timeout_id')
+
_phases_without_builddir = ('clean', 'cleanrm', 'depend', 'help',)
_phases_interactive_whitelist = ('config',)
+ _phases_without_cgroup = ('preinst', 'postinst', 'prerm', 'postrm', 'config')
# Number of milliseconds to allow natural exit of the ebuild
# process after it has called the exit command via IPC. It
@@ -52,13 +56,48 @@ class AbstractEbuildProcess(SpawnProcess):
if need_builddir and \
not os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
msg = _("The ebuild phase '%s' has been aborted "
- "since PORTAGE_BUILDIR does not exist: '%s'") % \
+ "since PORTAGE_BUILDDIR does not exist: '%s'") % \
(self.phase, self.settings['PORTAGE_BUILDDIR'])
self._eerror(textwrap.wrap(msg, 72))
self._set_returncode((self.pid, 1 << 8))
- self.wait()
+ self._async_wait()
return
+ # Check if the cgroup hierarchy is in place. If it's not, mount it.
+ if (os.geteuid() == 0 and platform.system() == 'Linux'
+ and 'cgroup' in self.settings.features
+ and self.phase not in self._phases_without_cgroup):
+ cgroup_root = '/sys/fs/cgroup'
+ cgroup_portage = os.path.join(cgroup_root, 'portage')
+ cgroup_path = os.path.join(cgroup_portage,
+ '%s:%s' % (self.settings["CATEGORY"],
+ self.settings["PF"]))
+ try:
+ # cgroup tmpfs
+ if not os.path.ismount(cgroup_root):
+ # we expect /sys/fs to be there already
+ if not os.path.isdir(cgroup_root):
+ os.mkdir(cgroup_root, 0o755)
+ subprocess.check_call(['mount', '-t', 'tmpfs',
+ '-o', 'rw,nosuid,nodev,noexec,mode=0755',
+ 'tmpfs', cgroup_root])
+
+ # portage subsystem
+ if not os.path.ismount(cgroup_portage):
+ if not os.path.isdir(cgroup_portage):
+ os.mkdir(cgroup_portage, 0o755)
+ subprocess.check_call(['mount', '-t', 'cgroup',
+ '-o', 'rw,nosuid,nodev,noexec,none,name=portage',
+ 'tmpfs', cgroup_portage])
+
+ # the ebuild cgroup
+ if not os.path.isdir(cgroup_path):
+ os.mkdir(cgroup_path)
+ except (subprocess.CalledProcessError, OSError):
+ pass
+ else:
+ self.cgroup = cgroup_path
+
if self.background:
# Automatically prevent color codes from showing up in logs,
# since we're not displaying to a terminal anyway.
@@ -67,7 +106,7 @@ class AbstractEbuildProcess(SpawnProcess):
if self._enable_ipc_daemon:
self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
if self.phase not in self._phases_without_builddir:
- if 'PORTAGE_BUILDIR_LOCKED' not in self.settings:
+ if 'PORTAGE_BUILDDIR_LOCKED' not in self.settings:
self._build_dir = EbuildBuildDir(
scheduler=self.scheduler, settings=self.settings)
self._build_dir.lock()
@@ -143,9 +182,14 @@ class AbstractEbuildProcess(SpawnProcess):
self._exit_command.reply_hook = self._exit_command_callback
query_command = QueryCommand(self.settings, self.phase)
commands = {
- 'best_version' : query_command,
- 'exit' : self._exit_command,
- 'has_version' : query_command,
+ 'available_eclasses' : query_command,
+ 'best_version' : query_command,
+ 'eclass_path' : query_command,
+ 'exit' : self._exit_command,
+ 'has_version' : query_command,
+ 'license_path' : query_command,
+ 'master_repositories' : query_command,
+ 'repository_path' : query_command,
}
input_fifo, output_fifo = self._init_ipc_fifos()
self._ipc_daemon = EbuildIpcDaemon(commands=commands,
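
The cgroup hunk above only mounts the hierarchy and creates the per-ebuild group that it stores in self.cgroup; attaching the spawned process happens elsewhere. For a cgroup v1 named hierarchy like this one, attaching comes down to writing the PID into the group's tasks file, roughly (hypothetical helper, not portage API):

    import os

    def add_pid_to_cgroup(cgroup_path, pid):
        # cgroup v1: a process joins a group when its PID is
        # written to the group's 'tasks' file.
        with open(os.path.join(cgroup_path, "tasks"), "a") as f:
            f.write("%d\n" % pid)
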
diff --git a/pym/_emerge/AbstractPollTask.py b/pym/_emerge/AbstractPollTask.py
index 2c8470925..3f6dd6cef 100644
--- a/pym/_emerge/AbstractPollTask.py
+++ b/pym/_emerge/AbstractPollTask.py
@@ -151,4 +151,4 @@ class AbstractPollTask(AsynchronousTask):
while self._registered and not timeout_cb.timed_out:
self.scheduler.iteration()
finally:
- self.scheduler.unregister(timeout_cb.timeout_id)
+ self.scheduler.source_remove(timeout_cb.timeout_id)
diff --git a/pym/_emerge/AsynchronousLock.py b/pym/_emerge/AsynchronousLock.py
index 587aa4650..c0b9b26dc 100644
--- a/pym/_emerge/AsynchronousLock.py
+++ b/pym/_emerge/AsynchronousLock.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import dummy_threading
@@ -49,7 +49,7 @@ class AsynchronousLock(AsynchronousTask):
pass
else:
self.returncode = os.EX_OK
- self.wait()
+ self._async_wait()
return
if self._force_process or \
@@ -105,44 +105,27 @@ class _LockThread(AbstractPollTask):
"""
__slots__ = ('path',) + \
- ('_files', '_force_dummy', '_lock_obj',
- '_thread', '_reg_id',)
+ ('_force_dummy', '_lock_obj', '_thread',)
def _start(self):
- pr, pw = os.pipe()
- self._files = {}
- self._files['pipe_read'] = pr
- self._files['pipe_write'] = pw
- for f in self._files.values():
- fcntl.fcntl(f, fcntl.F_SETFL,
- fcntl.fcntl(f, fcntl.F_GETFL) | os.O_NONBLOCK)
- self._reg_id = self.scheduler.register(self._files['pipe_read'],
- self.scheduler.IO_IN, self._output_handler)
self._registered = True
threading_mod = threading
if self._force_dummy:
threading_mod = dummy_threading
self._thread = threading_mod.Thread(target=self._run_lock)
+ self._thread.daemon = True
self._thread.start()
def _run_lock(self):
self._lock_obj = lockfile(self.path, wantnewlockfile=True)
- os.write(self._files['pipe_write'], b'\0')
-
- def _output_handler(self, f, event):
- buf = None
- if event & self.scheduler.IO_IN:
- try:
- buf = os.read(self._files['pipe_read'], self._bufsize)
- except OSError as e:
- if e.errno not in (errno.EAGAIN,):
- raise
- if buf:
- self._unregister()
- self.returncode = os.EX_OK
- self.wait()
+ # Thread-safe callback to EventLoop
+ self.scheduler.idle_add(self._run_lock_cb)
- return True
+ def _run_lock_cb(self):
+ self._unregister()
+ self.returncode = os.EX_OK
+ self.wait()
+ return False
def _cancel(self):
# There's currently no way to force thread termination.
@@ -163,15 +146,6 @@ class _LockThread(AbstractPollTask):
self._thread.join()
self._thread = None
- if self._reg_id is not None:
- self.scheduler.unregister(self._reg_id)
- self._reg_id = None
-
- if self._files is not None:
- for f in self._files.values():
- os.close(f)
- self._files = None
-
class _LockProcess(AbstractPollTask):
"""
This uses the portage.locks module to acquire a lock asynchronously,
@@ -190,16 +164,28 @@ class _LockProcess(AbstractPollTask):
self._files = {}
self._files['pipe_in'] = in_pr
self._files['pipe_out'] = out_pw
+
fcntl.fcntl(in_pr, fcntl.F_SETFL,
fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)
- self._reg_id = self.scheduler.register(in_pr,
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(in_pr, fcntl.F_SETFD,
+ fcntl.fcntl(in_pr, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(in_pr,
self.scheduler.IO_IN, self._output_handler)
self._registered = True
self._proc = SpawnProcess(
args=[portage._python_interpreter,
os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
- fd_pipes={0:out_pr, 1:in_pw, 2:sys.stderr.fileno()},
+ fd_pipes={0:out_pr, 1:in_pw, 2:sys.__stderr__.fileno()},
scheduler=self.scheduler)
self._proc.addExitListener(self._proc_exit)
self._proc.start()
@@ -273,7 +259,7 @@ class _LockProcess(AbstractPollTask):
self._registered = False
if self._reg_id is not None:
- self.scheduler.unregister(self._reg_id)
+ self.scheduler.source_remove(self._reg_id)
self._reg_id = None
if self._files is not None:
diff --git a/pym/_emerge/AsynchronousTask.py b/pym/_emerge/AsynchronousTask.py
index 7a193ce7d..da58261db 100644
--- a/pym/_emerge/AsynchronousTask.py
+++ b/pym/_emerge/AsynchronousTask.py
@@ -60,6 +60,20 @@ class AsynchronousTask(SlotObject):
def _wait(self):
return self.returncode
+ def _async_wait(self):
+ """
+ For cases where _start exits synchronously, this method is a
+ convenient way to trigger an asynchronous call to self.wait()
+ (in order to notify exit listeners), avoiding excessive event
+ loop recursion (or stack overflow) that synchronous calling of
+ exit listeners can cause. This method is thread-safe.
+ """
+ self.scheduler.idle_add(self._async_wait_cb)
+
+ def _async_wait_cb(self):
+ self.wait()
+ return False
+
def cancel(self):
"""
Cancel the task, but do not wait for exit status. If asynchronous exit
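
A sketch of the pattern _async_wait() enables, assuming a GLib-style scheduler whose idle_add() runs a callback on the next event-loop iteration (the task class here is hypothetical, not portage code):

    class ImmediateTask:
        def __init__(self, scheduler):
            self.scheduler = scheduler
            self.exit_listeners = []
            self.returncode = None

        def _start(self):
            # Finishes synchronously, but defers listener notification
            # to the event loop instead of calling wait() on this stack,
            # so chained listeners cannot recurse.
            self.returncode = 0
            self.scheduler.idle_add(self._notify)

        def _notify(self):
            for cb in self.exit_listeners:
                cb(self)
            return False  # one-shot idle callback
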
diff --git a/pym/_emerge/Binpkg.py b/pym/_emerge/Binpkg.py
index ea8a1ad13..a740efdb9 100644
--- a/pym/_emerge/Binpkg.py
+++ b/pym/_emerge/Binpkg.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.EbuildPhase import EbuildPhase
@@ -298,6 +298,7 @@ class Binpkg(CompositeTask):
extractor = BinpkgExtractorAsync(background=self.background,
env=self.settings.environ(),
+ features=self.settings.features,
image_dir=self._image_dir,
pkg=self.pkg, pkg_path=self._pkg_path,
logfile=self.settings.get("PORTAGE_LOG_FILE"),
@@ -328,11 +329,13 @@ class Binpkg(CompositeTask):
self.wait()
return
+ env = self.settings.environ()
+ env["PYTHONPATH"] = self.settings["PORTAGE_PYTHONPATH"]
chpathtool = SpawnProcess(
args=[portage._python_interpreter,
os.path.join(self.settings["PORTAGE_BIN_PATH"], "chpathtool.py"),
self.settings["D"], self._build_prefix, self.settings["EPREFIX"]],
- background=self.background, env=self.settings.environ(),
+ background=self.background, env=env,
scheduler=self.scheduler,
logfile=self.settings.get('PORTAGE_LOG_FILE'))
self._writemsg_level(">>> Adjusting Prefix to %s\n" % self.settings["EPREFIX"])
diff --git a/pym/_emerge/BinpkgExtractorAsync.py b/pym/_emerge/BinpkgExtractorAsync.py
index f25cbf933..be74c2fb7 100644
--- a/pym/_emerge/BinpkgExtractorAsync.py
+++ b/pym/_emerge/BinpkgExtractorAsync.py
@@ -1,23 +1,31 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.SpawnProcess import SpawnProcess
import portage
import signal
+import subprocess
class BinpkgExtractorAsync(SpawnProcess):
- __slots__ = ("image_dir", "pkg", "pkg_path")
+ __slots__ = ("features", "image_dir", "pkg", "pkg_path")
_shell_binary = portage.const.BASH_BINARY
def _start(self):
+ tar_options = ""
+ if "xattr" in self.features:
+ process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output = process.communicate()[0]
+ if b"--xattrs" in output:
+ tar_options = "--xattrs"
+
# Add -q to bzip2 opts, in order to avoid "trailing garbage after
# EOF ignored" warning messages due to xpak trailer.
# SIGPIPE handling (128 + SIGPIPE) should be compatible with
# assert_sigpipe_ok() that's used by the ebuild unpack() helper.
self.args = [self._shell_binary, "-c",
- ("${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -cq -- %s | tar -xp -C %s -f - ; " + \
+ ("${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -cq -- %s | tar -xp %s -C %s -f - ; " + \
"p=(${PIPESTATUS[@]}) ; " + \
"if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \
"echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \
@@ -25,6 +33,7 @@ class BinpkgExtractorAsync(SpawnProcess):
"echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \
"exit 0 ;") % \
(portage._shell_quote(self.pkg_path),
+ tar_options,
portage._shell_quote(self.image_dir))]
SpawnProcess._start(self)
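
The tar probe above generalizes to any command-line capability check: run the tool's --help and search the output for the flag. A standalone sketch (standard library only; the function name is illustrative):

    import subprocess

    def tool_supports(tool, flag):
        # Some tools print usage to stderr, so capture both streams.
        proc = subprocess.Popen([tool, "--help"],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        return flag in out or flag in err

    # e.g. tool_supports("tar", b"--xattrs")
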
diff --git a/pym/_emerge/BinpkgFetcher.py b/pym/_emerge/BinpkgFetcher.py
index f415e2ec7..543881ee6 100644
--- a/pym/_emerge/BinpkgFetcher.py
+++ b/pym/_emerge/BinpkgFetcher.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AsynchronousLock import AsynchronousLock
@@ -63,7 +63,7 @@ class BinpkgFetcher(SpawnProcess):
if pretend:
portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
self._set_returncode((self.pid, os.EX_OK << 8))
- self.wait()
+ self._async_wait()
return
protocol = urllib_parse_urlparse(uri)[0]
@@ -80,6 +80,12 @@ class BinpkgFetcher(SpawnProcess):
"FILE" : os.path.basename(pkg_path)
}
+ for k in ("PORTAGE_SSH_OPTS",):
+ try:
+ fcmd_vars[k] = settings[k]
+ except KeyError:
+ pass
+
fetch_env = dict(settings.items())
fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
for x in portage.util.shlex_split(fcmd)]
@@ -91,9 +97,9 @@ class BinpkgFetcher(SpawnProcess):
# Redirect all output to stdout since some fetchers like
# wget pollute stderr (if portage detects a problem then it
# can send it's own message to stderr).
- fd_pipes.setdefault(0, sys.stdin.fileno())
- fd_pipes.setdefault(1, sys.stdout.fileno())
- fd_pipes.setdefault(2, sys.stdout.fileno())
+ fd_pipes.setdefault(0, portage._get_stdin().fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stdout__.fileno())
self.args = fetch_args
self.env = fetch_env
@@ -104,7 +110,7 @@ class BinpkgFetcher(SpawnProcess):
def _pipe(self, fd_pipes):
"""When appropriate, use a pty so that fetcher progress bars,
like wget has, will work properly."""
- if self.background or not sys.stdout.isatty():
+ if self.background or not sys.__stdout__.isatty():
# When the output only goes to a log file,
# there's no point in creating a pty.
return os.pipe()
diff --git a/pym/_emerge/BinpkgVerifier.py b/pym/_emerge/BinpkgVerifier.py
index 0052967f6..2c6979265 100644
--- a/pym/_emerge/BinpkgVerifier.py
+++ b/pym/_emerge/BinpkgVerifier.py
@@ -1,75 +1,120 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from _emerge.AsynchronousTask import AsynchronousTask
-from portage.util import writemsg
+import errno
import io
import sys
+
+from _emerge.CompositeTask import CompositeTask
import portage
from portage import os
+from portage.checksum import (_apply_hash_filter,
+ _filter_unaccelarated_hashes, _hash_filter)
+from portage.output import EOutput
+from portage.util._async.FileDigester import FileDigester
from portage.package.ebuild.fetch import _checksum_failure_temp_file
-class BinpkgVerifier(AsynchronousTask):
- __slots__ = ("logfile", "pkg", "scheduler")
+class BinpkgVerifier(CompositeTask):
+ __slots__ = ("logfile", "pkg", "_digests", "_pkg_path")
def _start(self):
- """
- Note: Unlike a normal AsynchronousTask.start() method,
- this one does all work is synchronously. The returncode
- attribute will be set before it returns.
- """
-
- pkg = self.pkg
- root_config = pkg.root_config
- bintree = root_config.trees["bintree"]
- rval = os.EX_OK
+
+ bintree = self.pkg.root_config.trees["bintree"]
+ digests = bintree._get_digests(self.pkg)
+ if "size" not in digests:
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ digests = _filter_unaccelarated_hashes(digests)
+ hash_filter = _hash_filter(
+ bintree.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if not hash_filter.transparent:
+ digests = _apply_hash_filter(digests, hash_filter)
+
+ self._digests = digests
+ self._pkg_path = bintree.getname(self.pkg.cpv)
+
+ try:
+ size = os.stat(self._pkg_path).st_size
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ self.scheduler.output(("!!! Fetching Binary failed "
+ "for '%s'\n") % self.pkg.cpv, log_path=self.logfile,
+ background=self.background)
+ self.returncode = 1
+ self._async_wait()
+ return
+ else:
+ if size != digests["size"]:
+ self._digest_exception("size", size, digests["size"])
+ self.returncode = 1
+ self._async_wait()
+ return
+
+ self._start_task(FileDigester(file_path=self._pkg_path,
+ hash_names=(k for k in digests if k != "size"),
+ background=self.background, logfile=self.logfile,
+ scheduler=self.scheduler),
+ self._digester_exit)
+
+ def _digester_exit(self, digester):
+
+ if self._default_exit(digester) != os.EX_OK:
+ self.wait()
+ return
+
+ for hash_name in digester.hash_names:
+ if digester.digests[hash_name] != self._digests[hash_name]:
+ self._digest_exception(hash_name,
+ digester.digests[hash_name], self._digests[hash_name])
+ self.returncode = 1
+ self.wait()
+ return
+
+ if self.pkg.root_config.settings.get("PORTAGE_QUIET") != "1":
+ self._display_success()
+
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def _display_success(self):
stdout_orig = sys.stdout
stderr_orig = sys.stderr
global_havecolor = portage.output.havecolor
out = io.StringIO()
- file_exists = True
try:
sys.stdout = out
sys.stderr = out
if portage.output.havecolor:
portage.output.havecolor = not self.background
- try:
- bintree.digestCheck(pkg)
- except portage.exception.FileNotFound:
- writemsg("!!! Fetching Binary failed " + \
- "for '%s'\n" % pkg.cpv, noiselevel=-1)
- rval = 1
- file_exists = False
- except portage.exception.DigestException as e:
- writemsg("\n!!! Digest verification failed:\n",
- noiselevel=-1)
- writemsg("!!! %s\n" % e.value[0],
- noiselevel=-1)
- writemsg("!!! Reason: %s\n" % e.value[1],
- noiselevel=-1)
- writemsg("!!! Got: %s\n" % e.value[2],
- noiselevel=-1)
- writemsg("!!! Expected: %s\n" % e.value[3],
- noiselevel=-1)
- rval = 1
- if rval == os.EX_OK:
- pass
- elif file_exists:
- pkg_path = bintree.getname(pkg.cpv)
- head, tail = os.path.split(pkg_path)
- temp_filename = _checksum_failure_temp_file(head, tail)
- writemsg("File renamed to '%s'\n" % (temp_filename,),
- noiselevel=-1)
+
+ eout = EOutput()
+ eout.ebegin("%s %s ;-)" % (os.path.basename(self._pkg_path),
+ " ".join(sorted(self._digests))))
+ eout.eend(0)
+
finally:
sys.stdout = stdout_orig
sys.stderr = stderr_orig
portage.output.havecolor = global_havecolor
- msg = out.getvalue()
- if msg:
- self.scheduler.output(msg, log_path=self.logfile,
- background=self.background)
+ self.scheduler.output(out.getvalue(), log_path=self.logfile,
+ background=self.background)
- self.returncode = rval
- self.wait()
+ def _digest_exception(self, name, value, expected):
+
+ head, tail = os.path.split(self._pkg_path)
+ temp_filename = _checksum_failure_temp_file(head, tail)
+ self.scheduler.output((
+ "\n!!! Digest verification failed:\n"
+ "!!! %s\n"
+ "!!! Reason: Failed on %s verification\n"
+ "!!! Got: %s\n"
+ "!!! Expected: %s\n"
+ "File renamed to '%s'\n") %
+ (self._pkg_path, name, value, expected, temp_filename),
+ log_path=self.logfile,
+ background=self.background)
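
The rewritten verifier checks the cheap property first (file size) and only then spends I/O on hashes, with FileDigester doing the hashing off the main loop. The same ordering in a synchronous, standard-library sketch (assuming digest names that hashlib recognizes):

    import hashlib, os

    def verify(path, digests):
        # digests maps "size" plus hash names to expected values.
        if os.stat(path).st_size != digests["size"]:
            return False
        hashers = {k: hashlib.new(k) for k in digests if k != "size"}
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(65536), b""):
                for h in hashers.values():
                    h.update(chunk)
        return all(h.hexdigest() == digests[k] for k, h in hashers.items())
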
diff --git a/pym/_emerge/BlockerCache.py b/pym/_emerge/BlockerCache.py
index fce81f83a..53342d6d6 100644
--- a/pym/_emerge/BlockerCache.py
+++ b/pym/_emerge/BlockerCache.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -62,7 +62,9 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
self._cache_data = mypickle.load()
f.close()
del f
- except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError) as e:
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception as e:
if isinstance(e, EnvironmentError) and \
getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
pass
@@ -126,9 +128,9 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
self._modified.clear()
def flush(self):
- """If the current user has permission and the internal blocker cache
+ """If the current user has permission and the internal blocker cache has
been updated, save it to disk and mark it unmodified. This is called
- by emerge after it has proccessed blockers for all installed packages.
+ by emerge after it has processed blockers for all installed packages.
Currently, the cache is only written if the user has superuser
privileges (since that's required to obtain a lock), but all users
have read access and benefit from faster blocker lookups (as long as
diff --git a/pym/_emerge/BlockerDB.py b/pym/_emerge/BlockerDB.py
index 459affdb0..8bb8f5fda 100644
--- a/pym/_emerge/BlockerDB.py
+++ b/pym/_emerge/BlockerDB.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -9,6 +9,7 @@ from portage import digraph
from portage._sets.base import InternalPackageSet
from _emerge.BlockerCache import BlockerCache
+from _emerge.Package import Package
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
if sys.hexversion >= 0x3000000:
@@ -38,7 +39,7 @@ class BlockerDB(object):
"""
blocker_cache = BlockerCache(None,
self._vartree.dbapi)
- dep_keys = ["RDEPEND", "PDEPEND"]
+ dep_keys = Package._runtime_keys
settings = self._vartree.settings
stale_cache = set(blocker_cache)
fake_vartree = self._fake_vartree
@@ -50,7 +51,7 @@ class BlockerDB(object):
stale_cache.discard(inst_pkg.cpv)
cached_blockers = blocker_cache.get(inst_pkg.cpv)
if cached_blockers is not None and \
- cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
+ cached_blockers.counter != inst_pkg.counter:
cached_blockers = None
if cached_blockers is not None:
blocker_atoms = cached_blockers.atoms
@@ -71,9 +72,8 @@ class BlockerDB(object):
blocker_atoms = [atom for atom in atoms \
if atom.startswith("!")]
blocker_atoms.sort()
- counter = long(inst_pkg.metadata["COUNTER"])
blocker_cache[inst_pkg.cpv] = \
- blocker_cache.BlockerData(counter, blocker_atoms)
+ blocker_cache.BlockerData(inst_pkg.counter, blocker_atoms)
for cpv in stale_cache:
del blocker_cache[cpv]
blocker_cache.flush()
@@ -92,7 +92,7 @@ class BlockerDB(object):
blocking_pkgs.update(blocker_parents.parent_nodes(atom))
# Check for blockers in the other direction.
- depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
+ depstr = " ".join(new_pkg._metadata[k] for k in dep_keys)
success, atoms = portage.dep_check(depstr,
vardb, settings, myuse=new_pkg.use.enabled,
trees=dep_check_trees, myroot=new_pkg.root)
diff --git a/pym/_emerge/CompositeTask.py b/pym/_emerge/CompositeTask.py
index 3e434780b..40cf8596b 100644
--- a/pym/_emerge/CompositeTask.py
+++ b/pym/_emerge/CompositeTask.py
@@ -142,6 +142,10 @@ class CompositeTask(AsynchronousTask):
a task.
"""
+ try:
+ task.scheduler = self.scheduler
+ except AttributeError:
+ pass
task.addExitListener(exit_handler)
self._current_task = task
task.start()
diff --git a/pym/_emerge/DepPriority.py b/pym/_emerge/DepPriority.py
index 3c2256a8e..34fdb481c 100644
--- a/pym/_emerge/DepPriority.py
+++ b/pym/_emerge/DepPriority.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractDepPriority import AbstractDepPriority
@@ -16,31 +16,38 @@ class DepPriority(AbstractDepPriority):
Attributes Hardness
- buildtime 0
- runtime -1
- runtime_post -2
- optional -3
- (none of the above) -4
+ buildtime_slot_op 0
+ buildtime -1
+ runtime -2
+ runtime_post -3
+ optional -4
+ (none of the above) -5
"""
if self.optional:
- return -3
- if self.buildtime:
+ return -4
+ if self.buildtime_slot_op:
return 0
- if self.runtime:
+ if self.buildtime:
return -1
- if self.runtime_post:
+ if self.runtime:
return -2
- return -4
+ if self.runtime_post:
+ return -3
+ return -5
def __str__(self):
if self.ignored:
return "ignored"
if self.optional:
return "optional"
+ if self.buildtime_slot_op:
+ return "buildtime_slot_op"
if self.buildtime:
return "buildtime"
+ if self.runtime_slot_op:
+ return "runtime_slot_op"
if self.runtime:
return "runtime"
if self.runtime_post:
diff --git a/pym/_emerge/DepPrioritySatisfiedRange.py b/pym/_emerge/DepPrioritySatisfiedRange.py
index edb29df96..391f5409b 100644
--- a/pym/_emerge/DepPrioritySatisfiedRange.py
+++ b/pym/_emerge/DepPrioritySatisfiedRange.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.DepPriority import DepPriority
@@ -7,17 +7,18 @@ class DepPrioritySatisfiedRange(object):
DepPriority Index Category
not satisfied and buildtime HARD
- not satisfied and runtime 6 MEDIUM
- not satisfied and runtime_post 5 MEDIUM_SOFT
+ not satisfied and runtime 7 MEDIUM
+ not satisfied and runtime_post 6 MEDIUM_SOFT
+ satisfied and buildtime_slot_op 5 SOFT
satisfied and buildtime 4 SOFT
satisfied and runtime 3 SOFT
satisfied and runtime_post 2 SOFT
optional 1 SOFT
(none of the above) 0 NONE
"""
- MEDIUM = 6
- MEDIUM_SOFT = 5
- SOFT = 4
+ MEDIUM = 7
+ MEDIUM_SOFT = 6
+ SOFT = 5
NONE = 0
@classmethod
@@ -50,6 +51,16 @@ class DepPrioritySatisfiedRange(object):
def _ignore_satisfied_buildtime(cls, priority):
if priority.__class__ is not DepPriority:
return False
+ if priority.optional:
+ return True
+ if priority.buildtime_slot_op:
+ return False
+ return bool(priority.satisfied)
+
+ @classmethod
+ def _ignore_satisfied_buildtime_slot_op(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
return bool(priority.optional or \
priority.satisfied)
@@ -80,6 +91,7 @@ DepPrioritySatisfiedRange.ignore_priority = (
DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
DepPrioritySatisfiedRange._ignore_satisfied_runtime,
DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
+ DepPrioritySatisfiedRange._ignore_satisfied_buildtime_slot_op,
DepPrioritySatisfiedRange._ignore_runtime_post,
DepPrioritySatisfiedRange._ignore_runtime
)
diff --git a/pym/_emerge/DependencyArg.py b/pym/_emerge/DependencyArg.py
index 80134c804..29a0072c4 100644
--- a/pym/_emerge/DependencyArg.py
+++ b/pym/_emerge/DependencyArg.py
@@ -1,9 +1,11 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import sys
-from portage import _encodings, _unicode_encode, _unicode_decode
+from portage import _encodings, _unicode_encode
class DependencyArg(object):
@@ -31,10 +33,10 @@ class DependencyArg(object):
return hash((self.arg, self.root_config.root))
def __str__(self):
- # Force unicode format string for python-2.x safety,
+ # Use unicode_literals format string for python-2.x safety,
# ensuring that self.arg.__unicode__() is used
# when necessary.
- return _unicode_decode("%s") % (self.arg,)
+ return "%s" % (self.arg,)
if sys.hexversion < 0x3000000:
diff --git a/pym/_emerge/EbuildBuild.py b/pym/_emerge/EbuildBuild.py
index 784a3e298..e13b1cf39 100644
--- a/pym/_emerge/EbuildBuild.py
+++ b/pym/_emerge/EbuildBuild.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.EbuildExecuter import EbuildExecuter
@@ -10,11 +10,14 @@ from _emerge.EbuildMerge import EbuildMerge
from _emerge.EbuildFetchonly import EbuildFetchonly
from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.TaskSequence import TaskSequence
+
from portage.util import writemsg
import portage
from portage import os
from portage.output import colorize
from portage.package.ebuild.digestcheck import digestcheck
+from portage.package.ebuild.digestgen import digestgen
from portage.package.ebuild.doebuild import _check_temp_dir
from portage.package.ebuild._spawn_nofetch import spawn_nofetch
@@ -35,7 +38,7 @@ class EbuildBuild(CompositeTask):
if rval != os.EX_OK:
self.returncode = rval
self._current_task = None
- self.wait()
+ self._async_wait()
return
root_config = pkg.root_config
@@ -60,7 +63,7 @@ class EbuildBuild(CompositeTask):
if not self._check_manifest():
self.returncode = 1
self._current_task = None
- self.wait()
+ self._async_wait()
return
prefetcher = self.prefetcher
@@ -91,7 +94,8 @@ class EbuildBuild(CompositeTask):
success = True
settings = self.settings
- if 'strict' in settings.features:
+ if 'strict' in settings.features and \
+ 'digest' not in settings.features:
settings['O'] = os.path.dirname(self._ebuild_path)
quiet_setting = settings.get('PORTAGE_QUIET')
settings['PORTAGE_QUIET'] = '1'
@@ -160,6 +164,10 @@ class EbuildBuild(CompositeTask):
if self.returncode != os.EX_OK:
portdb = self.pkg.root_config.trees[self._tree].dbapi
spawn_nofetch(portdb, self._ebuild_path, settings=self.settings)
+ elif 'digest' in self.settings.features:
+ if not digestgen(mysettings=self.settings,
+ myportdb=self.pkg.root_config.trees[self._tree].dbapi):
+ self.returncode = 1
self.wait()
def _pre_clean_exit(self, pre_clean_phase):
@@ -260,8 +268,8 @@ class EbuildBuild(CompositeTask):
# to be displayed for problematic packages even though they do
# not set RESTRICT=fetch (bug #336499).
- if 'fetch' not in self.pkg.metadata.restrict and \
- 'nofetch' not in self.pkg.metadata.defined_phases:
+ if 'fetch' not in self.pkg.restrict and \
+ 'nofetch' not in self.pkg.defined_phases:
self._unlock_builddir()
self.wait()
return
@@ -300,10 +308,20 @@ class EbuildBuild(CompositeTask):
self.scheduler.output(msg,
log_path=self.settings.get("PORTAGE_LOG_FILE"))
- packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
- scheduler=self.scheduler, settings=self.settings)
+ binpkg_tasks = TaskSequence()
+ requested_binpkg_formats = self.settings.get("PORTAGE_BINPKG_FORMAT", "tar").split()
+ for pkg_fmt in portage.const.SUPPORTED_BINPKG_FORMATS:
+ if pkg_fmt in requested_binpkg_formats:
+ if pkg_fmt == "rpm":
+ binpkg_tasks.add(EbuildPhase(background=self.background,
+ phase="rpm", scheduler=self.scheduler,
+ settings=self.settings))
+ else:
+ binpkg_tasks.add(EbuildBinpkg(background=self.background,
+ pkg=self.pkg, scheduler=self.scheduler,
+ settings=self.settings))
- self._start_task(packager, self._buildpkg_exit)
+ self._start_task(binpkg_tasks, self._buildpkg_exit)
def _buildpkg_exit(self, packager):
"""
diff --git a/pym/_emerge/EbuildBuildDir.py b/pym/_emerge/EbuildBuildDir.py
index 9773bd790..58905c2f6 100644
--- a/pym/_emerge/EbuildBuildDir.py
+++ b/pym/_emerge/EbuildBuildDir.py
@@ -7,7 +7,6 @@ import portage
from portage import os
from portage.exception import PortageException
from portage.util.SlotObject import SlotObject
-import errno
class EbuildBuildDir(SlotObject):
@@ -60,7 +59,7 @@ class EbuildBuildDir(SlotObject):
builddir_lock.wait()
self._assert_lock(builddir_lock)
self._lock_obj = builddir_lock
- self.settings['PORTAGE_BUILDIR_LOCKED'] = '1'
+ self.settings['PORTAGE_BUILDDIR_LOCKED'] = '1'
finally:
self.locked = self._lock_obj is not None
catdir_lock.unlock()
@@ -92,16 +91,14 @@ class EbuildBuildDir(SlotObject):
self._lock_obj.unlock()
self._lock_obj = None
self.locked = False
- self.settings.pop('PORTAGE_BUILDIR_LOCKED', None)
+ self.settings.pop('PORTAGE_BUILDDIR_LOCKED', None)
catdir_lock = AsynchronousLock(path=self._catdir, scheduler=self.scheduler)
catdir_lock.start()
if catdir_lock.wait() == os.EX_OK:
try:
os.rmdir(self._catdir)
- except OSError as e:
- if e.errno not in (errno.ENOENT,
- errno.ENOTEMPTY, errno.EEXIST, errno.EPERM):
- raise
+ except OSError:
+ pass
finally:
catdir_lock.unlock()
diff --git a/pym/_emerge/EbuildExecuter.py b/pym/_emerge/EbuildExecuter.py
index fd663a41d..5587d4eb0 100644
--- a/pym/_emerge/EbuildExecuter.py
+++ b/pym/_emerge/EbuildExecuter.py
@@ -16,16 +16,7 @@ class EbuildExecuter(CompositeTask):
_phases = ("prepare", "configure", "compile", "test", "install")
- _live_eclasses = frozenset([
- "bzr",
- "cvs",
- "darcs",
- "git",
- "git-2",
- "mercurial",
- "subversion",
- "tla",
- ])
+ _live_eclasses = portage.const.LIVE_ECLASSES
def _start(self):
pkg = self.pkg
@@ -83,7 +74,7 @@ class EbuildExecuter(CompositeTask):
pkg = self.pkg
phases = self._phases
- eapi = pkg.metadata["EAPI"]
+ eapi = pkg.eapi
if not eapi_has_src_prepare_and_src_configure(eapi):
# skip src_prepare and src_configure
phases = phases[2:]
diff --git a/pym/_emerge/EbuildFetcher.py b/pym/_emerge/EbuildFetcher.py
index c0a7fddaa..d98d00736 100644
--- a/pym/_emerge/EbuildFetcher.py
+++ b/pym/_emerge/EbuildFetcher.py
@@ -1,23 +1,22 @@
# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-import traceback
-
-from _emerge.SpawnProcess import SpawnProcess
import copy
import io
-import signal
import sys
+
import portage
from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage import _unicode_decode
+from portage.checksum import _hash_filter
from portage.elog.messages import eerror
from portage.package.ebuild.fetch import _check_distfile, fetch
+from portage.util._async.ForkProcess import ForkProcess
from portage.util._pty import _create_pty_or_pipe
-class EbuildFetcher(SpawnProcess):
+class EbuildFetcher(ForkProcess):
__slots__ = ("config_pool", "ebuild_path", "fetchonly", "fetchall",
"pkg", "prefetch") + \
@@ -57,6 +56,9 @@ class EbuildFetcher(SpawnProcess):
if st.st_size != expected_size:
return False
+ hash_filter = _hash_filter(settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
stdout_orig = sys.stdout
stderr_orig = sys.stderr
global_havecolor = portage.output.havecolor
@@ -78,7 +80,7 @@ class EbuildFetcher(SpawnProcess):
break
continue
ok, st = _check_distfile(os.path.join(distdir, filename),
- mydigests, eout, show_errors=False)
+ mydigests, eout, show_errors=False, hash_filter=hash_filter)
if not ok:
success = False
break
@@ -115,13 +117,13 @@ class EbuildFetcher(SpawnProcess):
msg_lines.append(msg)
self._eerror(msg_lines)
self._set_returncode((self.pid, 1 << 8))
- self.wait()
+ self._async_wait()
return
if not uri_map:
# Nothing to fetch.
self._set_returncode((self.pid, os.EX_OK << 8))
- self.wait()
+ self._async_wait()
return
settings = self.config_pool.allocate()
@@ -133,7 +135,7 @@ class EbuildFetcher(SpawnProcess):
self._prefetch_size_ok(uri_map, settings, ebuild_path):
self.config_pool.deallocate(settings)
self._set_returncode((self.pid, os.EX_OK << 8))
- self.wait()
+ self._async_wait()
return
nocolor = settings.get("NOCOLOR")
@@ -148,7 +150,7 @@ class EbuildFetcher(SpawnProcess):
settings["NOCOLOR"] = nocolor
self._settings = settings
- SpawnProcess._start(self)
+ ForkProcess._start(self)
# Free settings now since it's no longer needed in
# this process (the subprocess has a private copy).
@@ -156,48 +158,20 @@ class EbuildFetcher(SpawnProcess):
settings = None
self._settings = None
- def _spawn(self, args, fd_pipes=None, **kwargs):
- """
- Fork a subprocess, apply local settings, and call fetch().
- """
-
- pid = os.fork()
- if pid != 0:
- if not isinstance(pid, int):
- raise AssertionError(
- "fork returned non-integer: %s" % (repr(pid),))
- portage.process.spawned_pids.append(pid)
- return [pid]
-
- portage.locks._close_fds()
- # Disable close_fds since we don't exec (see _setup_pipes docstring).
- portage.process._setup_pipes(fd_pipes, close_fds=False)
-
- # Use default signal handlers in order to avoid problems
- # killing subprocesses as reported in bug #353239.
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
-
+ def _run(self):
# Force consistent color output, in case we are capturing fetch
# output through a normal pipe due to unavailability of ptys.
portage.output.havecolor = self._settings.get('NOCOLOR') \
not in ('yes', 'true')
rval = 1
- allow_missing = self._get_manifest().allow_missing
- try:
- if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
- digests=copy.deepcopy(self._get_digests()),
- allow_missing_digests=allow_missing):
- rval = os.EX_OK
- except SystemExit:
- raise
- except:
- traceback.print_exc()
- finally:
- # Call os._exit() from finally block, in order to suppress any
- # finally blocks from earlier in the call stack. See bug #345289.
- os._exit(rval)
+ allow_missing = self._get_manifest().allow_missing or \
+ 'digest' in self._settings.features
+ if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
+ digests=copy.deepcopy(self._get_digests()),
+ allow_missing_digests=allow_missing):
+ rval = os.EX_OK
+ return rval
def _get_ebuild_path(self):
if self.ebuild_path is not None:
@@ -297,7 +271,7 @@ class EbuildFetcher(SpawnProcess):
self.scheduler.output(msg, log_path=self.logfile)
def _set_returncode(self, wait_retval):
- SpawnProcess._set_returncode(self, wait_retval)
+ ForkProcess._set_returncode(self, wait_retval)
# Collect elog messages that might have been
# created by the pkg_nofetch phase.
# Skip elog messages for prefetch, in order to avoid duplicates.
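
As this diff uses it, the ForkProcess contract appears to be: a subclass overrides _run(), which executes in the forked child, and the returned integer becomes the child's exit status; the fork/signal/os._exit boilerplate removed above lives in the base class. A sketch under that assumption (TouchFile is hypothetical):

    import os
    from portage.util._async.ForkProcess import ForkProcess

    class TouchFile(ForkProcess):
        __slots__ = ("path",)

        def _run(self):
            # Runs in the forked child; the return value is used
            # as the child's exit status.
            open(self.path, "a").close()
            return os.EX_OK
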
diff --git a/pym/_emerge/EbuildMetadataPhase.py b/pym/_emerge/EbuildMetadataPhase.py
index c2d3747f7..bbb1ca9dc 100644
--- a/pym/_emerge/EbuildMetadataPhase.py
+++ b/pym/_emerge/EbuildMetadataPhase.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.SubProcess import SubProcess
@@ -6,12 +6,14 @@ import sys
from portage.cache.mappings import slot_dict_class
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.package.ebuild._eapi_invalid:eapi_invalid',
+ 'portage.package.ebuild._metadata_invalid:eapi_invalid',
)
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
+from portage.dep import extract_unpack_dependencies
+from portage.eapi import eapi_has_automatic_unpack_dependencies
import errno
import fcntl
@@ -25,12 +27,11 @@ class EbuildMetadataPhase(SubProcess):
"""
__slots__ = ("cpv", "eapi_supported", "ebuild_hash", "fd_pipes",
- "metadata", "portdb", "repo_path", "settings") + \
+ "metadata", "portdb", "repo_path", "settings", "write_auxdb") + \
("_eapi", "_eapi_lineno", "_raw_metadata",)
_file_names = ("ebuild",)
_files_dict = slot_dict_class(_file_names, prefix="")
- _metadata_fd = 9
def _start(self):
ebuild_path = self.ebuild_hash.location
@@ -49,14 +50,14 @@ class EbuildMetadataPhase(SubProcess):
# An empty EAPI setting is invalid.
self._eapi_invalid(None)
self._set_returncode((self.pid, 1 << 8))
- self.wait()
+ self._async_wait()
return
self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
if not self.eapi_supported:
self.metadata = {"EAPI": parsed_eapi}
self._set_returncode((self.pid, os.EX_OK << 8))
- self.wait()
+ self._async_wait()
return
settings = self.settings
@@ -74,28 +75,41 @@ class EbuildMetadataPhase(SubProcess):
null_input = open('/dev/null', 'rb')
fd_pipes.setdefault(0, null_input.fileno())
- fd_pipes.setdefault(1, sys.stdout.fileno())
- fd_pipes.setdefault(2, sys.stderr.fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stderr__.fileno())
# flush any pending output
+ stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
for fd in fd_pipes.values():
- if fd == sys.stdout.fileno():
- sys.stdout.flush()
- if fd == sys.stderr.fileno():
- sys.stderr.flush()
+ if fd in stdout_filenos:
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ break
self._files = self._files_dict()
files = self._files
master_fd, slave_fd = os.pipe()
+
fcntl.fcntl(master_fd, fcntl.F_SETFL,
fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
- fd_pipes[self._metadata_fd] = slave_fd
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(master_fd, fcntl.F_SETFD,
+ fcntl.fcntl(master_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ fd_pipes[slave_fd] = slave_fd
+ settings["PORTAGE_PIPE_FD"] = str(slave_fd)
self._raw_metadata = []
files.ebuild = master_fd
- self._reg_id = self.scheduler.register(files.ebuild,
+ self._reg_id = self.scheduler.io_add_watch(files.ebuild,
self._registered_events, self._output_handler)
self._registered = True
@@ -103,6 +117,7 @@ class EbuildMetadataPhase(SubProcess):
settings=settings, debug=debug,
mydbapi=self.portdb, tree="porttree",
fd_pipes=fd_pipes, returnpid=True)
+ settings.pop("PORTAGE_PIPE_FD", None)
os.close(slave_fd)
null_input.close()
@@ -111,11 +126,10 @@ class EbuildMetadataPhase(SubProcess):
# doebuild failed before spawning
self._unregister()
self._set_returncode((self.pid, retval << 8))
- self.wait()
+ self._async_wait()
return
self.pid = retval[0]
- portage.process.spawned_pids.remove(self.pid)
def _output_handler(self, fd, event):
@@ -141,8 +155,7 @@ class EbuildMetadataPhase(SubProcess):
def _set_returncode(self, wait_retval):
SubProcess._set_returncode(self, wait_retval)
# self._raw_metadata is None when _start returns
- # early due to an unsupported EAPI detected with
- # FEATURES=parse-eapi-ebuild-head
+ # early due to an unsupported EAPI
if self.returncode == os.EX_OK and \
self._raw_metadata is not None:
metadata_lines = _unicode_decode(b''.join(self._raw_metadata),
@@ -163,8 +176,7 @@ class EbuildMetadataPhase(SubProcess):
if (not metadata["EAPI"] or self.eapi_supported) and \
metadata["EAPI"] != parsed_eapi:
self._eapi_invalid(metadata)
- if 'parse-eapi-ebuild-head' in self.settings.features:
- metadata_valid = False
+ metadata_valid = False
if metadata_valid:
# Since we're supposed to be able to efficiently obtain the
@@ -181,8 +193,18 @@ class EbuildMetadataPhase(SubProcess):
metadata["_eclasses_"] = {}
metadata.pop("INHERITED", None)
- self.portdb._write_cache(self.cpv,
- self.repo_path, metadata, self.ebuild_hash)
+ if eapi_has_automatic_unpack_dependencies(metadata["EAPI"]):
+ repo = self.portdb.repositories.get_name_for_location(self.repo_path)
+ unpackers = self.settings.unpack_dependencies.get(repo, {}).get(metadata["EAPI"], {})
+ unpack_dependencies = extract_unpack_dependencies(metadata["SRC_URI"], unpackers)
+ if unpack_dependencies:
+ metadata["DEPEND"] += (" " if metadata["DEPEND"] else "") + unpack_dependencies
+
+ # If called by egencache, this cache write is
+ # undesirable when metadata-transfer is disabled.
+ if self.write_auxdb is not False:
+ self.portdb._write_cache(self.cpv,
+ self.repo_path, metadata, self.ebuild_hash)
else:
metadata = {"EAPI": metadata["EAPI"]}
self.metadata = metadata
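
The FD_CLOEXEC handling added above (and in AsynchronousLock) isolated into a helper: on Python below 3.4, descriptors are inherited across exec() by default, while 3.4 and later make them non-inheritable per PEP 446, so the manual flag is only needed on older interpreters (a sketch; it omits the AttributeError guard the diff keeps for platforms lacking FD_CLOEXEC):

    import fcntl
    import sys

    def set_cloexec(fd):
        # No-op on Python >= 3.4, where descriptors are already
        # non-inheritable by default (PEP 446).
        if sys.hexversion >= 0x3040000:
            return
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
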
diff --git a/pym/_emerge/EbuildPhase.py b/pym/_emerge/EbuildPhase.py
index fe44abcbd..b1f7c21df 100644
--- a/pym/_emerge/EbuildPhase.py
+++ b/pym/_emerge/EbuildPhase.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import gzip
@@ -11,6 +11,7 @@ from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
from _emerge.EbuildProcess import EbuildProcess
from _emerge.CompositeTask import CompositeTask
+from portage.package.ebuild.prepare_build_dirs import _prepare_workdir
from portage.util import writemsg
try:
@@ -38,7 +39,7 @@ from portage import _unicode_encode
class EbuildPhase(CompositeTask):
- __slots__ = ("actionmap", "phase", "settings") + \
+ __slots__ = ("actionmap", "fd_pipes", "phase", "settings") + \
("_ebuild_lock",)
# FEATURES displayed prior to setup phase
@@ -156,8 +157,7 @@ class EbuildPhase(CompositeTask):
return
self._start_ebuild()
- def _start_ebuild(self):
-
+ def _get_log_path(self):
# Don't open the log file during the clean phase since the
# open file can result in an nfs lock on $T/build.log which
# prevents the clean phase from removing $T.
@@ -165,17 +165,21 @@ class EbuildPhase(CompositeTask):
if self.phase not in ("clean", "cleanrm") and \
self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
logfile = self.settings.get("PORTAGE_LOG_FILE")
+ return logfile
+
+ def _start_ebuild(self):
- fd_pipes = None
- if not self.background and self.phase == 'nofetch':
- # All the pkg_nofetch output goes to stderr since
- # it's considered to be an error message.
- fd_pipes = {1 : sys.stderr.fileno()}
+ fd_pipes = self.fd_pipes
+ if fd_pipes is None:
+ if not self.background and self.phase == 'nofetch':
+ # All the pkg_nofetch output goes to stderr since
+ # it's considered to be an error message.
+ fd_pipes = {1 : sys.__stderr__.fileno()}
ebuild_process = EbuildProcess(actionmap=self.actionmap,
- background=self.background, fd_pipes=fd_pipes, logfile=logfile,
- phase=self.phase, scheduler=self.scheduler,
- settings=self.settings)
+ background=self.background, fd_pipes=fd_pipes,
+ logfile=self._get_log_path(), phase=self.phase,
+ scheduler=self.scheduler, settings=self.settings)
self._start_task(ebuild_process, self._ebuild_exit)
@@ -189,16 +193,21 @@ class EbuildPhase(CompositeTask):
if self._default_exit(ebuild_process) != os.EX_OK:
if self.phase == "test" and \
"test-fail-continue" in self.settings.features:
- pass
+ # mark test phase as complete (bug #452030)
+ try:
+ open(_unicode_encode(os.path.join(
+ self.settings["PORTAGE_BUILDDIR"], ".tested"),
+ encoding=_encodings['fs'], errors='strict'),
+ 'wb').close()
+ except OSError:
+ pass
else:
fail = True
if not fail:
self.returncode = None
- logfile = None
- if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
- logfile = self.settings.get("PORTAGE_LOG_FILE")
+ logfile = self._get_log_path()
if self.phase == "install":
out = io.StringIO()
@@ -213,7 +222,14 @@ class EbuildPhase(CompositeTask):
settings = self.settings
_post_phase_userpriv_perms(settings)
- if self.phase == "install":
+ if self.phase == "unpack":
+ # Bump WORKDIR timestamp, in case tar gave it a timestamp
+ # that will interfere with distfiles / WORKDIR timestamp
+ # comparisons as reported in bug #332217. Also, fix
+ # ownership since tar can change that too.
+ os.utime(settings["WORKDIR"], None)
+ _prepare_workdir(settings)
+ elif self.phase == "install":
out = io.StringIO()
_post_src_install_write_metadata(settings)
_post_src_install_uid_fix(settings, out)
@@ -235,8 +251,9 @@ class EbuildPhase(CompositeTask):
fd, logfile = tempfile.mkstemp()
os.close(fd)
post_phase = MiscFunctionsProcess(background=self.background,
- commands=post_phase_cmds, logfile=logfile, phase=self.phase,
- scheduler=self.scheduler, settings=settings)
+ commands=post_phase_cmds, fd_pipes=self.fd_pipes,
+ logfile=logfile, phase=self.phase, scheduler=self.scheduler,
+ settings=settings)
self._start_task(post_phase, self._post_phase_exit)
return
@@ -311,8 +328,9 @@ class EbuildPhase(CompositeTask):
self.returncode = None
phase = 'die_hooks'
die_hooks = MiscFunctionsProcess(background=self.background,
- commands=[phase], phase=phase,
- scheduler=self.scheduler, settings=self.settings)
+ commands=[phase], phase=phase, logfile=self._get_log_path(),
+ fd_pipes=self.fd_pipes, scheduler=self.scheduler,
+ settings=self.settings)
self._start_task(die_hooks, self._die_hooks_exit)
def _die_hooks_exit(self, die_hooks):
@@ -331,7 +349,8 @@ class EbuildPhase(CompositeTask):
portage.elog.elog_process(self.settings.mycpv, self.settings)
phase = "clean"
clean_phase = EbuildPhase(background=self.background,
- phase=phase, scheduler=self.scheduler, settings=self.settings)
+ fd_pipes=self.fd_pipes, phase=phase, scheduler=self.scheduler,
+ settings=self.settings)
self._start_task(clean_phase, self._fail_clean_exit)
return
diff --git a/pym/_emerge/EbuildProcess.py b/pym/_emerge/EbuildProcess.py
index ce97aff0f..333ad7bd0 100644
--- a/pym/_emerge/EbuildProcess.py
+++ b/pym/_emerge/EbuildProcess.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
@@ -17,5 +17,11 @@ class EbuildProcess(AbstractEbuildProcess):
if actionmap is None:
actionmap = _spawn_actionmap(self.settings)
- return _doebuild_spawn(self.phase, self.settings,
- actionmap=actionmap, **kwargs)
+ if self._dummy_pipe_fd is not None:
+ self.settings["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
+ try:
+ return _doebuild_spawn(self.phase, self.settings,
+ actionmap=actionmap, **kwargs)
+ finally:
+ self.settings.pop("PORTAGE_PIPE_FD", None)
diff --git a/pym/_emerge/EbuildSpawnProcess.py b/pym/_emerge/EbuildSpawnProcess.py
index e1f682a66..26d26fc77 100644
--- a/pym/_emerge/EbuildSpawnProcess.py
+++ b/pym/_emerge/EbuildSpawnProcess.py
@@ -1,4 +1,4 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
@@ -13,4 +13,10 @@ class EbuildSpawnProcess(AbstractEbuildProcess):
__slots__ = ('fakeroot_state', 'spawn_func')
def _spawn(self, args, **kwargs):
- return self.spawn_func(args, env=self.settings.environ(), **kwargs)
+
+ env = self.settings.environ()
+
+ if self._dummy_pipe_fd is not None:
+ env["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
+ return self.spawn_func(args, env=env, **kwargs)
diff --git a/pym/_emerge/FakeVartree.py b/pym/_emerge/FakeVartree.py
index ce15f5a36..14be50c7f 100644
--- a/pym/_emerge/FakeVartree.py
+++ b/pym/_emerge/FakeVartree.py
@@ -1,6 +1,8 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import sys
import warnings
@@ -10,11 +12,11 @@ from _emerge.Package import Package
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
from portage.const import VDB_PATH
from portage.dbapi.vartree import vartree
-from portage.dep._slot_abi import find_built_slot_abi_atoms
+from portage.dep._slot_operator import find_built_slot_operator_atoms
from portage.eapi import _get_eapi_attrs
-from portage.exception import InvalidDependString
-from portage.repository.config import _gen_valid_repo
+from portage.exception import InvalidData, InvalidDependString
from portage.update import grab_updates, parse_updates, update_dbentries
+from portage.versions import _pkg_str
if sys.hexversion >= 0x3000000:
long = int
@@ -33,6 +35,9 @@ class FakeVardbapi(PackageVirtualDbapi):
		path = os.path.join(path, filename)
return path
+class _DynamicDepsNotApplicable(Exception):
+ pass
+
class FakeVartree(vartree):
"""This is implements an in-memory copy of a vartree instance that provides
all the interfaces required for use by the depgraph. The vardb is locked
@@ -45,10 +50,10 @@ class FakeVartree(vartree):
is not a matching ebuild in the tree). Instances of this class are not
populated until the sync() method is called."""
def __init__(self, root_config, pkg_cache=None, pkg_root_config=None,
- dynamic_deps=True, ignore_built_slot_abi_deps=False):
+ dynamic_deps=True, ignore_built_slot_operator_deps=False):
self._root_config = root_config
self._dynamic_deps = dynamic_deps
- self._ignore_built_slot_abi_deps = ignore_built_slot_abi_deps
+ self._ignore_built_slot_operator_deps = ignore_built_slot_operator_deps
if pkg_root_config is None:
pkg_root_config = self._root_config
self._pkg_root_config = pkg_root_config
@@ -75,7 +80,7 @@ class FakeVartree(vartree):
self.dbapi.aux_get = self._aux_get_wrapper
self.dbapi.match = self._match_wrapper
self._aux_get_history = set()
- self._portdb_keys = ["EAPI", "KEYWORDS", "DEPEND", "RDEPEND", "PDEPEND"]
+ self._portdb_keys = Package._dep_keys + ("EAPI", "KEYWORDS")
self._portdb = portdb
self._global_updates = None
@@ -102,29 +107,30 @@ class FakeVartree(vartree):
self._aux_get_wrapper(cpv, [])
return matches
- def _aux_get_wrapper(self, pkg, wants, myrepo=None):
- if pkg in self._aux_get_history:
- return self._aux_get(pkg, wants)
- self._aux_get_history.add(pkg)
- # We need to check the EAPI, and this also raises
- # a KeyError to the caller if appropriate.
- pkg_obj = self.dbapi._cpv_map[pkg]
- installed_eapi = pkg_obj.metadata['EAPI']
- repo = pkg_obj.metadata['repository']
- eapi_attrs = _get_eapi_attrs(installed_eapi)
- built_slot_abi_atoms = None
-
- if eapi_attrs.slot_abi and not self._ignore_built_slot_abi_deps:
- try:
- built_slot_abi_atoms = find_built_slot_abi_atoms(pkg_obj)
- except InvalidDependString:
- pass
+ def _aux_get_wrapper(self, cpv, wants, myrepo=None):
+ if cpv in self._aux_get_history:
+ return self._aux_get(cpv, wants)
+ self._aux_get_history.add(cpv)
+
+ # This raises a KeyError to the caller if appropriate.
+ pkg = self.dbapi._cpv_map[cpv]
try:
- # Use the live ebuild metadata if possible.
- repo = _gen_valid_repo(repo)
live_metadata = dict(zip(self._portdb_keys,
- self._portdb.aux_get(pkg, self._portdb_keys, myrepo=repo)))
+ self._portdb.aux_get(cpv, self._portdb_keys,
+ myrepo=pkg.repo)))
+ except (KeyError, portage.exception.PortageException):
+ live_metadata = None
+
+ self._apply_dynamic_deps(pkg, live_metadata)
+
+ return self._aux_get(cpv, wants)
+
+ def _apply_dynamic_deps(self, pkg, live_metadata):
+
+ try:
+ if live_metadata is None:
+ raise _DynamicDepsNotApplicable()
# Use the metadata from the installed instance if the EAPI
# of either instance is unsupported, since if the installed
# instance has an unsupported or corrupt EAPI then we don't
@@ -134,26 +140,46 @@ class FakeVartree(vartree):
# order to respect dep updates without revision bump or EAPI
# bump, as in bug #368725.
if not (portage.eapi_is_supported(live_metadata["EAPI"]) and \
- portage.eapi_is_supported(installed_eapi)):
- raise KeyError(pkg)
+ portage.eapi_is_supported(pkg.eapi)):
+ raise _DynamicDepsNotApplicable()
- # preserve built SLOT/ABI := operator deps
- if built_slot_abi_atoms:
+ # preserve built slot/sub-slot := operator deps
+ built_slot_operator_atoms = None
+ if not self._ignore_built_slot_operator_deps and \
+ _get_eapi_attrs(pkg.eapi).slot_operator:
+ try:
+ built_slot_operator_atoms = \
+ find_built_slot_operator_atoms(pkg)
+ except InvalidDependString:
+ pass
+
+ if built_slot_operator_atoms:
live_eapi_attrs = _get_eapi_attrs(live_metadata["EAPI"])
- if not live_eapi_attrs.slot_abi:
- raise KeyError(pkg)
- for k, v in built_slot_abi_atoms.items():
+ if not live_eapi_attrs.slot_operator:
+ raise _DynamicDepsNotApplicable()
+ for k, v in built_slot_operator_atoms.items():
live_metadata[k] += (" " +
" ".join(_unicode(atom) for atom in v))
- self.dbapi.aux_update(pkg, live_metadata)
- except (KeyError, portage.exception.PortageException):
+ self.dbapi.aux_update(pkg.cpv, live_metadata)
+ except _DynamicDepsNotApplicable:
if self._global_updates is None:
self._global_updates = \
grab_global_updates(self._portdb)
+
+ # Bypass _aux_get_wrapper, since calling that
+ # here would trigger infinite recursion.
+ aux_keys = Package._dep_keys + self.dbapi._pkg_str_aux_keys
+ aux_dict = dict(zip(aux_keys, self._aux_get(pkg.cpv, aux_keys)))
perform_global_updates(
- pkg, self.dbapi, self._global_updates)
- return self._aux_get(pkg, wants)
+ pkg.cpv, aux_dict, self.dbapi, self._global_updates)
+
+ def dynamic_deps_preload(self, pkg, metadata):
+ if metadata is not None:
+ metadata = dict((k, metadata.get(k, ''))
+ for k in self._portdb_keys)
+ self._apply_dynamic_deps(pkg, metadata)
+ self._aux_get_history.add(pkg.cpv)
def cpv_discard(self, pkg):
"""
@@ -251,12 +277,6 @@ class FakeVartree(vartree):
root_config=self._pkg_root_config,
type_name="installed")
- try:
- mycounter = long(pkg.metadata["COUNTER"])
- except ValueError:
- mycounter = 0
- pkg.metadata["COUNTER"] = str(mycounter)
-
self._pkg_cache[pkg] = pkg
return pkg
@@ -285,13 +305,14 @@ def grab_global_updates(portdb):
return retupdates
-def perform_global_updates(mycpv, mydb, myupdates):
- aux_keys = ["DEPEND", "EAPI", "RDEPEND", "PDEPEND", 'repository']
- aux_dict = dict(zip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
- eapi = aux_dict.pop('EAPI')
- repository = aux_dict.pop('repository')
+def perform_global_updates(mycpv, aux_dict, mydb, myupdates):
+ try:
+ pkg = _pkg_str(mycpv, metadata=aux_dict, settings=mydb.settings)
+ except InvalidData:
+ return
+ aux_dict = dict((k, aux_dict[k]) for k in Package._dep_keys)
try:
- mycommands = myupdates[repository]
+ mycommands = myupdates[pkg.repo]
except KeyError:
try:
mycommands = myupdates['DEFAULT']
@@ -301,6 +322,6 @@ def perform_global_updates(mycpv, mydb, myupdates):
if not mycommands:
return
- updates = update_dbentries(mycommands, aux_dict, eapi=eapi)
+ updates = update_dbentries(mycommands, aux_dict, parent=pkg)
if updates:
mydb.aux_update(mycpv, updates)
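
The FakeVartree rework above replaces overloaded KeyError handling with a private _DynamicDepsNotApplicable exception: live ebuild metadata is applied when both EAPIs are supported (preserving built slot-operator deps), and any reason it cannot be applied funnels into one fallback path that replays global package-move updates against the installed metadata. A condensed, runnable sketch of that control flow; eapi_ok and apply_updates are illustrative stand-ins for the real checks:

    class _NotApplicable(Exception):
        pass

    def apply_dynamic_deps(db, cpv, live_metadata, eapi_ok, apply_updates):
        try:
            if live_metadata is None:
                raise _NotApplicable()
            if not eapi_ok(live_metadata["EAPI"]):
                # Unsupported or corrupt EAPI: keep installed metadata.
                raise _NotApplicable()
            db.aux_update(cpv, live_metadata)
        except _NotApplicable:
            # Fall back to package-move updates on installed metadata.
            apply_updates(cpv, db)
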
diff --git a/pym/_emerge/FifoIpcDaemon.py b/pym/_emerge/FifoIpcDaemon.py
index fcc4ab4b9..7468de5e2 100644
--- a/pym/_emerge/FifoIpcDaemon.py
+++ b/pym/_emerge/FifoIpcDaemon.py
@@ -1,6 +1,14 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import sys
+
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
from portage import os
from _emerge.AbstractPollTask import AbstractPollTask
from portage.cache.mappings import slot_dict_class
@@ -21,7 +29,18 @@ class FifoIpcDaemon(AbstractPollTask):
self._files.pipe_in = \
os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
- self._reg_id = self.scheduler.register(
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._files.pipe_in, fcntl.F_SETFD,
+ fcntl.fcntl(self._files.pipe_in,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(
self._files.pipe_in,
self._registered_events, self._input_handler)
@@ -32,11 +51,23 @@ class FifoIpcDaemon(AbstractPollTask):
Re-open the input stream, in order to suppress
POLLHUP events (bug #339976).
"""
- self.scheduler.unregister(self._reg_id)
+ self.scheduler.source_remove(self._reg_id)
os.close(self._files.pipe_in)
self._files.pipe_in = \
os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
- self._reg_id = self.scheduler.register(
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._files.pipe_in, fcntl.F_SETFD,
+ fcntl.fcntl(self._files.pipe_in,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(
self._files.pipe_in,
self._registered_events, self._input_handler)
@@ -47,6 +78,8 @@ class FifoIpcDaemon(AbstractPollTask):
if self.returncode is None:
self.returncode = 1
self._unregister()
+ # notify exit listeners
+ self.wait()
def _wait(self):
if self.returncode is not None:
@@ -67,7 +100,7 @@ class FifoIpcDaemon(AbstractPollTask):
self._registered = False
if self._reg_id is not None:
- self.scheduler.unregister(self._reg_id)
+ self.scheduler.source_remove(self._reg_id)
self._reg_id = None
if self._files is not None:
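
The fcntl stanza added twice above (and once more in PipeReader below) exists because only Python >= 3.4 creates descriptors close-on-exec by default (PEP 446); on older interpreters the flag must be set by hand, fcntl may be absent on Jython, and FD_CLOEXEC may be absent on unusual platforms. The repeated logic reads as this small helper (a sketch; set_cloexec is not a name from the patch):

    import sys

    try:
        import fcntl
    except ImportError:
        fcntl = None  # e.g. Jython (http://bugs.jython.org/issue1074)

    def set_cloexec(fd):
        if sys.hexversion >= 0x3040000 or fcntl is None:
            return  # nothing to do, or no way to do it
        if not hasattr(fcntl, "FD_CLOEXEC"):
            return
        fcntl.fcntl(fd, fcntl.F_SETFD,
            fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
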
diff --git a/pym/_emerge/JobStatusDisplay.py b/pym/_emerge/JobStatusDisplay.py
index 5b9b2216f..9f6f09be0 100644
--- a/pym/_emerge/JobStatusDisplay.py
+++ b/pym/_emerge/JobStatusDisplay.py
@@ -1,6 +1,8 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import formatter
import io
import sys
@@ -9,7 +11,6 @@ import time
import portage
from portage import os
from portage import _encodings
-from portage import _unicode_decode
from portage import _unicode_encode
from portage.output import xtermTitle
@@ -121,7 +122,8 @@ class JobStatusDisplay(object):
term_codes = {}
for k, capname in self._termcap_name_map.items():
- code = tigetstr(capname)
+ # Use _native_string for PyPy compat (bug #470258).
+ code = tigetstr(portage._native_string(capname))
if code is None:
code = self._default_term_codes[capname]
term_codes[k] = code
@@ -233,10 +235,10 @@ class JobStatusDisplay(object):
def _display_status(self):
# Don't use len(self._completed_tasks) here since that also
# can include uninstall tasks.
- curval_str = str(self.curval)
- maxval_str = str(self.maxval)
- running_str = str(self.running)
- failed_str = str(self.failed)
+ curval_str = "%s" % (self.curval,)
+ maxval_str = "%s" % (self.maxval,)
+ running_str = "%s" % (self.running,)
+ failed_str = "%s" % (self.failed,)
load_avg_str = self._load_avg_str()
color_output = io.StringIO()
@@ -248,36 +250,36 @@ class JobStatusDisplay(object):
f = formatter.AbstractFormatter(style_writer)
number_style = "INFORM"
- f.add_literal_data(_unicode_decode("Jobs: "))
+ f.add_literal_data("Jobs: ")
f.push_style(number_style)
- f.add_literal_data(_unicode_decode(curval_str))
+ f.add_literal_data(curval_str)
f.pop_style()
- f.add_literal_data(_unicode_decode(" of "))
+ f.add_literal_data(" of ")
f.push_style(number_style)
- f.add_literal_data(_unicode_decode(maxval_str))
+ f.add_literal_data(maxval_str)
f.pop_style()
- f.add_literal_data(_unicode_decode(" complete"))
+ f.add_literal_data(" complete")
if self.running:
- f.add_literal_data(_unicode_decode(", "))
+ f.add_literal_data(", ")
f.push_style(number_style)
- f.add_literal_data(_unicode_decode(running_str))
+ f.add_literal_data(running_str)
f.pop_style()
- f.add_literal_data(_unicode_decode(" running"))
+ f.add_literal_data(" running")
if self.failed:
- f.add_literal_data(_unicode_decode(", "))
+ f.add_literal_data(", ")
f.push_style(number_style)
- f.add_literal_data(_unicode_decode(failed_str))
+ f.add_literal_data(failed_str)
f.pop_style()
- f.add_literal_data(_unicode_decode(" failed"))
+ f.add_literal_data(" failed")
padding = self._jobs_column_width - len(plain_output.getvalue())
if padding > 0:
- f.add_literal_data(padding * _unicode_decode(" "))
+ f.add_literal_data(padding * " ")
- f.add_literal_data(_unicode_decode("Load avg: "))
- f.add_literal_data(_unicode_decode(load_avg_str))
+ f.add_literal_data("Load avg: ")
+ f.add_literal_data(load_avg_str)
# Truncate to fit width, to avoid making the terminal scroll if the
# line overflows (happens when the load average is large).
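
Two py2/py3 portability points drive the JobStatusDisplay hunk: tigetstr() wants a native string on PyPy (hence portage._native_string, bug #470258), and with unicode_literals in force every string literal is already text, so plain "%s" % (value,) formatting replaces the _unicode_decode() wrappers wholesale. A tiny illustration of the second point:

    from __future__ import unicode_literals

    count = 42
    label = "%s" % (count,)   # text on Python 2 and 3 alike
    assert label == "42"
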
diff --git a/pym/_emerge/MergeListItem.py b/pym/_emerge/MergeListItem.py
index 8086c689a..938f8014a 100644
--- a/pym/_emerge/MergeListItem.py
+++ b/pym/_emerge/MergeListItem.py
@@ -1,7 +1,8 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage import os
+from portage.dep import _repo_separator
from portage.output import colorize
from _emerge.AsynchronousTask import AsynchronousTask
@@ -32,7 +33,7 @@ class MergeListItem(CompositeTask):
if pkg.installed:
# uninstall, executed by self.merge()
self.returncode = os.EX_OK
- self.wait()
+ self._async_wait()
return
args_set = self.args_set
@@ -47,7 +48,9 @@ class MergeListItem(CompositeTask):
action_desc = "Emerging"
preposition = "for"
+ pkg_color = "PKG_MERGE"
if pkg.type_name == "binary":
+ pkg_color = "PKG_BINARY_MERGE"
action_desc += " binary"
if build_opts.fetchonly:
@@ -57,16 +60,7 @@ class MergeListItem(CompositeTask):
(action_desc,
colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
- colorize("GOOD", pkg.cpv))
-
- portdb = pkg.root_config.trees["porttree"].dbapi
- portdir_repo_name = portdb.getRepositoryName(portdb.porttree_root)
- if portdir_repo_name:
- pkg_repo_name = pkg.repo
- if pkg_repo_name != portdir_repo_name:
- if pkg_repo_name == pkg.UNKNOWN_REPO:
- pkg_repo_name = "unknown repo"
- msg += " from %s" % pkg_repo_name
+ colorize(pkg_color, pkg.cpv + _repo_separator + pkg.repo))
if pkg.root_config.settings["ROOT"] != "/":
msg += " %s %s" % (preposition, pkg.root)
diff --git a/pym/_emerge/MetadataRegen.py b/pym/_emerge/MetadataRegen.py
index e82015fd1..d92b6a06e 100644
--- a/pym/_emerge/MetadataRegen.py
+++ b/pym/_emerge/MetadataRegen.py
@@ -1,18 +1,20 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
from portage import os
from portage.dep import _repo_separator
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
-from _emerge.PollScheduler import PollScheduler
+from portage.cache.cache_errors import CacheError
+from portage.util._async.AsyncScheduler import AsyncScheduler
-class MetadataRegen(PollScheduler):
+class MetadataRegen(AsyncScheduler):
def __init__(self, portdb, cp_iter=None, consumer=None,
- max_jobs=None, max_load=None):
- PollScheduler.__init__(self, main=True)
+ write_auxdb=True, **kwargs):
+ AsyncScheduler.__init__(self, **kwargs)
self._portdb = portdb
+ self._write_auxdb = write_auxdb
self._global_cleanse = False
if cp_iter is None:
cp_iter = self._iter_every_cp()
@@ -22,34 +24,21 @@ class MetadataRegen(PollScheduler):
self._cp_iter = cp_iter
self._consumer = consumer
- if max_jobs is None:
- max_jobs = 1
-
- self._max_jobs = max_jobs
- self._max_load = max_load
-
self._valid_pkgs = set()
self._cp_set = set()
self._process_iter = self._iter_metadata_processes()
- self.returncode = os.EX_OK
- self._error_count = 0
self._running_tasks = set()
- self._remaining_tasks = True
- def _terminate_tasks(self):
- for task in list(self._running_tasks):
- task.cancel()
+ def _next_task(self):
+ return next(self._process_iter)
def _iter_every_cp(self):
- portage.writemsg_stdout("Listing available packages...\n")
- every_cp = self._portdb.cp_all()
- portage.writemsg_stdout("Regenerating cache entries...\n")
- every_cp.sort(reverse=True)
- try:
- while not self._terminated_tasks:
- yield every_cp.pop()
- except IndexError:
- pass
+		# List categories individually, in order to start yielding
+		# sooner and to reduce latency in case of a signal interrupt.
+ cp_all = self._portdb.cp_all
+ for category in sorted(self._portdb.categories):
+ for cp in cp_all(categories=(category,)):
+ yield cp
def _iter_metadata_processes(self):
portdb = self._portdb
@@ -57,8 +46,9 @@ class MetadataRegen(PollScheduler):
cp_set = self._cp_set
consumer = self._consumer
+ portage.writemsg_stdout("Regenerating cache entries...\n")
for cp in self._cp_iter:
- if self._terminated_tasks:
+ if self._terminated.is_set():
break
cp_set.add(cp)
portage.writemsg_stdout("Processing %s\n" % cp)
@@ -68,7 +58,7 @@ class MetadataRegen(PollScheduler):
repo = portdb.repositories.get_repo_for_location(mytree)
cpv_list = portdb.cp_list(cp, mytree=[repo.location])
for cpv in cpv_list:
- if self._terminated_tasks:
+ if self._terminated.is_set():
break
valid_pkgs.add(cpv)
ebuild_path, repo_path = portdb.findname2(cpv, myrepo=repo.name)
@@ -84,22 +74,21 @@ class MetadataRegen(PollScheduler):
yield EbuildMetadataPhase(cpv=cpv,
ebuild_hash=ebuild_hash,
portdb=portdb, repo_path=repo_path,
- settings=portdb.doebuild_settings)
+ settings=portdb.doebuild_settings,
+ write_auxdb=self._write_auxdb)
- def _keep_scheduling(self):
- return self._remaining_tasks and not self._terminated_tasks
+ def _wait(self):
- def run(self):
+ AsyncScheduler._wait(self)
portdb = self._portdb
- from portage.cache.cache_errors import CacheError
dead_nodes = {}
- self._main_loop()
-
+ self._termination_check()
if self._terminated_tasks:
- self.returncode = 1
- return
+ portdb.flush_cache()
+ self.returncode = self._cancelled_returncode
+ return self.returncode
if self._global_cleanse:
for mytree in portdb.porttrees:
@@ -142,29 +131,12 @@ class MetadataRegen(PollScheduler):
except (KeyError, CacheError):
pass
- def _schedule_tasks(self):
- if self._terminated_tasks:
- return
-
- while self._can_add_job():
- try:
- metadata_process = next(self._process_iter)
- except StopIteration:
- self._remaining_tasks = False
- return
-
- self._jobs += 1
- self._running_tasks.add(metadata_process)
- metadata_process.scheduler = self.sched_iface
- metadata_process.addExitListener(self._metadata_exit)
- metadata_process.start()
-
- def _metadata_exit(self, metadata_process):
- self._jobs -= 1
- self._running_tasks.discard(metadata_process)
+ portdb.flush_cache()
+ return self.returncode
+
+ def _task_exit(self, metadata_process):
+
if metadata_process.returncode != os.EX_OK:
- self.returncode = 1
- self._error_count += 1
self._valid_pkgs.discard(metadata_process.cpv)
if not self._terminated_tasks:
portage.writemsg("Error processing %s, continuing...\n" % \
@@ -179,5 +151,4 @@ class MetadataRegen(PollScheduler):
metadata_process.ebuild_hash,
metadata_process.eapi_supported)
- self._schedule()
-
+ AsyncScheduler._task_exit(self, metadata_process)
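
MetadataRegen sheds its hand-rolled job accounting (_schedule_tasks, _metadata_exit, the error counter) by becoming an AsyncScheduler: it supplies work through _next_task(), which signals exhaustion with StopIteration, and observes completions in _task_exit(), chaining to the base class. A skeletal, serial model of that contract, inferred from this hunk rather than from AsyncScheduler itself:

    class MiniScheduler(object):
        def __init__(self, task_iter):
            self._iter = iter(task_iter)
            self.failed = 0

        def _next_task(self):
            return next(self._iter)  # StopIteration ends scheduling

        def _task_exit(self, returncode):
            if returncode != 0:
                self.failed += 1

        def run(self):
            # Serial stand-in for the real event-driven loop.
            while True:
                try:
                    task = self._next_task()
                except StopIteration:
                    break
                self._task_exit(task())

    # Usage:
    #     s = MiniScheduler([lambda: 0, lambda: 1])
    #     s.run()
    #     assert s.failed == 1
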
diff --git a/pym/_emerge/MiscFunctionsProcess.py b/pym/_emerge/MiscFunctionsProcess.py
index afa44fb2a..bada79d86 100644
--- a/pym/_emerge/MiscFunctionsProcess.py
+++ b/pym/_emerge/MiscFunctionsProcess.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
@@ -29,6 +29,10 @@ class MiscFunctionsProcess(AbstractEbuildProcess):
AbstractEbuildProcess._start(self)
def _spawn(self, args, **kwargs):
+
+ if self._dummy_pipe_fd is not None:
+ self.settings["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
# Temporarily unset EBUILD_PHASE so that bashrc code doesn't
# think this is a real phase.
phase_backup = self.settings.pop("EBUILD_PHASE", None)
@@ -37,3 +41,4 @@ class MiscFunctionsProcess(AbstractEbuildProcess):
finally:
if phase_backup is not None:
self.settings["EBUILD_PHASE"] = phase_backup
+ self.settings.pop("PORTAGE_PIPE_FD", None)
diff --git a/pym/_emerge/Package.py b/pym/_emerge/Package.py
index 14d069449..a09f73c59 100644
--- a/pym/_emerge/Package.py
+++ b/pym/_emerge/Package.py
@@ -1,8 +1,12 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import sys
from itertools import chain
+import warnings
+
import portage
from portage import _encodings, _unicode_decode, _unicode_encode
from portage.cache.mappings import slot_dict_class
@@ -10,67 +14,82 @@ from portage.const import EBUILD_PHASES
from portage.dep import Atom, check_required_use, use_reduce, \
paren_enclose, _slot_separator, _repo_separator
from portage.versions import _pkg_str, _unknown_repo
-from portage.eapi import _get_eapi_attrs
+from portage.eapi import _get_eapi_attrs, eapi_has_use_aliases
from portage.exception import InvalidDependString
+from portage.localization import _
from _emerge.Task import Task
if sys.hexversion >= 0x3000000:
basestring = str
long = int
+ _unicode = str
+else:
+ _unicode = unicode
class Package(Task):
__hash__ = Task.__hash__
__slots__ = ("built", "cpv", "depth",
- "installed", "metadata", "onlydeps", "operation",
+ "installed", "onlydeps", "operation",
"root_config", "type_name",
"category", "counter", "cp", "cpv_split",
"inherited", "iuse", "mtime",
- "pf", "root", "slot", "slot_abi", "slot_atom", "version") + \
- ("_invalid", "_raw_metadata", "_masks", "_use",
+ "pf", "root", "slot", "sub_slot", "slot_atom", "version") + \
+ ("_invalid", "_masks", "_metadata", "_raw_metadata", "_use",
"_validated_atoms", "_visible")
metadata_keys = [
"BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "EAPI",
- "INHERITED", "IUSE", "KEYWORDS",
+ "HDEPEND", "INHERITED", "IUSE", "KEYWORDS",
"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
"repository", "PROPERTIES", "RESTRICT", "SLOT", "USE",
"_mtime_", "DEFINED_PHASES", "REQUIRED_USE"]
- _dep_keys = ('DEPEND', 'PDEPEND', 'RDEPEND',)
+ _dep_keys = ('DEPEND', 'HDEPEND', 'PDEPEND', 'RDEPEND')
+ _buildtime_keys = ('DEPEND', 'HDEPEND')
+ _runtime_keys = ('PDEPEND', 'RDEPEND')
_use_conditional_misc_keys = ('LICENSE', 'PROPERTIES', 'RESTRICT')
UNKNOWN_REPO = _unknown_repo
def __init__(self, **kwargs):
+ metadata = _PackageMetadataWrapperBase(kwargs.pop('metadata'))
Task.__init__(self, **kwargs)
# the SlotObject constructor assigns self.root_config from keyword args
# and is an instance of a '_emerge.RootConfig.RootConfig class
self.root = self.root_config.root
- self._raw_metadata = _PackageMetadataWrapperBase(self.metadata)
- self.metadata = _PackageMetadataWrapper(self, self._raw_metadata)
+ self._raw_metadata = metadata
+ self._metadata = _PackageMetadataWrapper(self, metadata)
if not self.built:
- self.metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
- eapi_attrs = _get_eapi_attrs(self.metadata["EAPI"])
- self.cpv = _pkg_str(self.cpv, slot=self.metadata["SLOT"],
- repo=self.metadata.get('repository', ''),
- eapi=self.metadata["EAPI"])
+ self._metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
+ eapi_attrs = _get_eapi_attrs(self.eapi)
+ self.cpv = _pkg_str(self.cpv, metadata=self._metadata,
+ settings=self.root_config.settings)
if hasattr(self.cpv, 'slot_invalid'):
self._invalid_metadata('SLOT.invalid',
- "SLOT: invalid value: '%s'" % self.metadata["SLOT"])
+ "SLOT: invalid value: '%s'" % self._metadata["SLOT"])
+ self.cpv_split = self.cpv.cpv_split
+ self.category, self.pf = portage.catsplit(self.cpv)
self.cp = self.cpv.cp
+ self.version = self.cpv.version
self.slot = self.cpv.slot
- self.slot_abi = self.cpv.slot_abi
+ self.sub_slot = self.cpv.sub_slot
+ self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
# sync metadata with validated repo (may be UNKNOWN_REPO)
- self.metadata['repository'] = self.cpv.repo
+ self._metadata['repository'] = self.cpv.repo
+
+ if eapi_attrs.iuse_effective:
+ implicit_match = self.root_config.settings._iuse_effective_match
+ else:
+ implicit_match = self.root_config.settings._iuse_implicit_match
+ usealiases = self.root_config.settings._use_manager.getUseAliases(self)
+ self.iuse = self._iuse(self, self._metadata["IUSE"].split(), implicit_match,
+ usealiases, self.eapi)
+
if (self.iuse.enabled or self.iuse.disabled) and \
not eapi_attrs.iuse_defaults:
if not self.installed:
self._invalid_metadata('EAPI.incompatible',
"IUSE contains defaults, but EAPI doesn't allow them")
- self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
- self.category, self.pf = portage.catsplit(self.cpv)
- self.cpv_split = self.cpv.cpv_split
- self.version = self.cpv.version
if self.inherited is None:
self.inherited = frozenset()
@@ -87,6 +106,37 @@ class Package(Task):
type_name=self.type_name)
self._hash_value = hash(self._hash_key)
+ @property
+ def eapi(self):
+ return self._metadata["EAPI"]
+
+ @property
+ def build_time(self):
+ if not self.built:
+ raise AttributeError('build_time')
+ try:
+ return long(self._metadata['BUILD_TIME'])
+ except (KeyError, ValueError):
+ return 0
+
+ @property
+ def defined_phases(self):
+ return self._metadata.defined_phases
+
+ @property
+ def properties(self):
+ return self._metadata.properties
+
+ @property
+ def restrict(self):
+ return self._metadata.restrict
+
+ @property
+ def metadata(self):
+ warnings.warn("_emerge.Package.Package.metadata is deprecated",
+ DeprecationWarning, stacklevel=3)
+ return self._metadata
+
# These are calculated on-demand, so that they are calculated
# after FakeVartree applies its metadata tweaks.
@property
@@ -120,6 +170,10 @@ class Package(Task):
self._validate_deps()
return self._validated_atoms
+ @property
+ def stable(self):
+ return self.cpv.stable
+
@classmethod
def _gen_hash_key(cls, cpv=None, installed=None, onlydeps=None,
operation=None, repo_name=None, root_config=None,
@@ -154,15 +208,15 @@ class Package(Task):
# So overwrite the repo_key with type_name.
repo_key = type_name
- return (type_name, root, cpv, operation, repo_key)
+ return (type_name, root, _unicode(cpv), operation, repo_key)
def _validate_deps(self):
"""
Validate deps. This does not trigger USE calculation since that
is expensive for ebuilds and therefore we want to avoid doing
- in unnecessarily (like for masked packages).
+ it unnecessarily (like for masked packages).
"""
- eapi = self.metadata['EAPI']
+ eapi = self.eapi
dep_eapi = eapi
dep_valid_flag = self.iuse.is_valid_flag
if self.installed:
@@ -175,31 +229,42 @@ class Package(Task):
validated_atoms = []
for k in self._dep_keys:
- v = self.metadata.get(k)
+ v = self._metadata.get(k)
if not v:
continue
try:
- validated_atoms.extend(use_reduce(v, eapi=dep_eapi,
+ atoms = use_reduce(v, eapi=dep_eapi,
matchall=True, is_valid_flag=dep_valid_flag,
- token_class=Atom, flat=True))
+ token_class=Atom, flat=True)
except InvalidDependString as e:
self._metadata_exception(k, e)
+ else:
+ validated_atoms.extend(atoms)
+ if not self.built:
+ for atom in atoms:
+ if not isinstance(atom, Atom):
+ continue
+ if atom.slot_operator_built:
+ e = InvalidDependString(
+ _("Improper context for slot-operator "
+ "\"built\" atom syntax: %s") %
+ (atom.unevaluated_atom,))
+ self._metadata_exception(k, e)
self._validated_atoms = tuple(set(atom for atom in
validated_atoms if isinstance(atom, Atom)))
k = 'PROVIDE'
- v = self.metadata.get(k)
+ v = self._metadata.get(k)
if v:
try:
use_reduce(v, eapi=dep_eapi, matchall=True,
is_valid_flag=dep_valid_flag, token_class=Atom)
except InvalidDependString as e:
- self._invalid_metadata("PROVIDE.syntax",
- _unicode_decode("%s: %s") % (k, e))
+ self._invalid_metadata("PROVIDE.syntax", "%s: %s" % (k, e))
for k in self._use_conditional_misc_keys:
- v = self.metadata.get(k)
+ v = self._metadata.get(k)
if not v:
continue
try:
@@ -209,24 +274,20 @@ class Package(Task):
self._metadata_exception(k, e)
k = 'REQUIRED_USE'
- v = self.metadata.get(k)
- if v:
+ v = self._metadata.get(k)
+ if v and not self.built:
if not _get_eapi_attrs(eapi).required_use:
self._invalid_metadata('EAPI.incompatible',
"REQUIRED_USE set, but EAPI='%s' doesn't allow it" % eapi)
else:
try:
check_required_use(v, (),
- self.iuse.is_valid_flag)
+ self.iuse.is_valid_flag, eapi=eapi)
except InvalidDependString as e:
- # Force unicode format string for python-2.x safety,
- # ensuring that PortageException.__unicode__() is used
- # when necessary.
- self._invalid_metadata(k + ".syntax",
- _unicode_decode("%s: %s") % (k, e))
+ self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
k = 'SRC_URI'
- v = self.metadata.get(k)
+ v = self._metadata.get(k)
if v:
try:
use_reduce(v, is_src_uri=True, eapi=eapi, matchall=True,
@@ -248,36 +309,45 @@ class Package(Task):
if self.invalid is not False:
masks['invalid'] = self.invalid
- if not settings._accept_chost(self.cpv, self.metadata):
- masks['CHOST'] = self.metadata['CHOST']
+ if not settings._accept_chost(self.cpv, self._metadata):
+ masks['CHOST'] = self._metadata['CHOST']
- eapi = self.metadata["EAPI"]
+ eapi = self.eapi
if not portage.eapi_is_supported(eapi):
masks['EAPI.unsupported'] = eapi
if portage._eapi_is_deprecated(eapi):
masks['EAPI.deprecated'] = eapi
missing_keywords = settings._getMissingKeywords(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
if missing_keywords:
masks['KEYWORDS'] = missing_keywords
try:
missing_properties = settings._getMissingProperties(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
if missing_properties:
masks['PROPERTIES'] = missing_properties
except InvalidDependString:
# already recorded as 'invalid'
pass
- mask_atom = settings._getMaskAtom(self.cpv, self.metadata)
+ try:
+ missing_restricts = settings._getMissingRestrict(
+ self.cpv, self._metadata)
+ if missing_restricts:
+ masks['RESTRICT'] = missing_restricts
+ except InvalidDependString:
+ # already recorded as 'invalid'
+ pass
+
+ mask_atom = settings._getMaskAtom(self.cpv, self._metadata)
if mask_atom is not None:
masks['package.mask'] = mask_atom
try:
missing_licenses = settings._getMissingLicenses(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
if missing_licenses:
masks['LICENSE'] = missing_licenses
except InvalidDependString:
@@ -303,7 +373,8 @@ class Package(Task):
'CHOST' in masks or \
'EAPI.deprecated' in masks or \
'KEYWORDS' in masks or \
- 'PROPERTIES' in masks):
+ 'PROPERTIES' in masks or \
+ 'RESTRICT' in masks):
return False
if 'package.mask' in masks or \
@@ -316,7 +387,7 @@ class Package(Task):
"""returns None, 'missing', or 'unstable'."""
missing = self.root_config.settings._getRawMissingKeywords(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
if not missing:
return None
@@ -337,17 +408,22 @@ class Package(Task):
"""returns a bool if the cpv is in the list of
expanded pmaskdict[cp] available ebuilds"""
pmask = self.root_config.settings._getRawMaskAtom(
- self.cpv, self.metadata)
+ self.cpv, self._metadata)
return pmask is not None
def _metadata_exception(self, k, e):
+ if k.endswith('DEPEND'):
+ qacat = 'dependency.syntax'
+ else:
+ qacat = k + ".syntax"
+
# For unicode safety with python-2.x we need to avoid
# using the string format operator with a non-unicode
# format string, since that will result in the
# PortageException.__str__() method being invoked,
# followed by unsafe decoding that may result in a
- # UnicodeDecodeError. Therefore, use _unicode_decode()
+ # UnicodeDecodeError. Therefore, use unicode_literals
# to ensure that format strings are unicode, so that
# PortageException.__unicode__() is used when necessary
# in python-2.x.
@@ -359,19 +435,17 @@ class Package(Task):
continue
categorized_error = True
self._invalid_metadata(error.category,
- _unicode_decode("%s: %s") % (k, error))
+ "%s: %s" % (k, error))
if not categorized_error:
- self._invalid_metadata(k + ".syntax",
- _unicode_decode("%s: %s") % (k, e))
+			self._invalid_metadata(qacat, "%s: %s" % (k, e))
else:
# For installed packages, show the path of the file
# containing the invalid metadata, since the user may
# want to fix the deps by hand.
vardb = self.root_config.trees['vartree'].dbapi
path = vardb.getpath(self.cpv, filename=k)
- self._invalid_metadata(k + ".syntax",
- _unicode_decode("%s: %s in '%s'") % (k, e, path))
+ self._invalid_metadata(qacat, "%s: %s in '%s'" % (k, e, path))
def _invalid_metadata(self, msg_type, msg):
if self._invalid is None:
@@ -394,7 +468,8 @@ class Package(Task):
cpv_color = "PKG_NOMERGE"
s = "(%s, %s" \
- % (portage.output.colorize(cpv_color, self.cpv + _repo_separator + self.repo) , self.type_name)
+ % (portage.output.colorize(cpv_color, self.cpv + _slot_separator + \
+			self.slot + "/" + self.sub_slot + _repo_separator + self.repo), self.type_name)
if self.type_name == "installed":
if self.root_config.settings['ROOT'] != "/":
@@ -425,13 +500,16 @@ class Package(Task):
# Share identical frozenset instances when available.
_frozensets = {}
- def __init__(self, pkg, use_str):
+ def __init__(self, pkg, enabled_flags):
self._pkg = pkg
self._expand = None
self._expand_hidden = None
self._force = None
self._mask = None
- self.enabled = frozenset(use_str.split())
+ if eapi_has_use_aliases(pkg.eapi):
+ for enabled_flag in enabled_flags:
+ enabled_flags.extend(pkg.iuse.alias_mapping.get(enabled_flag, []))
+ self.enabled = frozenset(enabled_flags)
if pkg.built:
# Use IUSE to validate USE settings for built packages,
# in case the package manager that built this package
@@ -481,7 +559,7 @@ class Package(Task):
@property
def repo(self):
- return self.metadata['repository']
+ return self._metadata['repository']
@property
def repo_priority(self):
@@ -493,7 +571,7 @@ class Package(Task):
@property
def use(self):
if self._use is None:
- self.metadata._init_use()
+ self._init_use()
return self._use
def _get_pkgsettings(self):
@@ -502,28 +580,81 @@ class Package(Task):
pkgsettings.setcpv(self)
return pkgsettings
+ def _init_use(self):
+ if self.built:
+ # Use IUSE to validate USE settings for built packages,
+ # in case the package manager that built this package
+ # failed to do that for some reason (or in case of
+ # data corruption). The enabled flags must be consistent
+ # with implicit IUSE, in order to avoid potential
+ # inconsistencies in USE dep matching (see bug #453400).
+ use_str = self._metadata['USE']
+ is_valid_flag = self.iuse.is_valid_flag
+ enabled_flags = [x for x in use_str.split() if is_valid_flag(x)]
+ use_str = " ".join(enabled_flags)
+ self._use = self._use_class(
+ self, enabled_flags)
+ else:
+ try:
+ use_str = _PackageMetadataWrapperBase.__getitem__(
+ self._metadata, 'USE')
+ except KeyError:
+ use_str = None
+ calculated_use = False
+ if not use_str:
+ use_str = self._get_pkgsettings()["PORTAGE_USE"]
+ calculated_use = True
+ self._use = self._use_class(
+ self, use_str.split())
+ # Initialize these now, since USE access has just triggered
+ # setcpv, and we want to cache the result of the force/mask
+ # calculations that were done.
+ if calculated_use:
+ self._use._init_force_mask()
+
+ _PackageMetadataWrapperBase.__setitem__(
+ self._metadata, 'USE', use_str)
+
+ return use_str
+
class _iuse(object):
- __slots__ = ("__weakref__", "all", "enabled", "disabled",
- "tokens") + ("_iuse_implicit_match",)
+ __slots__ = ("__weakref__", "_iuse_implicit_match", "_pkg", "alias_mapping",
+ "all", "all_aliases", "enabled", "disabled", "tokens")
- def __init__(self, tokens, iuse_implicit_match):
+ def __init__(self, pkg, tokens, iuse_implicit_match, aliases, eapi):
+ self._pkg = pkg
self.tokens = tuple(tokens)
self._iuse_implicit_match = iuse_implicit_match
enabled = []
disabled = []
other = []
+ enabled_aliases = []
+ disabled_aliases = []
+ other_aliases = []
+ aliases_supported = eapi_has_use_aliases(eapi)
+ self.alias_mapping = {}
for x in tokens:
prefix = x[:1]
if prefix == "+":
enabled.append(x[1:])
+ if aliases_supported:
+ self.alias_mapping[x[1:]] = aliases.get(x[1:], [])
+ enabled_aliases.extend(self.alias_mapping[x[1:]])
elif prefix == "-":
disabled.append(x[1:])
+ if aliases_supported:
+ self.alias_mapping[x[1:]] = aliases.get(x[1:], [])
+ disabled_aliases.extend(self.alias_mapping[x[1:]])
else:
other.append(x)
- self.enabled = frozenset(enabled)
- self.disabled = frozenset(disabled)
+ if aliases_supported:
+ self.alias_mapping[x] = aliases.get(x, [])
+ other_aliases.extend(self.alias_mapping[x])
+ self.enabled = frozenset(chain(enabled, enabled_aliases))
+ self.disabled = frozenset(chain(disabled, disabled_aliases))
self.all = frozenset(chain(enabled, disabled, other))
+ self.all_aliases = frozenset(chain(enabled_aliases, disabled_aliases, other_aliases))
def is_valid_flag(self, flags):
"""
@@ -534,7 +665,7 @@ class Package(Task):
flags = [flags]
for flag in flags:
- if not flag in self.all and \
+ if not flag in self.all and not flag in self.all_aliases and \
not self._iuse_implicit_match(flag):
return False
return True
@@ -547,11 +678,28 @@ class Package(Task):
flags = [flags]
missing_iuse = []
for flag in flags:
- if not flag in self.all and \
+ if not flag in self.all and not flag in self.all_aliases and \
not self._iuse_implicit_match(flag):
missing_iuse.append(flag)
return missing_iuse
+ def get_real_flag(self, flag):
+ """
+ Returns the flag's name within the scope of this package
+ (accounting for aliases), or None if the flag is unknown.
+ """
+ if flag in self.all:
+ return flag
+ elif flag in self.all_aliases:
+ for k, v in self.alias_mapping.items():
+ if flag in v:
+ return k
+
+ if self._iuse_implicit_match(flag):
+ return flag
+
+ return None
+
def __len__(self):
return 4
@@ -604,7 +752,7 @@ class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
__slots__ = ("_pkg",)
_wrapped_keys = frozenset(
- ["COUNTER", "INHERITED", "IUSE", "USE", "_mtime_"])
+ ["COUNTER", "INHERITED", "USE", "_mtime_"])
_use_conditional_keys = frozenset(
['LICENSE', 'PROPERTIES', 'PROVIDE', 'RESTRICT',])
@@ -617,31 +765,6 @@ class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
self.update(metadata)
- def _init_use(self):
- if self._pkg.built:
- use_str = self['USE']
- self._pkg._use = self._pkg._use_class(
- self._pkg, use_str)
- else:
- try:
- use_str = _PackageMetadataWrapperBase.__getitem__(self, 'USE')
- except KeyError:
- use_str = None
- calculated_use = False
- if not use_str:
- use_str = self._pkg._get_pkgsettings()["PORTAGE_USE"]
- calculated_use = True
- _PackageMetadataWrapperBase.__setitem__(self, 'USE', use_str)
- self._pkg._use = self._pkg._use_class(
- self._pkg, use_str)
- # Initialize these now, since USE access has just triggered
- # setcpv, and we want to cache the result of the force/mask
- # calculations that were done.
- if calculated_use:
- self._pkg._use._init_force_mask()
-
- return use_str
-
def __getitem__(self, k):
v = _PackageMetadataWrapperBase.__getitem__(self, k)
if k in self._use_conditional_keys:
@@ -659,7 +782,7 @@ class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
elif k == 'USE' and not self._pkg.built:
if not v:
# This is lazy because it's expensive.
- v = self._init_use()
+ v = self._pkg._init_use()
return v
@@ -673,10 +796,6 @@ class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
v = frozenset(v.split())
self._pkg.inherited = v
- def _set_iuse(self, k, v):
- self._pkg.iuse = self._pkg._iuse(
- v.split(), self._pkg.root_config.settings._iuse_implicit_match)
-
def _set_counter(self, k, v):
if isinstance(v, basestring):
try:
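
Much of the Package.py churn moves metadata behind self._metadata (with a DeprecationWarning shim on the old metadata attribute) and wires in EAPI-conditional USE-flag aliases: a flag is now valid if it is in IUSE, is an alias of an IUSE flag, or matches implicit IUSE, and get_real_flag maps an alias back to its canonical name. A self-contained sketch of that resolution order, with illustrative data:

    def get_real_flag(flag, all_flags, alias_mapping, implicit_match):
        if flag in all_flags:
            return flag
        for real, aliases in alias_mapping.items():
            if flag in aliases:
                return real  # alias resolves to its IUSE flag
        if implicit_match(flag):
            return flag
        return None

    # "jpg" is declared as an alias of "jpeg", so it resolves to "jpeg":
    assert get_real_flag("jpg", {"jpeg", "png"},
        {"jpeg": ["jpg"]}, lambda f: False) == "jpeg"
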
diff --git a/pym/_emerge/PackageMerge.py b/pym/_emerge/PackageMerge.py
index eed34e99b..ef298ca48 100644
--- a/pym/_emerge/PackageMerge.py
+++ b/pym/_emerge/PackageMerge.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.CompositeTask import CompositeTask
@@ -11,6 +11,9 @@ class PackageMerge(CompositeTask):
self.scheduler = self.merge.scheduler
pkg = self.merge.pkg
pkg_count = self.merge.pkg_count
+ pkg_color = "PKG_MERGE"
+ if pkg.type_name == "binary":
+ pkg_color = "PKG_BINARY_MERGE"
if pkg.installed:
action_desc = "Uninstalling"
@@ -26,7 +29,7 @@ class PackageMerge(CompositeTask):
msg = "%s %s%s" % \
(action_desc,
counter_str,
- colorize("GOOD", pkg.cpv))
+ colorize(pkg_color, pkg.cpv))
if pkg.root_config.settings["ROOT"] != "/":
msg += " %s %s" % (preposition, pkg.root)
diff --git a/pym/_emerge/PackageUninstall.py b/pym/_emerge/PackageUninstall.py
index eb6a947a5..16c2f749b 100644
--- a/pym/_emerge/PackageUninstall.py
+++ b/pym/_emerge/PackageUninstall.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import logging
@@ -33,7 +33,7 @@ class PackageUninstall(CompositeTask):
# Apparently the package got uninstalled
# already, so we can safely return early.
self.returncode = os.EX_OK
- self.wait()
+ self._async_wait()
return
self.settings.setcpv(self.pkg)
@@ -67,7 +67,7 @@ class PackageUninstall(CompositeTask):
if retval != os.EX_OK:
self._builddir_lock.unlock()
self.returncode = retval
- self.wait()
+ self._async_wait()
return
self._writemsg_level(">>> Unmerging %s...\n" % (self.pkg.cpv,),
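
This file and MergeListItem above make the same substitution: when _start() determines the returncode synchronously, wait() becomes _async_wait(), so exit listeners are notified from a later event-loop iteration instead of being invoked while _start() is still on the stack. A toy model of the difference, with a plain list standing in for the event loop:

    class Task(object):
        def __init__(self, loop):
            self.loop = loop
            self.returncode = None
            self.listeners = []

        def wait(self):
            # Synchronous: listeners run immediately, inside the caller.
            for cb in self.listeners:
                cb(self)

        def _async_wait(self):
            # Deferred: listeners run on the next loop iteration, after
            # the frame that set returncode has unwound.
            self.loop.append(self.wait)

    events, loop = [], []
    t = Task(loop)
    t.listeners.append(lambda task: events.append(task.returncode))
    t.returncode = 0
    t._async_wait()
    assert events == []      # nothing observed yet...
    for cb in loop:          # ...until the loop runs
        cb()
    assert events == [0]
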
diff --git a/pym/_emerge/PackageVirtualDbapi.py b/pym/_emerge/PackageVirtualDbapi.py
index 0f7be44b1..56a5576e3 100644
--- a/pym/_emerge/PackageVirtualDbapi.py
+++ b/pym/_emerge/PackageVirtualDbapi.py
@@ -140,10 +140,10 @@ class PackageVirtualDbapi(dbapi):
self._clear_cache()
def aux_get(self, cpv, wants, myrepo=None):
- metadata = self._cpv_map[cpv].metadata
+ metadata = self._cpv_map[cpv]._metadata
return [metadata.get(x, "") for x in wants]
def aux_update(self, cpv, values):
- self._cpv_map[cpv].metadata.update(values)
+ self._cpv_map[cpv]._metadata.update(values)
self._clear_cache()
diff --git a/pym/_emerge/PipeReader.py b/pym/_emerge/PipeReader.py
index 90febdf44..a8392c329 100644
--- a/pym/_emerge/PipeReader.py
+++ b/pym/_emerge/PipeReader.py
@@ -1,9 +1,11 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import fcntl
+import sys
+
from portage import os
from _emerge.AbstractPollTask import AbstractPollTask
-import fcntl
class PipeReader(AbstractPollTask):
@@ -27,18 +29,28 @@ class PipeReader(AbstractPollTask):
output_handler = self._output_handler
for f in self.input_files.values():
- fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
- fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
- self._reg_ids.add(self.scheduler.register(f.fileno(),
+			fd = f if isinstance(f, int) else f.fileno()
+ fcntl.fcntl(fd, fcntl.F_SETFL,
+ fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(fd, fcntl.F_SETFD,
+ fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_ids.add(self.scheduler.io_add_watch(fd,
self._registered_events, output_handler))
self._registered = True
- def isAlive(self):
- return self._registered
-
def _cancel(self):
+ self._unregister()
if self.returncode is None:
- self.returncode = 1
+ self.returncode = self._cancelled_returncode
def _wait(self):
if self.returncode is not None:
@@ -102,11 +114,14 @@ class PipeReader(AbstractPollTask):
if self._reg_ids is not None:
for reg_id in self._reg_ids:
- self.scheduler.unregister(reg_id)
+ self.scheduler.source_remove(reg_id)
self._reg_ids = None
if self.input_files is not None:
for f in self.input_files.values():
- f.close()
+ if isinstance(f, int):
+ os.close(f)
+ else:
+ f.close()
self.input_files = None
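
PipeReader's input_files may now hold raw integer descriptors as well as file objects, so both descriptor extraction and cleanup branch on type. The two spots read as a pair of small helpers (a sketch; the names are illustrative):

    import os

    def as_fd(f):
        # A conditional expression rather than "and/or", so that
        # descriptor 0 (stdin) is not mistaken for a false value.
        return f if isinstance(f, int) else f.fileno()

    def close_input(f):
        if isinstance(f, int):
            os.close(f)
        else:
            f.close()
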
diff --git a/pym/_emerge/PollScheduler.py b/pym/_emerge/PollScheduler.py
index 5103e31d6..b118ac157 100644
--- a/pym/_emerge/PollScheduler.py
+++ b/pym/_emerge/PollScheduler.py
@@ -1,18 +1,13 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-import gzip
-import errno
-
try:
import threading
except ImportError:
import dummy_threading as threading
-from portage import _encodings
-from portage import _unicode_encode
-from portage.util import writemsg_level
-from portage.util.SlotObject import SlotObject
+import portage
+from portage.util._async.SchedulerInterface import SchedulerInterface
from portage.util._eventloop.EventLoop import EventLoop
from portage.util._eventloop.global_event_loop import global_event_loop
@@ -20,14 +15,10 @@ from _emerge.getloadavg import getloadavg
class PollScheduler(object):
- class _sched_iface_class(SlotObject):
- __slots__ = ("IO_ERR", "IO_HUP", "IO_IN", "IO_NVAL", "IO_OUT",
- "IO_PRI", "child_watch_add",
- "idle_add", "io_add_watch", "iteration",
- "output", "register", "run",
- "source_remove", "timeout_add", "unregister")
+ # max time between loadavg checks (milliseconds)
+ _loadavg_latency = None
- def __init__(self, main=False):
+ def __init__(self, main=False, event_loop=None):
"""
@param main: If True then use global_event_loop(), otherwise use
a local EventLoop instance (default is False, for safe use in
@@ -38,29 +29,20 @@ class PollScheduler(object):
self._terminated_tasks = False
self._max_jobs = 1
self._max_load = None
- self._jobs = 0
self._scheduling = False
self._background = False
- if main:
+ if event_loop is not None:
+ self._event_loop = event_loop
+ elif main:
self._event_loop = global_event_loop()
else:
- self._event_loop = EventLoop(main=False)
- self.sched_iface = self._sched_iface_class(
- IO_ERR=self._event_loop.IO_ERR,
- IO_HUP=self._event_loop.IO_HUP,
- IO_IN=self._event_loop.IO_IN,
- IO_NVAL=self._event_loop.IO_NVAL,
- IO_OUT=self._event_loop.IO_OUT,
- IO_PRI=self._event_loop.IO_PRI,
- child_watch_add=self._event_loop.child_watch_add,
- idle_add=self._event_loop.idle_add,
- io_add_watch=self._event_loop.io_add_watch,
- iteration=self._event_loop.iteration,
- output=self._task_output,
- register=self._event_loop.io_add_watch,
- source_remove=self._event_loop.source_remove,
- timeout_add=self._event_loop.timeout_add,
- unregister=self._event_loop.source_remove)
+ self._event_loop = (portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
+ self._sched_iface = SchedulerInterface(self._event_loop,
+ is_background=self._is_background)
+
+ def _is_background(self):
+ return self._background
def terminate(self):
"""
@@ -135,48 +117,23 @@ class PollScheduler(object):
Calls _schedule_tasks() and automatically returns early from
any recursive calls to this method that the _schedule_tasks()
call might trigger. This makes _schedule() safe to call from
- inside exit listeners.
+ inside exit listeners. This method always returns True, so that
+ it may be scheduled continuously via EventLoop.timeout_add().
"""
if self._scheduling:
- return False
+ return True
self._scheduling = True
try:
self._schedule_tasks()
finally:
self._scheduling = False
-
- def _main_loop(self):
- term_check_id = self.sched_iface.idle_add(self._termination_check)
- try:
- # Populate initial event sources. Unless we're scheduling
- # based on load average, we only need to do this once
- # here, since it can be called during the loop from within
- # event handlers.
- self._schedule()
- max_load = self._max_load
-
- # Loop while there are jobs to be scheduled.
- while self._keep_scheduling():
- self.sched_iface.iteration()
-
- if max_load is not None:
- # We have to schedule periodically, in case the load
- # average has changed since the last call.
- self._schedule()
-
- # Clean shutdown of previously scheduled jobs. In the
- # case of termination, this allows for basic cleanup
- # such as flushing of buffered output to logs.
- while self._is_work_scheduled():
- self.sched_iface.iteration()
- finally:
- self.sched_iface.source_remove(term_check_id)
+ return True
def _is_work_scheduled(self):
return bool(self._running_job_count())
def _running_job_count(self):
- return self._jobs
+ raise NotImplementedError(self)
def _can_add_job(self):
if self._terminated_tasks:
@@ -201,47 +158,3 @@ class PollScheduler(object):
return False
return True
-
- def _task_output(self, msg, log_path=None, background=None,
- level=0, noiselevel=-1):
- """
- Output msg to stdout if not self._background. If log_path
- is not None then append msg to the log (appends with
- compression if the filename extension of log_path
- corresponds to a supported compression type).
- """
-
- if background is None:
- # If the task does not have a local background value
- # (like for parallel-fetch), then use the global value.
- background = self._background
-
- msg_shown = False
- if not background:
- writemsg_level(msg, level=level, noiselevel=noiselevel)
- msg_shown = True
-
- if log_path is not None:
- try:
- f = open(_unicode_encode(log_path,
- encoding=_encodings['fs'], errors='strict'),
- mode='ab')
- f_real = f
- except IOError as e:
- if e.errno not in (errno.ENOENT, errno.ESTALE):
- raise
- if not msg_shown:
- writemsg_level(msg, level=level, noiselevel=noiselevel)
- else:
-
- if log_path.endswith('.gz'):
- # NOTE: The empty filename argument prevents us from
- # triggering a bug in python3 which causes GzipFile
- # to raise AttributeError if fileobj.name is bytes
- # instead of unicode.
- f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
-
- f.write(_unicode_encode(msg))
- f.close()
- if f_real is not f:
- f_real.close()
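
With _main_loop() and _task_output() gone (the latter subsumed by SchedulerInterface), _schedule() now always returns True so it can be installed as a recurring callback via EventLoop.timeout_add(), which follows the GLib convention: a timeout source stays armed while its callback returns True and is removed once it returns False. A sketch of that convention:

    def make_poller(jobs):
        def poll():
            if jobs:
                jobs.pop()
                return True   # keep the timeout source armed
            return False      # done: the loop drops this source
        return poll

    # With portage's EventLoop this would be registered roughly as:
    #     event_loop.timeout_add(interval_ms, make_poller(jobs))
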
diff --git a/pym/_emerge/QueueScheduler.py b/pym/_emerge/QueueScheduler.py
deleted file mode 100644
index 206087c7a..000000000
--- a/pym/_emerge/QueueScheduler.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 1999-2012 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from _emerge.PollScheduler import PollScheduler
-
-class QueueScheduler(PollScheduler):
-
- """
- Add instances of SequentialTaskQueue and then call run(). The
- run() method returns when no tasks remain.
- """
-
- def __init__(self, main=True, max_jobs=None, max_load=None):
- PollScheduler.__init__(self, main=main)
-
- if max_jobs is None:
- max_jobs = 1
-
- self._max_jobs = max_jobs
- self._max_load = max_load
-
- self._queues = []
- self._schedule_listeners = []
-
- def add(self, q):
- self._queues.append(q)
-
- def remove(self, q):
- self._queues.remove(q)
-
- def clear(self):
- for q in self._queues:
- q.clear()
-
- def run(self, timeout=None):
-
- timeout_callback = None
- if timeout is not None:
- def timeout_callback():
- timeout_callback.timed_out = True
- return False
- timeout_callback.timed_out = False
- timeout_callback.timeout_id = self.sched_iface.timeout_add(
- timeout, timeout_callback)
-
- term_check_id = self.sched_iface.idle_add(self._termination_check)
- try:
- while not (timeout_callback is not None and
- timeout_callback.timed_out):
- # We don't have any callbacks to trigger _schedule(),
- # so we have to call it explicitly here.
- self._schedule()
- if self._keep_scheduling():
- self.sched_iface.iteration()
- else:
- break
-
- while self._is_work_scheduled() and \
- not (timeout_callback is not None and
- timeout_callback.timed_out):
- self.sched_iface.iteration()
- finally:
- self.sched_iface.source_remove(term_check_id)
- if timeout_callback is not None:
- self.sched_iface.unregister(timeout_callback.timeout_id)
-
- def _schedule_tasks(self):
- """
- @rtype: bool
- @return: True if there may be remaining tasks to schedule,
- False otherwise.
- """
- if self._terminated_tasks:
- return
-
- while self._can_add_job():
- n = self._max_jobs - self._running_job_count()
- if n < 1:
- break
-
- if not self._start_next_job(n):
- return
-
- def _keep_scheduling(self):
- return not self._terminated_tasks and any(self._queues)
-
- def _running_job_count(self):
- job_count = 0
- for q in self._queues:
- job_count += len(q.running_tasks)
- self._jobs = job_count
- return job_count
-
- def _start_next_job(self, n=1):
- started_count = 0
- for q in self._queues:
- initial_job_count = len(q.running_tasks)
- q.schedule()
- final_job_count = len(q.running_tasks)
- if final_job_count > initial_job_count:
- started_count += (final_job_count - initial_job_count)
- if started_count >= n:
- break
- return started_count
-
diff --git a/pym/_emerge/RootConfig.py b/pym/_emerge/RootConfig.py
index bb0d7682a..3648d01d7 100644
--- a/pym/_emerge/RootConfig.py
+++ b/pym/_emerge/RootConfig.py
@@ -1,10 +1,10 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
class RootConfig(object):
"""This is used internally by depgraph to track information about a
particular $ROOT."""
- __slots__ = ("root", "setconfig", "sets", "settings", "trees")
+ __slots__ = ("mtimedb", "root", "setconfig", "sets", "settings", "trees")
pkg_tree_map = {
"ebuild" : "porttree",
@@ -31,4 +31,11 @@ class RootConfig(object):
Shallow copy all attributes from another instance.
"""
for k in self.__slots__:
- setattr(self, k, getattr(other, k))
+ try:
+ setattr(self, k, getattr(other, k))
+ except AttributeError:
+ # mtimedb is currently not a required attribute
+ try:
+ delattr(self, k)
+ except AttributeError:
+ pass
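
mtimedb joins RootConfig's __slots__ but remains optional, so the shallow-copy helper now mirrors "attribute absent on the source" as "attribute absent on the copy" rather than raising. A compact sketch of that symmetry on a slotted class:

    class Cfg(object):
        __slots__ = ("mtimedb", "root")

        def update(self, other):
            for k in self.__slots__:
                try:
                    setattr(self, k, getattr(other, k))
                except AttributeError:
                    # Propagate absence instead of failing.
                    try:
                        delattr(self, k)
                    except AttributeError:
                        pass
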
diff --git a/pym/_emerge/Scheduler.py b/pym/_emerge/Scheduler.py
index 0b72a4cfc..dd268f708 100644
--- a/pym/_emerge/Scheduler.py
+++ b/pym/_emerge/Scheduler.py
@@ -1,7 +1,7 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
from collections import deque
import gc
@@ -18,7 +18,7 @@ import zlib
import portage
from portage import os
from portage import _encodings
-from portage import _unicode_decode, _unicode_encode
+from portage import _unicode_encode
from portage.cache.mappings import slot_dict_class
from portage.elog.messages import eerror
from portage.localization import _
@@ -28,6 +28,8 @@ from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import ensure_dirs, writemsg, writemsg_level
from portage.util.SlotObject import SlotObject
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.EventLoop import EventLoop
from portage.package.ebuild.digestcheck import digestcheck
from portage.package.ebuild.digestgen import digestgen
from portage.package.ebuild.doebuild import (_check_temp_dir,
@@ -50,6 +52,7 @@ from _emerge.EbuildFetcher import EbuildFetcher
from _emerge.EbuildPhase import EbuildPhase
from _emerge.emergelog import emergelog
from _emerge.FakeVartree import FakeVartree
+from _emerge.getloadavg import getloadavg
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
from _emerge.JobStatusDisplay import JobStatusDisplay
@@ -64,6 +67,9 @@ if sys.hexversion >= 0x3000000:
class Scheduler(PollScheduler):
+ # max time between loadavg checks (milliseconds)
+ _loadavg_latency = 30000
+
# max time between display status updates (milliseconds)
_max_display_latency = 3000
@@ -79,7 +85,7 @@ class Scheduler(PollScheduler):
_opts_no_self_update = frozenset(["--buildpkgonly",
"--fetchonly", "--fetch-all-uri", "--pretend"])
- class _iface_class(PollScheduler._sched_iface_class):
+ class _iface_class(SchedulerInterface):
__slots__ = ("fetch",
"scheduleSetup", "scheduleUnpack")
@@ -135,8 +141,7 @@ class Scheduler(PollScheduler):
portage.exception.PortageException.__init__(self, value)
def __init__(self, settings, trees, mtimedb, myopts,
- spinner, mergelist=None, favorites=None, graph_config=None,
- uninstall_only=False):
+ spinner, mergelist=None, favorites=None, graph_config=None):
PollScheduler.__init__(self, main=True)
if mergelist is not None:
@@ -152,7 +157,6 @@ class Scheduler(PollScheduler):
self._spinner = spinner
self._mtimedb = mtimedb
self._favorites = favorites
- self._uninstall_only = uninstall_only
self._args_set = InternalPackageSet(favorites, allow_repo=True)
self._build_opts = self._build_opts_class()
@@ -161,6 +165,8 @@ class Scheduler(PollScheduler):
self._build_opts.buildpkg_exclude = InternalPackageSet( \
initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
allow_wildcard=True, allow_repo=True)
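+ # FEATURES=mirror implies fetching every SRC_URI file, so
+ # prefetchers pass this along as fetchall (see below).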
+ if "mirror" in self.settings.features:
+ self._build_opts.fetch_all_uri = True
self._binpkg_opts = self._binpkg_opts_class()
for k in self._binpkg_opts.__slots__:
@@ -217,14 +223,15 @@ class Scheduler(PollScheduler):
fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
schedule=self._schedule_fetch)
self._sched_iface = self._iface_class(
+ self._event_loop,
+ is_background=self._is_background,
fetch=fetch_iface,
scheduleSetup=self._schedule_setup,
- scheduleUnpack=self._schedule_unpack,
- **dict((k, getattr(self.sched_iface, k))
- for k in self.sched_iface.__slots__))
+ scheduleUnpack=self._schedule_unpack)
self._prefetchers = weakref.WeakValueDictionary()
self._pkg_queue = []
+ self._jobs = 0
self._running_tasks = {}
self._completed_tasks = set()
@@ -243,10 +250,15 @@ class Scheduler(PollScheduler):
# The load average takes some time to respond when new
# jobs are added, so we need to limit the rate of adding
# new jobs.
- self._job_delay_max = 10
- self._job_delay_factor = 1.0
- self._job_delay_exp = 1.5
+ self._job_delay_max = 5
self._previous_job_start_time = None
+ self._job_delay_timeout_id = None
+
+ # The load average takes some time to respond after
+ # a SIGSTOP/SIGCONT cycle, so delay scheduling for some
+ # time after SIGCONT is received.
+ self._sigcont_delay = 5
+ self._sigcont_time = None
# This is used to memoize the _choose_pkg() result when
# no packages can be chosen until one of the existing
@@ -300,15 +312,10 @@ class Scheduler(PollScheduler):
if not portage.dep.match_from_list(
portage.const.PORTAGE_PACKAGE_ATOM, [x]):
continue
- if self._running_portage is None or \
- self._running_portage.cpv != x.cpv or \
- '9999' in x.cpv or \
- 'git' in x.inherited or \
- 'git-2' in x.inherited:
- rval = _check_temp_dir(self.settings)
- if rval != os.EX_OK:
- return rval
- _prepare_self_update(self.settings)
+ rval = _check_temp_dir(self.settings)
+ if rval != os.EX_OK:
+ return rval
+ _prepare_self_update(self.settings)
break
return os.EX_OK
@@ -328,15 +335,13 @@ class Scheduler(PollScheduler):
self._set_graph_config(graph_config)
self._blocker_db = {}
dynamic_deps = self.myopts.get("--dynamic-deps", "y") != "n"
- ignore_built_slot_abi_deps = self.myopts.get(
- "--ignore-built-slot-abi-deps", "n") == "y"
+ ignore_built_slot_operator_deps = self.myopts.get(
+ "--ignore-built-slot-operator-deps", "n") == "y"
for root in self.trees:
- if self._uninstall_only:
- continue
if graph_config is None:
fake_vartree = FakeVartree(self.trees[root]["root_config"],
pkg_cache=self._pkg_cache, dynamic_deps=dynamic_deps,
- ignore_built_slot_abi_deps=ignore_built_slot_abi_deps)
+ ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
fake_vartree.sync()
else:
fake_vartree = graph_config.trees[root]['vartree']
@@ -413,7 +418,7 @@ class Scheduler(PollScheduler):
if not (isinstance(task, Package) and \
task.operation == "merge"):
continue
- if 'interactive' in task.metadata.properties:
+ if 'interactive' in task.properties:
interactive_tasks.append(task)
return interactive_tasks
@@ -658,10 +663,11 @@ class Scheduler(PollScheduler):
if value and value.strip():
continue
msg = _("%(var)s is not set... "
- "Are you missing the '%(configroot)setc/make.profile' symlink? "
+ "Are you missing the '%(configroot)s%(profile_path)s' symlink? "
"Is the symlink correct? "
"Is your portage tree complete?") % \
- {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"]}
+ {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"],
+ "profile_path": portage.const.PROFILE_PATH}
out = portage.output.EOutput()
for line in textwrap.wrap(msg, 70):
@@ -721,7 +727,6 @@ class Scheduler(PollScheduler):
return
if self._parallel_fetch:
- self._status_msg("Starting parallel fetch")
prefetchers = self._prefetchers
@@ -753,7 +758,8 @@ class Scheduler(PollScheduler):
prefetcher = EbuildFetcher(background=True,
config_pool=self._ConfigPool(pkg.root,
self._allocate_config, self._deallocate_config),
- fetchonly=1, logfile=self._fetch_log,
+ fetchonly=1, fetchall=self._build_opts.fetch_all_uri,
+ logfile=self._fetch_log,
pkg=pkg, prefetch=True, scheduler=self._sched_iface)
elif pkg.type_name == "binary" and \
@@ -774,10 +780,10 @@ class Scheduler(PollScheduler):
failures = 0
- # Use a local PollScheduler instance here, since we don't
+ # Use a local EventLoop instance here, since we don't
# want tasks here to trigger the usual Scheduler callbacks
# that handle job scheduling and status display.
- sched_iface = PollScheduler().sched_iface
+ sched_iface = SchedulerInterface(EventLoop(main=False))
for x in self._mergelist:
if not isinstance(x, Package):
@@ -786,10 +792,10 @@ class Scheduler(PollScheduler):
if x.operation == "uninstall":
continue
- if x.metadata["EAPI"] in ("0", "1", "2", "3"):
+ if x.eapi in ("0", "1", "2", "3"):
continue
- if "pretend" not in x.metadata.defined_phases:
+ if "pretend" not in x.defined_phases:
continue
out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
@@ -808,7 +814,7 @@ class Scheduler(PollScheduler):
build_dir_path = os.path.join(
os.path.realpath(settings["PORTAGE_TMPDIR"]),
"portage", x.category, x.pf)
- existing_buildir = os.path.isdir(build_dir_path)
+ existing_builddir = os.path.isdir(build_dir_path)
settings["PORTAGE_BUILDDIR"] = build_dir_path
build_dir = EbuildBuildDir(scheduler=sched_iface,
settings=settings)
@@ -819,7 +825,7 @@ class Scheduler(PollScheduler):
# Clean up the existing build dir, in case pkg_pretend
# checks for available space (bug #390711).
- if existing_buildir:
+ if existing_builddir:
if x.built:
tree = "bintree"
infloc = os.path.join(build_dir_path, "build-info")
@@ -908,13 +914,18 @@ class Scheduler(PollScheduler):
failures += 1
portage.elog.elog_process(x.cpv, settings)
finally:
- if current_task is not None and current_task.isAlive():
- current_task.cancel()
- current_task.wait()
- clean_phase = EbuildPhase(background=False,
- phase='clean', scheduler=sched_iface, settings=settings)
- clean_phase.start()
- clean_phase.wait()
+
+ if current_task is not None:
+ if current_task.isAlive():
+ current_task.cancel()
+ current_task.wait()
+ if current_task.returncode == os.EX_OK:
+ clean_phase = EbuildPhase(background=False,
+ phase='clean', scheduler=sched_iface,
+ settings=settings)
+ clean_phase.start()
+ clean_phase.wait()
+
build_dir.unlock()
if failures:
@@ -1004,6 +1015,8 @@ class Scheduler(PollScheduler):
earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+ earlier_sigcont_handler = \
+ signal.signal(signal.SIGCONT, self._sigcont_handler)
try:
rval = self._merge()
@@ -1017,6 +1030,10 @@ class Scheduler(PollScheduler):
signal.signal(signal.SIGTERM, earlier_sigterm_handler)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ if earlier_sigcont_handler is not None:
+ signal.signal(signal.SIGCONT, earlier_sigcont_handler)
+ else:
+ signal.signal(signal.SIGCONT, signal.SIG_DFL)
if received_signal:
sys.exit(received_signal[0])
@@ -1063,7 +1080,8 @@ class Scheduler(PollScheduler):
printer = portage.output.EOutput()
background = self._background
failure_log_shown = False
- if background and len(self._failed_pkgs_all) == 1:
+ if background and len(self._failed_pkgs_all) == 1 and \
+ self.myopts.get('--quiet-fail', 'n') != 'y':
# If only one package failed then just show its
# whole log for easy viewing.
failed_pkg = self._failed_pkgs_all[-1]
@@ -1142,9 +1160,9 @@ class Scheduler(PollScheduler):
printer.eerror(line)
printer.eerror("")
for failed_pkg in self._failed_pkgs_all:
- # Use _unicode_decode() to force unicode format string so
+ # Use unicode_literals to force unicode format string so
# that Package.__unicode__() is called in python2.
- msg = _unicode_decode(" %s") % (failed_pkg.pkg,)
+ msg = " %s" % (failed_pkg.pkg,)
log_path = self._locate_failure_log(failed_pkg)
if log_path is not None:
msg += ", Log file:"
@@ -1341,6 +1359,38 @@ class Scheduler(PollScheduler):
blocker_db = self._blocker_db[pkg.root]
blocker_db.discardBlocker(pkg)
+ def _main_loop(self):
+ term_check_id = self._event_loop.idle_add(self._termination_check)
+ loadavg_check_id = None
+ if self._max_load is not None and \
+ self._loadavg_latency is not None and \
+ (self._max_jobs is True or self._max_jobs > 1):
+ # We have to schedule periodically, in case the load
+ # average has changed since the last call.
+ loadavg_check_id = self._event_loop.timeout_add(
+ self._loadavg_latency, self._schedule)
+
+ try:
+ # Populate initial event sources. Unless we're scheduling
+ # based on load average, we only need to do this once
+ # here, since it can be called during the loop from within
+ # event handlers.
+ self._schedule()
+
+ # Loop while there are jobs to be scheduled.
+ while self._keep_scheduling():
+ self._event_loop.iteration()
+
+ # Clean shutdown of previously scheduled jobs. In the
+ # case of termination, this allows for basic cleanup
+ # such as flushing of buffered output to logs.
+ while self._is_work_scheduled():
+ self._event_loop.iteration()
+ finally:
+ self._event_loop.source_remove(term_check_id)
+ if loadavg_check_id is not None:
+ self._event_loop.source_remove(loadavg_check_id)
+
def _merge(self):
if self._opts_no_background.intersection(self.myopts):
@@ -1351,8 +1401,10 @@ class Scheduler(PollScheduler):
failed_pkgs = self._failed_pkgs
portage.locks._quiet = self._background
portage.elog.add_listener(self._elog_listener)
- display_timeout_id = self.sched_iface.timeout_add(
- self._max_display_latency, self._status_display.display)
+ display_timeout_id = None
+ if self._status_display._isatty and not self._status_display.quiet:
+ display_timeout_id = self._event_loop.timeout_add(
+ self._max_display_latency, self._status_display.display)
rval = os.EX_OK
try:
@@ -1361,7 +1413,8 @@ class Scheduler(PollScheduler):
self._main_loop_cleanup()
portage.locks._quiet = False
portage.elog.remove_listener(self._elog_listener)
- self.sched_iface.source_remove(display_timeout_id)
+ if display_timeout_id is not None:
+ self._event_loop.source_remove(display_timeout_id)
if failed_pkgs:
rval = failed_pkgs[-1].returncode
@@ -1493,12 +1546,15 @@ class Scheduler(PollScheduler):
self._config_pool[settings['EROOT']].append(settings)
def _keep_scheduling(self):
- return bool(not self._terminated_tasks and self._pkg_queue and \
+ return bool(not self._terminated.is_set() and self._pkg_queue and \
not (self._failed_pkgs and not self._build_opts.fetchonly))
def _is_work_scheduled(self):
return bool(self._running_tasks)
+ def _running_job_count(self):
+ return self._jobs
+
def _schedule_tasks(self):
while True:
@@ -1539,6 +1595,9 @@ class Scheduler(PollScheduler):
not self._task_queues.merge)):
break
+ def _sigcont_handler(self, signum, frame):
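+ # Record when SIGCONT was received, so that _job_delay can
+ # postpone new jobs until the load average settles.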
+ self._sigcont_time = time.time()
+
def _job_delay(self):
"""
@rtype: bool
@@ -1549,14 +1608,53 @@ class Scheduler(PollScheduler):
current_time = time.time()
- delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
+ if self._sigcont_time is not None:
+
+ elapsed_seconds = current_time - self._sigcont_time
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds > 0 and \
+ elapsed_seconds < self._sigcont_delay:
+
+ if self._job_delay_timeout_id is not None:
+ self._event_loop.source_remove(
+ self._job_delay_timeout_id)
+
+ self._job_delay_timeout_id = self._event_loop.timeout_add(
+ 1000 * (self._sigcont_delay - elapsed_seconds),
+ self._schedule_once)
+ return True
+
+ # Only set this to None after the delay has expired,
+ # since this method may be called again before the
+ # delay has expired.
+ self._sigcont_time = None
+
+ try:
+ avg1, avg5, avg15 = getloadavg()
+ except OSError:
+ return False
+
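+ # Delay grows linearly with the 1-minute load average
+ # relative to max_load; capped at _job_delay_max below.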
+ delay = self._job_delay_max * avg1 / self._max_load
if delay > self._job_delay_max:
delay = self._job_delay_max
- if (current_time - self._previous_job_start_time) < delay:
+ elapsed_seconds = current_time - self._previous_job_start_time
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds > 0 and elapsed_seconds < delay:
+
+ if self._job_delay_timeout_id is not None:
+ self._event_loop.source_remove(
+ self._job_delay_timeout_id)
+
+ self._job_delay_timeout_id = self._event_loop.timeout_add(
+ 1000 * (delay - elapsed_seconds), self._schedule_once)
return True
return False
+ def _schedule_once(self):
+ self._schedule()
+ return False
+
def _schedule_tasks_imp(self):
"""
@rtype: bool
@@ -1738,7 +1836,7 @@ class Scheduler(PollScheduler):
# scope
e = exc
mydepgraph = e.depgraph
- dropped_tasks = set()
+ dropped_tasks = {}
if e is not None:
def unsatisfied_resume_dep_msg():
@@ -1775,11 +1873,7 @@ class Scheduler(PollScheduler):
return False
if success and self._show_list():
- mylist = mydepgraph.altlist()
- if mylist:
- if "--tree" in self.myopts:
- mylist.reverse()
- mydepgraph.display(mylist, favorites=self._favorites)
+ mydepgraph.display(mydepgraph.altlist(), favorites=self._favorites)
if not success:
self._post_mod_echo_msgs.append(mydepgraph.display_problems)
@@ -1788,7 +1882,7 @@ class Scheduler(PollScheduler):
self._init_graph(mydepgraph.schedulerGraph())
msg_width = 75
- for task in dropped_tasks:
+ for task, atoms in dropped_tasks.items():
if not (isinstance(task, Package) and task.operation == "merge"):
continue
pkg = task
@@ -1796,7 +1890,10 @@ class Scheduler(PollScheduler):
" %s" % (pkg.cpv,)
if pkg.root_config.settings["ROOT"] != "/":
msg += " for %s" % (pkg.root,)
- msg += " dropped due to unsatisfied dependency."
+ if not atoms:
+ msg += " dropped because it is masked or unavailable"
+ else:
+ msg += " dropped because it requires %s" % ", ".join(atoms)
for line in textwrap.wrap(msg, msg_width):
eerror(line, phase="other", key=pkg.cpv)
settings = self.pkgsettings[pkg.root]
@@ -1841,11 +1938,21 @@ class Scheduler(PollScheduler):
root_config = pkg.root_config
world_set = root_config.sets["selected"]
world_locked = False
- if hasattr(world_set, "lock"):
- world_set.lock()
- world_locked = True
+ atom = None
+
+ if pkg.operation != "uninstall":
+ # Do this before acquiring the lock, since it queries the
+ # portdbapi which can call the global event loop, triggering
+ # a concurrent call to this method or something else that
+ # needs an exclusive (non-reentrant) lock on the world file.
+ atom = create_world_atom(pkg, args_set, root_config)
try:
+
+ if hasattr(world_set, "lock"):
+ world_set.lock()
+ world_locked = True
+
if hasattr(world_set, "load"):
world_set.load() # maybe it's changed on disk
@@ -1857,8 +1964,7 @@ class Scheduler(PollScheduler):
for s in pkg.root_config.setconfig.active:
world_set.remove(SETPREFIX+s)
else:
- atom = create_world_atom(pkg, args_set, root_config)
- if atom:
+ if atom is not None:
if hasattr(world_set, "add"):
self._status_msg(('Recording %s in "world" ' + \
'favorites file...') % atom)
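
The job-delay rework above drops the old exponential backoff (_job_delay_factor * jobs ** _job_delay_exp) in favor of a delay proportional to the 1-minute load average, re-armed through the event loop instead of busy polling. A minimal standalone sketch of the idea, assuming a GLib-style timeout_add(milliseconds, callback) and a caller-supplied schedule_once callback (names illustrative, not Portage's API):

import os
import time

JOB_DELAY_MAX = 5  # seconds, matching the new _job_delay_max

def job_delay(max_load, previous_start, timeout_add, schedule_once):
    """Return True if spawning the next job should be postponed."""
    try:
        avg1, _avg5, _avg15 = os.getloadavg()
    except OSError:
        return False
    # Delay grows linearly with the 1-minute load average,
    # capped at JOB_DELAY_MAX seconds.
    delay = min(JOB_DELAY_MAX * avg1 / max_load, JOB_DELAY_MAX)
    elapsed = time.time() - previous_start
    if 0 < elapsed < delay:
        # Re-check once the remaining delay has expired (interval in ms).
        timeout_add(int(1000 * (delay - elapsed)), schedule_once)
        return True
    return False
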
diff --git a/pym/_emerge/SpawnProcess.py b/pym/_emerge/SpawnProcess.py
index 9fbc96472..15d3dc5cf 100644
--- a/pym/_emerge/SpawnProcess.py
+++ b/pym/_emerge/SpawnProcess.py
@@ -1,17 +1,23 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 2008-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from _emerge.SubProcess import SubProcess
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
+import errno
+import logging
+import signal
import sys
-from portage.cache.mappings import slot_dict_class
+
+from _emerge.SubProcess import SubProcess
import portage
-from portage import _encodings
-from portage import _unicode_encode
from portage import os
from portage.const import BASH_BINARY
-import fcntl
-import errno
-import gzip
+from portage.util import writemsg_level
+from portage.util._async.PipeLogger import PipeLogger
class SpawnProcess(SubProcess):
@@ -23,31 +29,27 @@ class SpawnProcess(SubProcess):
_spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
"uid", "gid", "groups", "umask", "logfile",
- "path_lookup", "pre_exec")
+ "path_lookup", "pre_exec", "close_fds", "cgroup",
+ "unshare_ipc", "unshare_net")
__slots__ = ("args",) + \
- _spawn_kwarg_names + ("_log_file_real", "_selinux_type",)
-
- _file_names = ("log", "process", "stdout")
- _files_dict = slot_dict_class(_file_names, prefix="")
+ _spawn_kwarg_names + ("_pipe_logger", "_selinux_type",)
def _start(self):
if self.fd_pipes is None:
self.fd_pipes = {}
+ else:
+ self.fd_pipes = self.fd_pipes.copy()
fd_pipes = self.fd_pipes
- self._files = self._files_dict()
- files = self._files
-
master_fd, slave_fd = self._pipe(fd_pipes)
- fcntl.fcntl(master_fd, fcntl.F_SETFL,
- fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
- files.process = master_fd
- logfile = None
- if self._can_log(slave_fd):
- logfile = self.logfile
+ can_log = self._can_log(slave_fd)
+ if can_log:
+ log_file_path = self.logfile
+ else:
+ log_file_path = None
null_input = None
if not self.background or 0 in fd_pipes:
@@ -62,48 +64,34 @@ class SpawnProcess(SubProcess):
null_input = os.open('/dev/null', os.O_RDWR)
fd_pipes[0] = null_input
- fd_pipes.setdefault(0, sys.stdin.fileno())
- fd_pipes.setdefault(1, sys.stdout.fileno())
- fd_pipes.setdefault(2, sys.stderr.fileno())
+ fd_pipes.setdefault(0, portage._get_stdin().fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stderr__.fileno())
# flush any pending output
+ stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
for fd in fd_pipes.values():
- if fd == sys.stdout.fileno():
- sys.stdout.flush()
- if fd == sys.stderr.fileno():
- sys.stderr.flush()
+ if fd in stdout_filenos:
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ break
- if logfile is not None:
+ fd_pipes_orig = fd_pipes.copy()
- fd_pipes_orig = fd_pipes.copy()
+ if log_file_path is not None or self.background:
fd_pipes[1] = slave_fd
fd_pipes[2] = slave_fd
- files.log = open(_unicode_encode(logfile,
- encoding=_encodings['fs'], errors='strict'), mode='ab')
- if logfile.endswith('.gz'):
- self._log_file_real = files.log
- files.log = gzip.GzipFile(filename='', mode='ab',
- fileobj=files.log)
-
- portage.util.apply_secpass_permissions(logfile,
- uid=portage.portage_uid, gid=portage.portage_gid,
- mode=0o660)
-
- if not self.background:
- files.stdout = os.dup(fd_pipes_orig[1])
-
- output_handler = self._output_handler
-
else:
-
- # Create a dummy pipe so the scheduler can monitor
- # the process from inside a poll() loop.
- fd_pipes[self._dummy_pipe_fd] = slave_fd
- if self.background:
- fd_pipes[1] = slave_fd
- fd_pipes[2] = slave_fd
- output_handler = self._dummy_handler
+ # Create a dummy pipe that PipeLogger uses to efficiently
+ # monitor for process exit by listening for the EOF event.
+ # Re-use of the allocated fd number for the key in fd_pipes
+ # guarantees that the keys will not collide for similarly
+ # allocated pipes which are used by callers such as
+ # FileDigester and MergeProcess. See the _setup_pipes
+ # docstring for more benefits of this allocation approach.
+ self._dummy_pipe_fd = slave_fd
+ fd_pipes[slave_fd] = slave_fd
kwargs = {}
for k in self._spawn_kwarg_names:
@@ -115,10 +103,6 @@ class SpawnProcess(SubProcess):
kwargs["returnpid"] = True
kwargs.pop("logfile", None)
- self._reg_id = self.scheduler.register(files.process,
- self._registered_events, output_handler)
- self._registered = True
-
retval = self._spawn(self.args, **kwargs)
os.close(slave_fd)
@@ -129,11 +113,32 @@ class SpawnProcess(SubProcess):
# spawn failed
self._unregister()
self._set_returncode((self.pid, retval))
- self.wait()
+ self._async_wait()
return
self.pid = retval[0]
- portage.process.spawned_pids.remove(self.pid)
+
+ stdout_fd = None
+ if can_log and not self.background:
+ stdout_fd = os.dup(fd_pipes_orig[1])
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(stdout_fd, fcntl.F_SETFD,
+ fcntl.fcntl(stdout_fd,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._pipe_logger = PipeLogger(background=self.background,
+ scheduler=self.scheduler, input_fd=master_fd,
+ log_file_path=log_file_path,
+ stdout_fd=stdout_fd)
+ self._pipe_logger.addExitListener(self._pipe_logger_exit)
+ self._pipe_logger.start()
+ self._registered = True
def _can_log(self, slave_fd):
return True
@@ -157,92 +162,56 @@ class SpawnProcess(SubProcess):
return spawn_func(args, **kwargs)
- def _output_handler(self, fd, event):
-
- files = self._files
- while True:
- buf = self._read_buf(fd, event)
-
- if buf is None:
- # not a POLLIN event, EAGAIN, etc...
- break
-
- if not buf:
- # EOF
- self._unregister()
- self.wait()
- break
-
- else:
- if not self.background:
- write_successful = False
- failures = 0
- while True:
- try:
- if not write_successful:
- os.write(files.stdout, buf)
- write_successful = True
- break
- except OSError as e:
- if e.errno != errno.EAGAIN:
- raise
- del e
- failures += 1
- if failures > 50:
- # Avoid a potentially infinite loop. In
- # most cases, the failure count is zero
- # and it's unlikely to exceed 1.
- raise
-
- # This means that a subprocess has put an inherited
- # stdio file descriptor (typically stdin) into
- # O_NONBLOCK mode. This is not acceptable (see bug
- # #264435), so revert it. We need to use a loop
- # here since there's a race condition due to
- # parallel processes being able to change the
- # flags on the inherited file descriptor.
- # TODO: When possible, avoid having child processes
- # inherit stdio file descriptors from portage
- # (maybe it can't be avoided with
- # PROPERTIES=interactive).
- fcntl.fcntl(files.stdout, fcntl.F_SETFL,
- fcntl.fcntl(files.stdout,
- fcntl.F_GETFL) ^ os.O_NONBLOCK)
-
- files.log.write(buf)
- files.log.flush()
-
- self._unregister_if_appropriate(event)
-
- return True
-
- def _dummy_handler(self, fd, event):
- """
- This method is mainly interested in detecting EOF, since
- the only purpose of the pipe is to allow the scheduler to
- monitor the process from inside a poll() loop.
- """
-
- while True:
- buf = self._read_buf(fd, event)
-
- if buf is None:
- # not a POLLIN event, EAGAIN, etc...
- break
-
- if not buf:
- # EOF
- self._unregister()
- self.wait()
- break
-
- self._unregister_if_appropriate(event)
-
- return True
-
- def _unregister(self):
- super(SpawnProcess, self)._unregister()
- if self._log_file_real is not None:
- # Avoid "ResourceWarning: unclosed file" since python 3.2.
- self._log_file_real.close()
- self._log_file_real = None
+ def _pipe_logger_exit(self, pipe_logger):
+ self._pipe_logger = None
+ self._unregister()
+ self.wait()
+
+ def _waitpid_loop(self):
+ SubProcess._waitpid_loop(self)
+
+ pipe_logger = self._pipe_logger
+ if pipe_logger is not None:
+ self._pipe_logger = None
+ pipe_logger.removeExitListener(self._pipe_logger_exit)
+ pipe_logger.cancel()
+ pipe_logger.wait()
+
+ def _set_returncode(self, wait_retval):
+ SubProcess._set_returncode(self, wait_retval)
+
+ if self.cgroup:
+ def get_pids(cgroup):
+ try:
+ with open(os.path.join(cgroup, 'cgroup.procs'), 'r') as f:
+ return [int(p) for p in f.read().split()]
+ except OSError:
+ # cgroup removed already?
+ return []
+
+ def kill_all(pids, sig):
+ for p in pids:
+ try:
+ os.kill(p, sig)
+ except OSError as e:
+ if e.errno == errno.EPERM:
+ # Reported with hardened kernel (bug #358211).
+ writemsg_level(
+ "!!! kill: (%i) - Operation not permitted\n" %
+ (p,), level=logging.ERROR,
+ noiselevel=-1)
+ elif e.errno != errno.ESRCH:
+ raise
+
+ # step 1: kill all orphans
+ pids = get_pids(self.cgroup)
+ if pids:
+ kill_all(pids, signal.SIGKILL)
+
+ # step 2: remove the cgroup
+ try:
+ os.rmdir(self.cgroup)
+ except OSError:
+ # it may be removed already, or busy
+ # we can't do anything good about it
+ pass
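
The _set_returncode() hook above tears the cgroup down in two steps: kill anything still listed in it, then remove the directory. A rough standalone equivalent, assuming a cgroup directory that exposes a cgroup.procs file as in the diff (a sketch, not the Portage implementation):

import errno
import os
import signal

def cleanup_cgroup(cgroup_dir):
    # Step 1: kill any orphaned processes still listed in the cgroup.
    try:
        with open(os.path.join(cgroup_dir, 'cgroup.procs')) as f:
            pids = [int(p) for p in f.read().split()]
    except (OSError, IOError):
        pids = []  # cgroup may already have been removed
    for pid in pids:
        try:
            os.kill(pid, signal.SIGKILL)
        except OSError as e:
            if e.errno != errno.ESRCH:  # ignore already-exited processes
                raise
    # Step 2: remove the (now empty) cgroup directory.
    try:
        os.rmdir(cgroup_dir)
    except OSError:
        pass  # busy or already removed; nothing more we can do
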
diff --git a/pym/_emerge/SubProcess.py b/pym/_emerge/SubProcess.py
index 76b313fc2..13d938297 100644
--- a/pym/_emerge/SubProcess.py
+++ b/pym/_emerge/SubProcess.py
@@ -1,7 +1,10 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import logging
+
from portage import os
+from portage.util import writemsg_level
from _emerge.AbstractPollTask import AbstractPollTask
import signal
import errno
@@ -9,12 +12,7 @@ import errno
class SubProcess(AbstractPollTask):
__slots__ = ("pid",) + \
- ("_files", "_reg_id")
-
- # A file descriptor is required for the scheduler to monitor changes from
- # inside a poll() loop. When logging is not enabled, create a pipe just to
- # serve this purpose alone.
- _dummy_pipe_fd = 9
+ ("_dummy_pipe_fd", "_files", "_reg_id")
# This is how much time we allow for waitpid to succeed after
# we've sent a kill signal to our subprocess.
@@ -50,7 +48,13 @@ class SubProcess(AbstractPollTask):
try:
os.kill(self.pid, signal.SIGTERM)
except OSError as e:
- if e.errno != errno.ESRCH:
+ if e.errno == errno.EPERM:
+ # Reported with hardened kernel (bug #358211).
+ writemsg_level(
+ "!!! kill: (%i) - Operation not permitted\n" %
+ (self.pid,), level=logging.ERROR,
+ noiselevel=-1)
+ elif e.errno != errno.ESRCH:
raise
def isAlive(self):
@@ -69,7 +73,13 @@ class SubProcess(AbstractPollTask):
try:
os.kill(self.pid, signal.SIGKILL)
except OSError as e:
- if e.errno != errno.ESRCH:
+ if e.errno == errno.EPERM:
+ # Reported with hardened kernel (bug #358211).
+ writemsg_level(
+ "!!! kill: (%i) - Operation not permitted\n" %
+ (self.pid,), level=logging.ERROR,
+ noiselevel=-1)
+ elif e.errno != errno.ESRCH:
raise
del e
self._wait_loop(timeout=self._cancel_timeout)
@@ -116,7 +126,7 @@ class SubProcess(AbstractPollTask):
self._registered = False
if self._reg_id is not None:
- self.scheduler.unregister(self._reg_id)
+ self.scheduler.source_remove(self._reg_id)
self._reg_id = None
if self._files is not None:
diff --git a/pym/_emerge/Task.py b/pym/_emerge/Task.py
index 40f5066c0..250d45802 100644
--- a/pym/_emerge/Task.py
+++ b/pym/_emerge/Task.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.util.SlotObject import SlotObject
@@ -41,3 +41,10 @@ class Task(SlotObject):
strings.
"""
return "(%s)" % ", ".join(("'%s'" % x for x in self._hash_key))
+
+ def __repr__(self):
+ if self._hash_key is None:
+ # triggered by python-trace
+ return SlotObject.__repr__(self)
+ return "<%s (%s)>" % (self.__class__.__name__,
+ ", ".join(("'%s'" % x for x in self._hash_key)))
diff --git a/pym/_emerge/TaskScheduler.py b/pym/_emerge/TaskScheduler.py
deleted file mode 100644
index 583bfe323..000000000
--- a/pym/_emerge/TaskScheduler.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 1999-2012 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from _emerge.QueueScheduler import QueueScheduler
-from _emerge.SequentialTaskQueue import SequentialTaskQueue
-
-class TaskScheduler(object):
-
- """
- A simple way to handle scheduling of AsynchronousTask instances. Simply
- add tasks and call run(). The run() method returns when no tasks remain.
- """
-
- def __init__(self, main=True, max_jobs=None, max_load=None):
- self._queue = SequentialTaskQueue(max_jobs=max_jobs)
- self._scheduler = QueueScheduler(main=main,
- max_jobs=max_jobs, max_load=max_load)
- self.sched_iface = self._scheduler.sched_iface
- self.run = self._scheduler.run
- self.clear = self._scheduler.clear
- self.wait = self._queue.wait
- self._scheduler.add(self._queue)
-
- def add(self, task):
- self._queue.add(task)
-
diff --git a/pym/_emerge/UnmergeDepPriority.py b/pym/_emerge/UnmergeDepPriority.py
index 43166006f..ec44a67a1 100644
--- a/pym/_emerge/UnmergeDepPriority.py
+++ b/pym/_emerge/UnmergeDepPriority.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AbstractDepPriority import AbstractDepPriority
@@ -7,15 +7,16 @@ class UnmergeDepPriority(AbstractDepPriority):
"""
Combination of properties Priority Category
- runtime 0 HARD
- runtime_post -1 HARD
- buildtime -2 SOFT
- (none of the above) -2 SOFT
+ runtime_slot_op 0 HARD
+ runtime -1 HARD
+ runtime_post -2 HARD
+ buildtime -3 SOFT
+ (none of the above) -3 SOFT
"""
MAX = 0
- SOFT = -2
- MIN = -2
+ SOFT = -3
+ MIN = -3
def __init__(self, **kwargs):
AbstractDepPriority.__init__(self, **kwargs)
@@ -23,17 +24,21 @@ class UnmergeDepPriority(AbstractDepPriority):
self.optional = True
def __int__(self):
- if self.runtime:
+ if self.runtime_slot_op:
return 0
- if self.runtime_post:
+ if self.runtime:
return -1
- if self.buildtime:
+ if self.runtime_post:
return -2
- return -2
+ if self.buildtime:
+ return -3
+ return -3
def __str__(self):
if self.ignored:
return "ignored"
+ if self.runtime_slot_op:
+ return "hard slot op"
myvalue = self.__int__()
if myvalue > self.SOFT:
return "hard"
diff --git a/pym/_emerge/UseFlagDisplay.py b/pym/_emerge/UseFlagDisplay.py
index 3daca19e1..f46047454 100644
--- a/pym/_emerge/UseFlagDisplay.py
+++ b/pym/_emerge/UseFlagDisplay.py
@@ -1,10 +1,12 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
from itertools import chain
import sys
-from portage import _encodings, _unicode_decode, _unicode_encode
+from portage import _encodings, _unicode_encode
from portage.output import red
from portage.util import cmp_sort_key
from portage.output import blue
@@ -114,9 +116,9 @@ def pkg_use_display(pkg, opts, modified_use=None):
flags.sort(key=UseFlagDisplay.sort_combined)
else:
flags.sort(key=UseFlagDisplay.sort_separated)
- # Use _unicode_decode() to force unicode format string so
+ # Use unicode_literals to force unicode format string so
# that UseFlagDisplay.__unicode__() is called in python2.
flag_displays.append('%s="%s"' % (varname,
- ' '.join(_unicode_decode("%s") % (f,) for f in flags)))
+ ' '.join("%s" % (f,) for f in flags)))
return ' '.join(flag_displays)
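
The _unicode_decode("%s") removal works because unicode_literals makes every plain string literal unicode, and in Python 2 formatting with a unicode string invokes __unicode__() on the operand. A tiny illustration of that detail (a generic example, unrelated to Portage's classes):

from __future__ import unicode_literals

class Flag(object):
    def __str__(self):
        return 'str-form'
    def __unicode__(self):  # only consulted by Python 2
        return 'unicode-form'

# "%s" is a unicode literal here, so Python 2 formats via
# __unicode__(); Python 3 always uses __str__().
print("%s" % (Flag(),))  # 'unicode-form' on py2, 'str-form' on py3
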
diff --git a/pym/_emerge/actions.py b/pym/_emerge/actions.py
index 9a023a84a..2a1354b6b 100644
--- a/pym/_emerge/actions.py
+++ b/pym/_emerge/actions.py
@@ -1,7 +1,7 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
import errno
import logging
@@ -18,27 +18,35 @@ import sys
import tempfile
import textwrap
import time
+import warnings
from itertools import chain
import portage
portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dbapi._similar_name_search:similar_name_search',
+ 'portage.debug',
'portage.news:count_unread_news,display_news_notifications',
+ 'portage.util._get_vm_info:get_vm_info',
+ '_emerge.chk_updated_cfg_files:chk_updated_cfg_files',
+ '_emerge.help:help@emerge_help',
+ '_emerge.post_emerge:display_news_notification,post_emerge',
+ '_emerge.stdout_spinner:stdout_spinner',
)
from portage.localization import _
from portage import os
from portage import shutil
-from portage import eapi_is_supported, _unicode_decode
+from portage import eapi_is_supported, _encodings, _unicode_decode
from portage.cache.cache_errors import CacheError
-from portage.const import GLOBAL_CONFIG_PATH
-from portage.const import _ENABLE_DYN_LINK_MAP
+from portage.const import GLOBAL_CONFIG_PATH, VCS_DIRS, _DEPCLEAN_LIB_CHECK_DEFAULT
+from portage.const import SUPPORTED_BINPKG_FORMATS, TIMESTAMP_FORMAT
from portage.dbapi.dep_expand import dep_expand
from portage.dbapi._expand_new_virt import expand_new_virt
from portage.dep import Atom
from portage.eclass_cache import hashed_path
-from portage.exception import InvalidAtom, InvalidData
+from portage.exception import InvalidAtom, InvalidData, ParseError
from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
- red, yellow
+ red, xtermTitle, xtermTitleReset, yellow
good = create_color_func("GOOD")
bad = create_color_func("BAD")
warn = create_color_func("WARN")
@@ -46,9 +54,13 @@ from portage.package.ebuild._ipc.QueryCommand import QueryCommand
from portage.package.ebuild.doebuild import _check_temp_dir
from portage._sets import load_default_config, SETPREFIX
from portage._sets.base import InternalPackageSet
-from portage.util import cmp_sort_key, writemsg, \
+from portage.util import cmp_sort_key, writemsg, varexpand, \
writemsg_level, writemsg_stdout
from portage.util.digraph import digraph
+from portage.util.SlotObject import SlotObject
+from portage.util._async.run_main_scheduler import run_main_scheduler
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
from portage._global_updates import _global_updates
from _emerge.clear_caches import clear_caches
@@ -277,8 +289,14 @@ def action_build(settings, trees, mtimedb,
"dropped due to\n" + \
"!!! masking or unsatisfied dependencies:\n\n",
noiselevel=-1)
- for task in dropped_tasks:
- portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
+ for task, atoms in dropped_tasks.items():
+ if not atoms:
+ writemsg(" %s is masked or unavailable\n" %
+ (task,), noiselevel=-1)
+ else:
+ writemsg(" %s requires %s\n" %
+ (task, ", ".join(atoms)), noiselevel=-1)
+
portage.writemsg("\n", noiselevel=-1)
del dropped_tasks
else:
@@ -309,6 +327,7 @@ def action_build(settings, trees, mtimedb,
mydepgraph.display_problems()
return 1
+ mergecount = None
if "--pretend" not in myopts and \
("--ask" in myopts or "--tree" in myopts or \
"--verbose" in myopts) and \
@@ -320,7 +339,7 @@ def action_build(settings, trees, mtimedb,
return os.EX_OK
favorites = mtimedb["resume"]["favorites"]
retval = mydepgraph.display(
- mydepgraph.altlist(reversed=tree),
+ mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
@@ -329,7 +348,7 @@ def action_build(settings, trees, mtimedb,
prompt="Would you like to resume merging these packages?"
else:
retval = mydepgraph.display(
- mydepgraph.altlist(reversed=("--tree" in myopts)),
+ mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
@@ -340,6 +359,7 @@ def action_build(settings, trees, mtimedb,
if isinstance(x, Package) and x.operation == "merge":
mergecount += 1
+ prompt = None
if mergecount==0:
sets = trees[settings['EROOT']]['root_config'].sets
world_candidates = None
@@ -352,14 +372,11 @@ def action_build(settings, trees, mtimedb,
world_candidates = [x for x in favorites \
if not (x.startswith(SETPREFIX) and \
not sets[x[1:]].world_candidate)]
+
if "selective" in myparams and \
not oneshot and world_candidates:
- print()
- for x in world_candidates:
- print(" %s %s" % (good("*"), x))
- prompt="Would you like to add these packages to your world favorites?"
- elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
- prompt="Nothing to merge; would you like to auto-clean packages?"
+ # Prompt later, inside saveNomergeFavorites.
+ prompt = None
else:
print()
print("Nothing to merge; quitting.")
@@ -370,13 +387,15 @@ def action_build(settings, trees, mtimedb,
else:
prompt="Would you like to merge these packages?"
print()
- if "--ask" in myopts and userquery(prompt, enter_invalid) == "No":
+ if prompt is not None and "--ask" in myopts and \
+ userquery(prompt, enter_invalid) == "No":
print()
print("Quitting.")
print()
return 128 + signal.SIGINT
# Don't ask again (e.g. when auto-cleaning packages after merge)
- myopts.pop("--ask", None)
+ if mergecount != 0:
+ myopts.pop("--ask", None)
if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
if ("--resume" in myopts):
@@ -386,7 +405,7 @@ def action_build(settings, trees, mtimedb,
return os.EX_OK
favorites = mtimedb["resume"]["favorites"]
retval = mydepgraph.display(
- mydepgraph.altlist(reversed=tree),
+ mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
@@ -394,39 +413,14 @@ def action_build(settings, trees, mtimedb,
return retval
else:
retval = mydepgraph.display(
- mydepgraph.altlist(reversed=("--tree" in myopts)),
+ mydepgraph.altlist(),
favorites=favorites)
mydepgraph.display_problems()
mergelist_shown = True
if retval != os.EX_OK:
return retval
- if "--buildpkgonly" in myopts:
- graph_copy = mydepgraph._dynamic_config.digraph.copy()
- removed_nodes = set()
- for node in graph_copy:
- if not isinstance(node, Package) or \
- node.operation == "nomerge":
- removed_nodes.add(node)
- graph_copy.difference_update(removed_nodes)
- if not graph_copy.hasallzeros(ignore_priority = \
- DepPrioritySatisfiedRange.ignore_medium):
- print("\n!!! --buildpkgonly requires all dependencies to be merged.")
- print("!!! You have to merge the dependencies before you can build this package.\n")
- return 1
+
else:
- if "--buildpkgonly" in myopts:
- graph_copy = mydepgraph._dynamic_config.digraph.copy()
- removed_nodes = set()
- for node in graph_copy:
- if not isinstance(node, Package) or \
- node.operation == "nomerge":
- removed_nodes.add(node)
- graph_copy.difference_update(removed_nodes)
- if not graph_copy.hasallzeros(ignore_priority = \
- DepPrioritySatisfiedRange.ignore_medium):
- print("\n!!! --buildpkgonly requires all dependencies to be merged.")
- print("!!! Cannot merge requested packages. Merge deps and try again.\n")
- return 1
if not mergelist_shown:
# If we haven't already shown the merge list above, at
@@ -446,25 +440,29 @@ def action_build(settings, trees, mtimedb,
mydepgraph.saveNomergeFavorites()
- mergetask = Scheduler(settings, trees, mtimedb, myopts,
- spinner, favorites=favorites,
- graph_config=mydepgraph.schedulerGraph())
-
- del mydepgraph
- clear_caches(trees)
-
- retval = mergetask.merge()
-
- if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
- if "yes" == settings.get("AUTOCLEAN"):
- portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
- unmerge(trees[settings['EROOT']]['root_config'],
- myopts, "clean", [],
- ldpath_mtimes, autoclean=1)
- else:
- portage.writemsg_stdout(colorize("WARN", "WARNING:")
- + " AUTOCLEAN is disabled. This can cause serious"
- + " problems due to overlapping packages.\n")
+ if mergecount == 0:
+ retval = os.EX_OK
+ else:
+ mergetask = Scheduler(settings, trees, mtimedb, myopts,
+ spinner, favorites=favorites,
+ graph_config=mydepgraph.schedulerGraph())
+
+ del mydepgraph
+ clear_caches(trees)
+
+ retval = mergetask.merge()
+
+ if retval == os.EX_OK and \
+ not (buildpkgonly or fetchonly or pretend):
+ if "yes" == settings.get("AUTOCLEAN"):
+ portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
+ unmerge(trees[settings['EROOT']]['root_config'],
+ myopts, "clean", [],
+ ldpath_mtimes, autoclean=1)
+ else:
+ portage.writemsg_stdout(colorize("WARN", "WARNING:")
+ + " AUTOCLEAN is disabled. This can cause serious"
+ + " problems due to overlapping packages.\n")
return retval
@@ -544,7 +542,8 @@ def action_depclean(settings, trees, ldpath_mtimes,
# specific packages.
msg = []
- if not _ENABLE_DYN_LINK_MAP:
+ if "preserve-libs" not in settings.features and \
+ not myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) != "n":
msg.append("Depclean may break link level dependencies. Thus, it is\n")
msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
@@ -610,11 +609,17 @@ def action_depclean(settings, trees, ldpath_mtimes,
if not cleanlist and "--quiet" in myopts:
return rval
+ set_atoms = {}
+ for k in ("system", "selected"):
+ try:
+ set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+ except portage.exception.PackageSetNotFound:
+ # A nested set could not be resolved, so ignore nested sets.
+ set_atoms[k] = root_config.sets[k].getAtoms()
+
print("Packages installed: " + str(len(vardb.cpv_all())))
- print("Packages in world: " + \
- str(len(root_config.sets["selected"].getAtoms())))
- print("Packages in system: " + \
- str(len(root_config.sets["system"].getAtoms())))
+ print("Packages in world: %d" % len(set_atoms["selected"]))
+ print("Packages in system: %d" % len(set_atoms["system"]))
print("Required packages: "+str(req_pkg_count))
if "--pretend" in myopts:
print("Number to remove: "+str(len(cleanlist)))
@@ -647,13 +652,21 @@ def calc_depclean(settings, trees, ldpath_mtimes,
required_sets[protected_set_name] = protected_set
system_set = psets["system"]
- if not system_set or not selected_set:
+ set_atoms = {}
+ for k in ("system", "selected"):
+ try:
+ set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+ except portage.exception.PackageSetNotFound:
+ # A nested set could not be resolved, so ignore nested sets.
+ set_atoms[k] = root_config.sets[k].getAtoms()
+
+ if not set_atoms["system"] or not set_atoms["selected"]:
- if not system_set:
+ if not set_atoms["system"]:
writemsg_level("!!! You have no system list.\n",
level=logging.ERROR, noiselevel=-1)
- if not selected_set:
+ if not set_atoms["selected"]:
writemsg_level("!!! You have no world file.\n",
level=logging.WARNING, noiselevel=-1)
@@ -697,7 +710,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
continue
except portage.exception.InvalidDependString as e:
show_invalid_depstring_notice(pkg,
- pkg.metadata["PROVIDE"], str(e))
+ pkg._metadata["PROVIDE"], _unicode(e))
del e
protected_set.add("=" + pkg.cpv)
continue
@@ -751,7 +764,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
continue
except portage.exception.InvalidDependString as e:
show_invalid_depstring_notice(pkg,
- pkg.metadata["PROVIDE"], str(e))
+ pkg._metadata["PROVIDE"], _unicode(e))
del e
protected_set.add("=" + pkg.cpv)
continue
@@ -769,7 +782,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
required_sets['__excluded__'].add("=" + pkg.cpv)
except portage.exception.InvalidDependString as e:
show_invalid_depstring_notice(pkg,
- pkg.metadata["PROVIDE"], str(e))
+ pkg._metadata["PROVIDE"], _unicode(e))
del e
required_sets['__excluded__'].add("=" + pkg.cpv)
@@ -805,7 +818,12 @@ def calc_depclean(settings, trees, ldpath_mtimes,
msg.append("the following required packages not being installed:")
msg.append("")
for atom, parent in unresolvable:
- msg.append(" %s pulled in by:" % (atom,))
+ if atom != atom.unevaluated_atom and \
+ vardb.match(_unicode(atom)):
+ msg.append(" %s (%s) pulled in by:" %
+ (atom.unevaluated_atom, atom))
+ else:
+ msg.append(" %s pulled in by:" % (atom,))
msg.append(" %s" % (parent,))
msg.append("")
msg.extend(textwrap.wrap(
@@ -848,15 +866,27 @@ def calc_depclean(settings, trees, ldpath_mtimes,
required_pkgs_total += 1
def show_parents(child_node):
- parent_nodes = graph.parent_nodes(child_node)
- if not parent_nodes:
+ parent_atoms = \
+ resolver._dynamic_config._parent_atoms.get(child_node, [])
+
+ # Never display the special internal protected_set.
+ parent_atoms = [parent_atom for parent_atom in parent_atoms
+ if not (isinstance(parent_atom[0], SetArg) and
+ parent_atom[0].name == protected_set_name)]
+
+ if not parent_atoms:
# With --prune, the highest version can be pulled in without any
# real parent since all installed packages are pulled in. In that
# case there's nothing to show here.
return
+ parent_atom_dict = {}
+ for parent, atom in parent_atoms:
+ parent_atom_dict.setdefault(parent, []).append(atom)
+
parent_strs = []
- for node in parent_nodes:
- parent_strs.append(str(getattr(node, "cpv", node)))
+ for parent, atoms in parent_atom_dict.items():
+ parent_strs.append("%s requires %s" %
+ (getattr(parent, "cpv", parent), ", ".join(atoms)))
parent_strs.sort()
msg = []
msg.append(" %s pulled in by:\n" % (child_node.cpv,))
@@ -881,12 +911,6 @@ def calc_depclean(settings, trees, ldpath_mtimes,
graph.debug_print()
writemsg("\n", noiselevel=-1)
- # Never display the special internal protected_set.
- for node in graph:
- if isinstance(node, SetArg) and node.name == protected_set_name:
- graph.remove(node)
- break
-
pkgs_to_remove = []
if action == "depclean":
@@ -939,10 +963,19 @@ def calc_depclean(settings, trees, ldpath_mtimes,
cleanlist = create_cleanlist()
clean_set = set(cleanlist)
- if cleanlist and \
- real_vardb._linkmap is not None and \
- myopts.get("--depclean-lib-check") != "n" and \
- "preserve-libs" not in settings.features:
+ depclean_lib_check = cleanlist and real_vardb._linkmap is not None and \
+ myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) != "n"
+ preserve_libs = "preserve-libs" in settings.features
+ preserve_libs_restrict = False
+
+ if depclean_lib_check and preserve_libs:
+ for pkg in cleanlist:
+ if "preserve-libs" in pkg.restrict:
+ preserve_libs_restrict = True
+ break
+
+ if depclean_lib_check and \
+ (preserve_libs_restrict or not preserve_libs):
# Check if any of these packages are the sole providers of libraries
# with consumers that have not been selected for removal. If so, these
@@ -955,6 +988,13 @@ def calc_depclean(settings, trees, ldpath_mtimes,
writemsg_level(">>> Checking for lib consumers...\n")
for pkg in cleanlist:
+
+ if preserve_libs and "preserve-libs" not in pkg.restrict:
+ # Any needed libraries will be preserved
+ # when this package is unmerged, so there's
+ # no need to account for it here.
+ continue
+
pkg_dblink = real_vardb._dblink(pkg.cpv)
consumers = {}
@@ -1109,7 +1149,8 @@ def calc_depclean(settings, trees, ldpath_mtimes,
"installed", root_config, installed=True)
if not resolver._add_pkg(pkg,
Dependency(parent=consumer_pkg,
- priority=UnmergeDepPriority(runtime=True),
+ priority=UnmergeDepPriority(runtime=True,
+ runtime_slot_op=True),
root=pkg.root)):
resolver.display_problems()
return 1, [], False, 0
@@ -1146,30 +1187,30 @@ def calc_depclean(settings, trees, ldpath_mtimes,
graph = digraph()
del cleanlist[:]
- dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
runtime = UnmergeDepPriority(runtime=True)
runtime_post = UnmergeDepPriority(runtime_post=True)
buildtime = UnmergeDepPriority(buildtime=True)
priority_map = {
"RDEPEND": runtime,
"PDEPEND": runtime_post,
+ "HDEPEND": buildtime,
"DEPEND": buildtime,
}
for node in clean_set:
graph.add(node, None)
- for dep_type in dep_keys:
- depstr = node.metadata[dep_type]
+ for dep_type in Package._dep_keys:
+ depstr = node._metadata[dep_type]
if not depstr:
continue
priority = priority_map[dep_type]
if debug:
- writemsg_level(_unicode_decode("\nParent: %s\n") \
+ writemsg_level("\nParent: %s\n"
% (node,), noiselevel=-1, level=logging.DEBUG)
- writemsg_level(_unicode_decode( "Depstring: %s\n") \
+ writemsg_level( "Depstring: %s\n"
% (depstr,), noiselevel=-1, level=logging.DEBUG)
- writemsg_level(_unicode_decode( "Priority: %s\n") \
+ writemsg_level( "Priority: %s\n"
% (priority,), noiselevel=-1, level=logging.DEBUG)
try:
@@ -1183,7 +1224,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
if debug:
writemsg_level("Candidates: [%s]\n" % \
- ', '.join(_unicode_decode("'%s'") % (x,) for x in atoms),
+ ', '.join("'%s'" % (x,) for x in atoms),
noiselevel=-1, level=logging.DEBUG)
for atom in atoms:
@@ -1197,7 +1238,15 @@ def calc_depclean(settings, trees, ldpath_mtimes,
continue
for child_node in matches:
if child_node in clean_set:
- graph.add(child_node, node, priority=priority)
+
+ mypriority = priority.copy()
+ if atom.slot_operator_built:
+ if mypriority.buildtime:
+ mypriority.buildtime_slot_op = True
+ if mypriority.runtime:
+ mypriority.runtime_slot_op = True
+
+ graph.add(child_node, node, priority=mypriority)
if debug:
writemsg_level("\nunmerge digraph:\n\n",
@@ -1277,11 +1326,8 @@ def action_deselect(settings, trees, opts, atoms):
allow_repo=True, allow_wildcard=True))
for cpv in vardb.match(atom):
- slot, = vardb.aux_get(cpv, ["SLOT"])
- if not slot:
- slot = "0"
- expanded_atoms.add(Atom("%s:%s" % \
- (portage.cpv_getkey(cpv), slot)))
+ pkg = vardb._pkg_str(cpv, None)
+ expanded_atoms.add(Atom("%s:%s" % (pkg.cp, pkg.slot)))
discard_atoms = set()
for atom in world_set:
@@ -1352,10 +1398,90 @@ class _info_pkgs_ver(object):
def action_info(settings, trees, myopts, myfiles):
+ # See if we can find any packages installed matching the strings
+ # passed on the command line
+ mypkgs = []
+ eroot = settings['EROOT']
+ vardb = trees[eroot]["vartree"].dbapi
+ portdb = trees[eroot]['porttree'].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+ for x in myfiles:
+ any_match = False
+ cp_exists = bool(vardb.match(x.cp))
+ installed_match = vardb.match(x)
+ for installed in installed_match:
+ mypkgs.append((installed, "installed"))
+ any_match = True
+
+ if any_match:
+ continue
+
+ for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
+ if pkg_type == "binary" and "--usepkg" not in myopts:
+ continue
+
+ # Use match instead of cp_list, to account for old-style virtuals.
+ if not cp_exists and db.match(x.cp):
+ cp_exists = True
+ # Search for masked packages too.
+ if not cp_exists and hasattr(db, "xmatch") and \
+ db.xmatch("match-all", x.cp):
+ cp_exists = True
+
+ matches = db.match(x)
+ matches.reverse()
+ for match in matches:
+ if pkg_type == "binary":
+ if db.bintree.isremote(match):
+ continue
+ auxkeys = ["EAPI", "DEFINED_PHASES"]
+ metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
+ if metadata["EAPI"] not in ("0", "1", "2", "3") and \
+ "info" in metadata["DEFINED_PHASES"].split():
+ mypkgs.append((match, pkg_type))
+ break
+
+ if not cp_exists:
+ xinfo = '"%s"' % x.unevaluated_atom
+ # Discard null/ from failed cpv_expand category expansion.
+ xinfo = xinfo.replace("null/", "")
+ if settings["ROOT"] != "/":
+ xinfo = "%s for %s" % (xinfo, eroot)
+ writemsg("\nemerge: there are no ebuilds to satisfy %s.\n" %
+ colorize("INFORM", xinfo), noiselevel=-1)
+
+ if myopts.get("--misspell-suggestions", "y") != "n":
+
+ writemsg("\nemerge: searching for similar names..."
+ , noiselevel=-1)
+
+ dbs = [vardb]
+ #if "--usepkgonly" not in myopts:
+ dbs.append(portdb)
+ if "--usepkg" in myopts:
+ dbs.append(bindb)
+
+ matches = similar_name_search(dbs, x)
+
+ if len(matches) == 1:
+ writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
+ , noiselevel=-1)
+ elif len(matches) > 1:
+ writemsg(
+ "\nemerge: Maybe you meant any of these: %s?\n" % \
+ (", ".join(matches),), noiselevel=-1)
+ else:
+ # Generally, this would only happen if
+ # all dbapis are empty.
+ writemsg(" nothing similar found.\n"
+ , noiselevel=-1)
+
+ return 1
+
output_buffer = []
append = output_buffer.append
root_config = trees[settings['EROOT']]['root_config']
- running_eroot = trees._running_eroot
+ chost = settings.get("CHOST")
append(getportageversion(settings["PORTDIR"], None,
settings.profile_path, settings["CHOST"],
@@ -1369,6 +1495,18 @@ def action_info(settings, trees, myopts, myfiles):
append(header_width * "=")
append("System uname: %s" % (platform.platform(aliased=1),))
+ vm_info = get_vm_info()
+ if "ram.total" in vm_info:
+ line = "%-9s %10d total" % ("KiB Mem:", vm_info["ram.total"] / 1024)
+ if "ram.free" in vm_info:
+ line += ",%10d free" % (vm_info["ram.free"] / 1024,)
+ append(line)
+ if "swap.total" in vm_info:
+ line = "%-9s %10d total" % ("KiB Swap:", vm_info["swap.total"] / 1024)
+ if "swap.free" in vm_info:
+ line += ",%10d free" % (vm_info["swap.free"] / 1024,)
+ append(line)
+
lastSync = portage.grabfile(os.path.join(
settings["PORTDIR"], "metadata", "timestamp.chk"))
if lastSync:
@@ -1377,6 +1515,23 @@ def action_info(settings, trees, myopts, myfiles):
lastSync = "Unknown"
append("Timestamp of tree: %s" % (lastSync,))
+ ld_names = []
+ if chost:
+ ld_names.append(chost + "-ld")
+ ld_names.append("ld")
+ for name in ld_names:
+ try:
+ proc = subprocess.Popen([name, "--version"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ pass
+ else:
+ output = _unicode_decode(proc.communicate()[0]).splitlines()
+ if proc.wait() == os.EX_OK and output:
+ append("ld %s" % (output[0]))
+ break
+
try:
proc = subprocess.Popen(["distcc", "--version"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
@@ -1413,7 +1568,6 @@ def action_info(settings, trees, myopts, myfiles):
"sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
atoms = []
- vardb = trees[running_eroot]['vartree'].dbapi
for x in myvars:
try:
x = Atom(x)
@@ -1426,7 +1580,6 @@ def action_info(settings, trees, myopts, myfiles):
myvars = sorted(set(atoms))
- portdb = trees[running_eroot]['porttree'].dbapi
main_repo = portdb.getRepositoryName(portdb.porttree_root)
cp_map = {}
cp_max_len = 0
@@ -1493,7 +1646,7 @@ def action_info(settings, trees, myopts, myfiles):
'PORTDIR_OVERLAY', 'PORTAGE_BUNZIP2_COMMAND',
'PORTAGE_BZIP2_COMMAND',
'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
- 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'SYNC', 'FEATURES',
+ 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'FEATURES',
'EMERGE_DEFAULT_OPTS']
myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
@@ -1539,40 +1692,7 @@ def action_info(settings, trees, myopts, myfiles):
append("")
writemsg_stdout("\n".join(output_buffer),
noiselevel=-1)
-
- # See if we can find any packages installed matching the strings
- # passed on the command line
- mypkgs = []
- eroot = settings['EROOT']
- vardb = trees[eroot]["vartree"].dbapi
- portdb = trees[eroot]['porttree'].dbapi
- bindb = trees[eroot]["bintree"].dbapi
- for x in myfiles:
- match_found = False
- installed_match = vardb.match(x)
- for installed in installed_match:
- mypkgs.append((installed, "installed"))
- match_found = True
-
- if match_found:
- continue
-
- for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
- if pkg_type == "binary" and "--usepkg" not in myopts:
- continue
-
- matches = db.match(x)
- matches.reverse()
- for match in matches:
- if pkg_type == "binary":
- if db.bintree.isremote(match):
- continue
- auxkeys = ["EAPI", "DEFINED_PHASES"]
- metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
- if metadata["EAPI"] not in ("0", "1", "2", "3") and \
- "info" in metadata["DEFINED_PHASES"].split():
- mypkgs.append((match, pkg_type))
- break
+ del output_buffer[:]
# If some packages were found...
if mypkgs:
@@ -1586,11 +1706,15 @@ def action_info(settings, trees, myopts, myfiles):
# Loop through each package
# Only print settings if they differ from global settings
header_title = "Package Settings"
- print(header_width * "=")
- print(header_title.rjust(int(header_width/2 + len(header_title)/2)))
- print(header_width * "=")
- from portage.output import EOutput
- out = EOutput()
+ append(header_width * "=")
+ append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+ append(header_width * "=")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+ del output_buffer[:]
+
+ out = portage.output.EOutput()
for mypkg in mypkgs:
cpv = mypkg[0]
pkg_type = mypkg[1]
@@ -1608,28 +1732,32 @@ def action_info(settings, trees, myopts, myfiles):
root_config=root_config, type_name=pkg_type)
if pkg_type == "installed":
- print("\n%s was built with the following:" % \
+ append("\n%s was built with the following:" % \
colorize("INFORM", str(pkg.cpv)))
elif pkg_type == "ebuild":
- print("\n%s would be build with the following:" % \
+ append("\n%s would be build with the following:" % \
colorize("INFORM", str(pkg.cpv)))
elif pkg_type == "binary":
- print("\n%s (non-installed binary) was built with the following:" % \
+ append("\n%s (non-installed binary) was built with the following:" % \
colorize("INFORM", str(pkg.cpv)))
- writemsg_stdout('%s\n' % pkg_use_display(pkg, myopts),
- noiselevel=-1)
+ append('%s' % pkg_use_display(pkg, myopts))
if pkg_type == "installed":
for myvar in mydesiredvars:
if metadata[myvar].split() != settings.get(myvar, '').split():
- print("%s=\"%s\"" % (myvar, metadata[myvar]))
- print()
+ append("%s=\"%s\"" % (myvar, metadata[myvar]))
+ append("")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+ del output_buffer[:]
if metadata['DEFINED_PHASES']:
if 'info' not in metadata['DEFINED_PHASES'].split():
continue
- print(">>> Attempting to run pkg_info() for '%s'" % pkg.cpv)
+ writemsg_stdout(">>> Attempting to run pkg_info() for '%s'\n"
+ % pkg.cpv, noiselevel=-1)
if pkg_type == "installed":
ebuildpath = vardb.findname(pkg.cpv)
@@ -1856,6 +1984,7 @@ def action_metadata(settings, portdb, myopts, porttrees=None):
print()
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+ portdb.flush_cache()
sys.stdout.flush()
os.umask(old_umask)
@@ -1865,35 +1994,12 @@ def action_regen(settings, portdb, max_jobs, max_load):
#regenerate cache entries
sys.stdout.flush()
- regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
- received_signal = []
-
- def emergeexitsig(signum, frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
- {"signal":signum})
- regen.terminate()
- received_signal.append(128 + signum)
-
- earlier_sigint_handler = signal.signal(signal.SIGINT, emergeexitsig)
- earlier_sigterm_handler = signal.signal(signal.SIGTERM, emergeexitsig)
+ regen = MetadataRegen(portdb, max_jobs=max_jobs,
+ max_load=max_load, main=True)
- try:
- regen.run()
- finally:
- # Restore previous handlers
- if earlier_sigint_handler is not None:
- signal.signal(signal.SIGINT, earlier_sigint_handler)
- else:
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- if earlier_sigterm_handler is not None:
- signal.signal(signal.SIGTERM, earlier_sigterm_handler)
- else:
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
-
- if received_signal:
- sys.exit(received_signal[0])
+ signum = run_main_scheduler(regen)
+ if signum is not None:
+ sys.exit(128 + signum)
portage.writemsg_stdout("done!\n")
return regen.returncode
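
# Aside: the inline SIGINT/SIGTERM plumbing removed above moves into a
# run_main_scheduler() helper added elsewhere in this commit. A plausible
# shape for such a helper, reconstructed from the deleted lines (a sketch,
# not the actual implementation):

import signal

def run_main_scheduler(scheduler):
	received = []

	def handler(signum, frame):
		signal.signal(signal.SIGINT, signal.SIG_IGN)
		signal.signal(signal.SIGTERM, signal.SIG_IGN)
		scheduler.terminate()
		received.append(signum)

	prev_int = signal.signal(signal.SIGINT, handler)
	prev_term = signal.signal(signal.SIGTERM, handler)
	try:
		scheduler.run()
	finally:
		# Restore previous handlers, falling back to the default
		# when no earlier handler was installed.
		signal.signal(signal.SIGINT, prev_int or signal.SIG_DFL)
		signal.signal(signal.SIGTERM, prev_term or signal.SIG_DFL)

	# The caller exits with 128 + signum, the conventional signal status.
	return received[0] if received else None
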
@@ -1914,37 +2020,110 @@ def action_search(root_config, myopts, myfiles, spinner):
sys.exit(1)
searchinstance.output()
-def action_sync(settings, trees, mtimedb, myopts, myaction):
+def action_sync(emerge_config, trees=DeprecationWarning,
+ mtimedb=DeprecationWarning, opts=DeprecationWarning,
+ action=DeprecationWarning):
+
+ if not isinstance(emerge_config, _emerge_config):
+ warnings.warn("_emerge.actions.action_sync() now expects "
+ "an _emerge_config instance as the first parameter",
+ DeprecationWarning, stacklevel=2)
+ emerge_config = load_emerge_config(
+ action=action, args=[], trees=trees, opts=opts)
+
+ xterm_titles = "notitles" not in \
+ emerge_config.target_config.settings.features
+ emergelog(xterm_titles, " === sync")
+
+ selected_repos = []
+ unknown_repo_names = []
+ missing_sync_type = []
+ if emerge_config.args:
+ for repo_name in emerge_config.args:
+ try:
+ repo = emerge_config.target_config.settings.repositories[repo_name]
+ except KeyError:
+ unknown_repo_names.append(repo_name)
+ else:
+ selected_repos.append(repo)
+ if repo.sync_type is None:
+ missing_sync_type.append(repo)
+
+ if unknown_repo_names:
+ writemsg_level("!!! %s\n" % _("Unknown repo(s): %s") %
+ " ".join(unknown_repo_names),
+ level=logging.ERROR, noiselevel=-1)
+
+ if missing_sync_type:
+ writemsg_level("!!! %s\n" %
+ _("Missing sync-type for repo(s): %s") %
+ " ".join(repo.name for repo in missing_sync_type),
+ level=logging.ERROR, noiselevel=-1)
+
+ if unknown_repo_names or missing_sync_type:
+ return 1
+
+ else:
+ selected_repos.extend(emerge_config.target_config.settings.repositories)
+
+ for repo in selected_repos:
+ if repo.sync_type is not None:
+ returncode = _sync_repo(emerge_config, repo)
+ if returncode != os.EX_OK:
+ return returncode
+
+ # Reload the whole config from scratch.
+ portage._sync_mode = False
+ load_emerge_config(emerge_config=emerge_config)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+
+ if emerge_config.opts.get('--package-moves') != 'n' and \
+ _global_updates(emerge_config.trees,
+ emerge_config.target_config.mtimedb["updates"],
+ quiet=("--quiet" in emerge_config.opts)):
+ emerge_config.target_config.mtimedb.commit()
+ # Reload the whole config from scratch.
+ load_emerge_config(emerge_config=emerge_config)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+
+ mybestpv = emerge_config.target_config.trees['porttree'].dbapi.xmatch(
+ "bestmatch-visible", portage.const.PORTAGE_PACKAGE_ATOM)
+ mypvs = portage.best(
+ emerge_config.target_config.trees['vartree'].dbapi.match(
+ portage.const.PORTAGE_PACKAGE_ATOM))
+
+ chk_updated_cfg_files(emerge_config.target_config.root,
+ portage.util.shlex_split(
+ emerge_config.target_config.settings.get("CONFIG_PROTECT", "")))
+
+ if mybestpv != mypvs and "--quiet" not in emerge_config.opts:
+ print()
+ print(warn(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
+ print(warn(" * ")+"that you update portage now, before any other packages are updated.")
+ print()
+ print(warn(" * ")+"To update portage, run 'emerge --oneshot portage' now.")
+ print()
+
+ display_news_notification(emerge_config.target_config, emerge_config.opts)
+ return os.EX_OK
+
+def _sync_repo(emerge_config, repo):
+ settings, trees, mtimedb = emerge_config
+ myopts = emerge_config.opts
enter_invalid = '--ask-enter-invalid' in myopts
xterm_titles = "notitles" not in settings.features
- emergelog(xterm_titles, " === sync")
- portdb = trees[settings['EROOT']]['porttree'].dbapi
- myportdir = portdb.porttree_root
- if not myportdir:
- myportdir = settings.get('PORTDIR', '')
- if myportdir and myportdir.strip():
- myportdir = os.path.realpath(myportdir)
- else:
- myportdir = None
+ msg = ">>> Synchronization of repository '%s' located in '%s'..." % (repo.name, repo.location)
+ emergelog(xterm_titles, msg)
+ writemsg_level(msg + "\n")
out = portage.output.EOutput()
- global_config_path = GLOBAL_CONFIG_PATH
- if settings['EPREFIX']:
- global_config_path = os.path.join(settings['EPREFIX'],
- GLOBAL_CONFIG_PATH.lstrip(os.sep))
- if not myportdir:
- sys.stderr.write("!!! PORTDIR is undefined. " + \
- "Is %s/make.globals missing?\n" % global_config_path)
- sys.exit(1)
- if myportdir[-1]=="/":
- myportdir=myportdir[:-1]
try:
- st = os.stat(myportdir)
+ st = os.stat(repo.location)
except OSError:
st = None
if st is None:
- print(">>>",myportdir,"not found, creating it.")
- portage.util.ensure_dirs(myportdir, mode=0o755)
- st = os.stat(myportdir)
+ print(">>> '%s' not found, creating it." % repo.location)
+ portage.util.ensure_dirs(repo.location, mode=0o755)
+ st = os.stat(repo.location)
usersync_uid = None
spawn_kwargs = {}
@@ -1977,59 +2156,51 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
if rval != os.EX_OK:
return rval
- syncuri = settings.get("SYNC", "").strip()
- if not syncuri:
- writemsg_level("!!! SYNC is undefined. " + \
- "Is %s/make.globals missing?\n" % global_config_path,
- noiselevel=-1, level=logging.ERROR)
- return 1
+ syncuri = repo.sync_uri
- vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
- vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
+ vcs_dirs = frozenset(VCS_DIRS)
+ vcs_dirs = vcs_dirs.intersection(os.listdir(repo.location))
os.umask(0o022)
dosyncuri = syncuri
updatecache_flg = False
- git = False
- if myaction == "metadata":
- print("skipping sync")
- updatecache_flg = True
- elif ".git" in vcs_dirs:
+ if repo.sync_type == "git":
# Update existing git repository, and ignore the syncuri. We are
# going to trust the user and assume that the user is in the branch
# that he/she wants updated. We'll let the user manage branches with
# git directly.
if portage.process.find_binary("git") is None:
msg = ["Command not found: git",
- "Type \"emerge dev-util/git\" to enable git support."]
+ "Type \"emerge %s\" to enable git support." % portage.const.GIT_PACKAGE_ATOM]
for l in msg:
writemsg_level("!!! %s\n" % l,
level=logging.ERROR, noiselevel=-1)
return 1
- msg = ">>> Starting git pull in %s..." % myportdir
+ msg = ">>> Starting git pull in %s..." % repo.location
emergelog(xterm_titles, msg )
writemsg_level(msg + "\n")
exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
- (portage._shell_quote(myportdir),), **spawn_kwargs)
+ (portage._shell_quote(repo.location),),
+ **portage._native_kwargs(spawn_kwargs))
if exitcode != os.EX_OK:
- msg = "!!! git pull error in %s." % myportdir
+ msg = "!!! git pull error in %s." % repo.location
emergelog(xterm_titles, msg)
writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
return exitcode
- msg = ">>> Git pull in %s successful" % myportdir
+ msg = ">>> Git pull in %s successful" % repo.location
emergelog(xterm_titles, msg)
writemsg_level(msg + "\n")
- git = True
- elif syncuri[:8]=="rsync://" or syncuri[:6]=="ssh://":
+ elif repo.sync_type == "rsync":
for vcs_dir in vcs_dirs:
writemsg_level(("!!! %s appears to be under revision " + \
"control (contains %s).\n!!! Aborting rsync sync.\n") % \
- (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
+ (repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
return 1
- if not os.path.exists("/usr/bin/rsync"):
+ rsync_binary = portage.process.find_binary("rsync")
+ if rsync_binary is None:
print("!!! /usr/bin/rsync does not exist, so rsync support is disabled.")
- print("!!! Type \"emerge net-misc/rsync\" to enable rsync support.")
- sys.exit(1)
+ print("!!! Type \"emerge %s\" to enable rsync support." % portage.const.RSYNC_PACKAGE_ATOM)
+ return os.EX_UNAVAILABLE
mytimeout=180
rsync_opts = []
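
# Aside: action_sync() keeps its old positional parameters with
# DeprecationWarning as a throwaway default, so legacy callers that still
# pass (settings, trees, mtimedb, opts, action) do not raise a TypeError;
# the isinstance() test then detects them and rebuilds an _emerge_config
# while warning. A generic sketch of the idiom (EmergeConfig and action
# are illustrative names, not the portage API):

import warnings

class EmergeConfig(object):  # stand-in for _emerge_config
	def __init__(self, legacy=None):
		self.legacy = legacy

def action(config, legacy_arg=DeprecationWarning):
	if not isinstance(config, EmergeConfig):
		# A legacy caller passed old-style positional arguments.
		warnings.warn("action() now expects an EmergeConfig as the "
			"first parameter", DeprecationWarning, stacklevel=2)
		config = EmergeConfig(legacy=config)
	return config
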
@@ -2041,6 +2212,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
"--safe-links", # Ignore links outside of tree
"--perms", # Preserve permissions
"--times", # Preserive mod times
+ "--omit-dir-times",
"--compress", # Compress the data transmitted
"--force", # Force deletion on non-empty dirs
"--whole-file", # Don't do block transfers, only entire files
@@ -2103,14 +2275,14 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
# Real local timestamp file.
servertimestampfile = os.path.join(
- myportdir, "metadata", "timestamp.chk")
+ repo.location, "metadata", "timestamp.chk")
content = portage.util.grabfile(servertimestampfile)
mytimestamp = 0
if content:
try:
mytimestamp = time.mktime(time.strptime(content[0],
- "%a, %d %b %Y %H:%M:%S +0000"))
+ TIMESTAMP_FORMAT))
except (OverflowError, ValueError):
pass
del content
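
# Aside: metadata/timestamp.chk holds a single line such as
# "Sat, 15 Mar 2014 12:30:00 +0000"; this hunk only swaps the inline
# strftime pattern for a shared TIMESTAMP_FORMAT constant. A self-contained
# sketch of the parse, assuming the same format string as the removed line:

import time

TIMESTAMP_FORMAT = "%a, %d %b %Y %H:%M:%S +0000"  # value from the removed line

def read_timestamp(line):
	"""Return epoch seconds, or 0 when the line is missing or malformed."""
	try:
		return time.mktime(time.strptime(line, TIMESTAMP_FORMAT))
	except (OverflowError, ValueError):
		return 0

print(read_timestamp("Sat, 15 Mar 2014 12:30:00 +0000"))
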
@@ -2134,9 +2306,12 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
syncuri, maxsplit=4)[1:5]
except ValueError:
- writemsg_level("!!! SYNC is invalid: %s\n" % syncuri,
+ writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
noiselevel=-1, level=logging.ERROR)
return 1
+
+ ssh_opts = settings.get("PORTAGE_SSH_OPTS")
+
if port is None:
port=""
if user_name is None:
@@ -2252,7 +2427,10 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
if mytimestamp != 0 and "--quiet" not in myopts:
print(">>> Checking server timestamp ...")
- rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
+ rsynccommand = [rsync_binary] + rsync_opts + extra_rsync_opts
+
+ if proto == 'ssh' and ssh_opts:
+ rsynccommand.append("--rsh=ssh " + ssh_opts)
if "--debug" in myopts:
print(rsynccommand)
@@ -2298,7 +2476,8 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
rsync_initial_timeout)
mypids.extend(portage.process.spawn(
- mycommand, returnpid=True, **spawn_kwargs))
+ mycommand, returnpid=True,
+ **portage._native_kwargs(spawn_kwargs)))
exitcode = os.waitpid(mypids[0], 0)[1]
if usersync_uid is not None:
portage.util.apply_permissions(tmpservertimestampfile,
@@ -2328,12 +2507,11 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
exitcode = (exitcode & 0xff) << 8
else:
exitcode = exitcode >> 8
- if mypids:
- portage.process.spawned_pids.remove(mypids[0])
+
if content:
try:
servertimestamp = time.mktime(time.strptime(
- content[0], "%a, %d %b %Y %H:%M:%S +0000"))
+ content[0], TIMESTAMP_FORMAT))
except (OverflowError, ValueError):
pass
del mycommand, mypids, content
@@ -2349,7 +2527,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
print(">>> In order to force sync, remove '%s'." % servertimestampfile)
print(">>>")
print()
- sys.exit(0)
+ return os.EX_OK
elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
emergelog(xterm_titles,
">>> Server out of date: %s" % dosyncuri)
@@ -2363,8 +2541,33 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
exitcode = SERVER_OUT_OF_DATE
elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
# actual sync
- mycommand = rsynccommand + [dosyncuri+"/", myportdir]
- exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
+ mycommand = rsynccommand + [dosyncuri+"/", repo.location]
+ exitcode = None
+ try:
+ exitcode = portage.process.spawn(mycommand,
+ **portage._native_kwargs(spawn_kwargs))
+ finally:
+ if exitcode is None:
+ # interrupted
+ exitcode = 128 + signal.SIGINT
+
+ # 0 Success
+ # 1 Syntax or usage error
+ # 2 Protocol incompatibility
+ # 5 Error starting client-server protocol
+ # 35 Timeout waiting for daemon connection
+ if exitcode not in (0, 1, 2, 5, 35):
+ # If the exit code is not among those listed above,
+ # then we may have a partial/inconsistent sync
+ # state, so our previously read timestamp as well
+ # as the corresponding file can no longer be
+ # trusted.
+ mytimestamp = 0
+ try:
+ os.unlink(servertimestampfile)
+ except OSError:
+ pass
+
if exitcode in [0,1,3,4,11,14,20,21]:
break
elif exitcode in [1,3,4,11,14,20,21]:
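
# Aside: the new finally block distrusts the local timestamp whenever rsync
# exits with a code outside the small set for which the on-disk tree is
# still known to be consistent (0 success, 1 syntax/usage error, 2 protocol
# incompatibility, 5 client-server startup error, 35 daemon connection
# timeout); anything else may have left a partial transfer behind. A sketch
# of that classification, using the same code set:

NO_PARTIAL_TRANSFER = frozenset([0, 1, 2, 5, 35])

def timestamp_still_trusted(exitcode):
	# Any other exit code may indicate a partial/inconsistent sync,
	# so the previously read timestamp.chk must be discarded.
	return exitcode in NO_PARTIAL_TRANSFER
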
@@ -2390,23 +2593,23 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
if (exitcode==0):
emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
elif exitcode == SERVER_OUT_OF_DATE:
- sys.exit(1)
+ return 1
elif exitcode == EXCEEDED_MAX_RETRIES:
sys.stderr.write(
">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
- sys.exit(1)
+ return 1
elif (exitcode>0):
msg = []
if exitcode==1:
msg.append("Rsync has reported that there is a syntax error. Please ensure")
- msg.append("that your SYNC statement is proper.")
- msg.append("SYNC=" + settings["SYNC"])
+			msg.append("that the sync-uri attribute for repository '%s' is correct." % repo.name)
+ msg.append("sync-uri: '%s'" % repo.sync_uri)
elif exitcode==11:
msg.append("Rsync has reported that there is a File IO error. Normally")
msg.append("this means your disk is full, but can be caused by corruption")
- msg.append("on the filesystem that contains PORTDIR. Please investigate")
+ msg.append("on the filesystem that contains repository '%s'. Please investigate" % repo.name)
msg.append("and try again after the problem has been fixed.")
- msg.append("PORTDIR=" + settings["PORTDIR"])
+ msg.append("Location of repository: '%s'" % repo.location)
elif exitcode==20:
msg.append("Rsync was killed before it finished.")
else:
@@ -2417,115 +2620,76 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
msg.append("(and possibly your system's filesystem) configuration.")
for line in msg:
out.eerror(line)
- sys.exit(exitcode)
- elif syncuri[:6]=="cvs://":
+ return exitcode
+ elif repo.sync_type == "cvs":
if not os.path.exists("/usr/bin/cvs"):
print("!!! /usr/bin/cvs does not exist, so CVS support is disabled.")
- print("!!! Type \"emerge dev-vcs/cvs\" to enable CVS support.")
- sys.exit(1)
- cvsroot=syncuri[6:]
- cvsdir=os.path.dirname(myportdir)
- if not os.path.exists(myportdir+"/CVS"):
+ print("!!! Type \"emerge %s\" to enable CVS support." % portage.const.CVS_PACKAGE_ATOM)
+ return os.EX_UNAVAILABLE
+ cvs_root = syncuri
+ if cvs_root.startswith("cvs://"):
+ cvs_root = cvs_root[6:]
+ if not os.path.exists(os.path.join(repo.location, "CVS")):
#initial checkout
print(">>> Starting initial cvs checkout with "+syncuri+"...")
- if os.path.exists(cvsdir+"/gentoo-x86"):
- print("!!! existing",cvsdir+"/gentoo-x86 directory; exiting.")
- sys.exit(1)
try:
- os.rmdir(myportdir)
+ os.rmdir(repo.location)
except OSError as e:
if e.errno != errno.ENOENT:
sys.stderr.write(
- "!!! existing '%s' directory; exiting.\n" % myportdir)
- sys.exit(1)
+ "!!! existing '%s' directory; exiting.\n" % repo.location)
+ return 1
del e
if portage.process.spawn_bash(
- "cd %s; exec cvs -z0 -d %s co -P gentoo-x86" % \
- (portage._shell_quote(cvsdir), portage._shell_quote(cvsroot)),
- **spawn_kwargs) != os.EX_OK:
+ "cd %s; exec cvs -z0 -d %s co -P -d %s %s" %
+ (portage._shell_quote(os.path.dirname(repo.location)), portage._shell_quote(cvs_root),
+ portage._shell_quote(os.path.basename(repo.location)), portage._shell_quote(repo.sync_cvs_repo)),
+ **portage._native_kwargs(spawn_kwargs)) != os.EX_OK:
print("!!! cvs checkout error; exiting.")
- sys.exit(1)
- os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
+ return 1
else:
#cvs update
print(">>> Starting cvs update with "+syncuri+"...")
retval = portage.process.spawn_bash(
"cd %s; exec cvs -z0 -q update -dP" % \
- (portage._shell_quote(myportdir),), **spawn_kwargs)
+ (portage._shell_quote(repo.location),),
+ **portage._native_kwargs(spawn_kwargs))
if retval != os.EX_OK:
writemsg_level("!!! cvs update error; exiting.\n",
noiselevel=-1, level=logging.ERROR)
- sys.exit(retval)
+ return retval
dosyncuri = syncuri
- else:
- writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
- noiselevel=-1, level=logging.ERROR)
- return 1
# Reload the whole config from scratch.
- settings, trees, mtimedb = load_emerge_config(trees=trees)
- adjust_configs(myopts, trees)
- root_config = trees[settings['EROOT']]['root_config']
+ settings, trees, mtimedb = load_emerge_config(emerge_config=emerge_config)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
portdb = trees[settings['EROOT']]['porttree'].dbapi
- if git:
+ if repo.sync_type == "git":
# NOTE: Do this after reloading the config, in case
# it did not exist prior to sync, so that the config
# and portdb properly account for its existence.
- exitcode = git_sync_timestamps(portdb, myportdir)
+ exitcode = git_sync_timestamps(portdb, repo.location)
if exitcode == os.EX_OK:
updatecache_flg = True
- if updatecache_flg and \
- myaction != "metadata" and \
- "metadata-transfer" not in settings.features:
+ if updatecache_flg and "metadata-transfer" not in settings.features:
updatecache_flg = False
if updatecache_flg and \
- os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
+ os.path.exists(os.path.join(repo.location, 'metadata', 'cache')):
- # Only update cache for myportdir since that's
+ # Only update cache for repo.location since that's
# the only one that's been synced here.
- action_metadata(settings, portdb, myopts, porttrees=[myportdir])
-
- if myopts.get('--package-moves') != 'n' and \
- _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
- mtimedb.commit()
- # Reload the whole config from scratch.
- settings, trees, mtimedb = load_emerge_config(trees=trees)
- adjust_configs(myopts, trees)
- portdb = trees[settings['EROOT']]['porttree'].dbapi
- root_config = trees[settings['EROOT']]['root_config']
-
- mybestpv = portdb.xmatch("bestmatch-visible",
- portage.const.PORTAGE_PACKAGE_ATOM)
- mypvs = portage.best(
- trees[settings['EROOT']]['vartree'].dbapi.match(
- portage.const.PORTAGE_PACKAGE_ATOM))
-
- chk_updated_cfg_files(settings["EROOT"],
- portage.util.shlex_split(settings.get("CONFIG_PROTECT", "")))
-
- if myaction != "metadata":
- postsync = os.path.join(settings["PORTAGE_CONFIGROOT"],
- portage.USER_CONFIG_PATH, "bin", "post_sync")
- if os.access(postsync, os.X_OK):
- retval = portage.process.spawn(
- [postsync, dosyncuri], env=settings.environ())
- if retval != os.EX_OK:
- writemsg_level(
- " %s spawn failed of %s\n" % (bad("*"), postsync,),
- level=logging.ERROR, noiselevel=-1)
+ action_metadata(settings, portdb, myopts, porttrees=[repo.location])
- if(mybestpv != mypvs) and not "--quiet" in myopts:
- print()
- print(warn(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
- print(warn(" * ")+"that you update portage now, before any other packages are updated.")
- print()
- print(warn(" * ")+"To update portage, run 'emerge portage' now.")
- print()
+ postsync = os.path.join(settings["PORTAGE_CONFIGROOT"], portage.USER_CONFIG_PATH, "bin", "post_sync")
+ if os.access(postsync, os.X_OK):
+ retval = portage.process.spawn([postsync, dosyncuri], env=settings.environ())
+ if retval != os.EX_OK:
+			writemsg_level(" %s spawn of %s failed\n" % (bad("*"), postsync,),
+ level=logging.ERROR, noiselevel=-1)
- display_news_notification(root_config, myopts)
return os.EX_OK
def action_uninstall(settings, trees, ldpath_mtimes,
@@ -2647,13 +2811,8 @@ def action_uninstall(settings, trees, ldpath_mtimes,
if owners:
for cpv in owners:
- slot = vardb.aux_get(cpv, ['SLOT'])[0]
- if not slot:
- # portage now masks packages with missing slot, but it's
- # possible that one was installed by an older version
- atom = portage.cpv_getkey(cpv)
- else:
- atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
+ pkg = vardb._pkg_str(cpv, None)
+ atom = '%s:%s' % (pkg.cp, pkg.slot)
valid_atoms.append(portage.dep.Atom(atom))
else:
writemsg_level(("!!! '%s' is not claimed " + \
@@ -2677,20 +2836,20 @@ def action_uninstall(settings, trees, ldpath_mtimes,
if action == 'deselect':
return action_deselect(settings, trees, opts, valid_atoms)
- # Create a Scheduler for calls to unmerge(), in order to cause
- # redirection of ebuild phase output to logs as required for
- # options such as --quiet.
- sched = Scheduler(settings, trees, None, opts,
- spinner, uninstall_only=True)
- sched._background = sched._background_mode()
- sched._status_display.quiet = True
-
- if sched._background:
- sched.settings.unlock()
- sched.settings["PORTAGE_BACKGROUND"] = "1"
- sched.settings.backup_changes("PORTAGE_BACKGROUND")
- sched.settings.lock()
- sched.pkgsettings[eroot] = portage.config(clone=sched.settings)
+ # Use the same logic as the Scheduler class to trigger redirection
+ # of ebuild pkg_prerm/postrm phase output to logs as appropriate
+ # for options such as --jobs, --quiet and --quiet-build.
+ max_jobs = opts.get("--jobs", 1)
+ background = (max_jobs is True or max_jobs > 1 or
+ "--quiet" in opts or opts.get("--quiet-build") == "y")
+ sched_iface = SchedulerInterface(global_event_loop(),
+ is_background=lambda: background)
+
+ if background:
+ settings.unlock()
+ settings["PORTAGE_BACKGROUND"] = "1"
+ settings.backup_changes("PORTAGE_BACKGROUND")
+ settings.lock()
if action in ('clean', 'unmerge') or \
(action == 'prune' and "--nodeps" in opts):
@@ -2698,10 +2857,11 @@ def action_uninstall(settings, trees, ldpath_mtimes,
ordered = action == 'unmerge'
rval = unmerge(trees[settings['EROOT']]['root_config'], opts, action,
valid_atoms, ldpath_mtimes, ordered=ordered,
- scheduler=sched._sched_iface)
+ scheduler=sched_iface)
else:
rval = action_depclean(settings, trees, ldpath_mtimes,
- opts, action, valid_atoms, spinner, scheduler=sched._sched_iface)
+ opts, action, valid_atoms, spinner,
+ scheduler=sched_iface)
return rval
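
# Aside: instead of constructing a full Scheduler just to get log
# redirection, the uninstall path now reproduces the Scheduler's own
# background test and hands the result to a SchedulerInterface. A condensed
# sketch of the decision (opts is the option dict used above):

def wants_background(opts):
	max_jobs = opts.get("--jobs", 1)
	return (max_jobs is True      # --jobs given without an argument
		or max_jobs > 1           # parallel jobs requested
		or "--quiet" in opts
		or opts.get("--quiet-build") == "y")

# As above, the result is wrapped in a callable for SchedulerInterface:
#   SchedulerInterface(global_event_loop(), is_background=lambda: background)
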
@@ -2807,6 +2967,10 @@ def adjust_config(myopts, settings):
settings["NOCOLOR"] = "true"
settings.backup_changes("NOCOLOR")
+ if "--pkg-format" in myopts:
+ settings["PORTAGE_BINPKG_FORMAT"] = myopts["--pkg-format"]
+ settings.backup_changes("PORTAGE_BINPKG_FORMAT")
+
def display_missing_pkg_set(root_config, set_name):
msg = []
@@ -3030,61 +3194,53 @@ def git_sync_timestamps(portdb, portdir):
return os.EX_OK
-def load_emerge_config(trees=None):
+class _emerge_config(SlotObject):
+
+ __slots__ = ('action', 'args', 'opts',
+ 'running_config', 'target_config', 'trees')
+
+ # Support unpack as tuple, for load_emerge_config backward compatibility.
+ def __iter__(self):
+ yield self.target_config.settings
+ yield self.trees
+ yield self.target_config.mtimedb
+
+ def __getitem__(self, index):
+ return list(self)[index]
+
+ def __len__(self):
+ return 3
+
+def load_emerge_config(emerge_config=None, **kargs):
+
+ if emerge_config is None:
+ emerge_config = _emerge_config(**kargs)
+
kwargs = {}
- for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT"),
+ ("eprefix", "EPREFIX")):
v = os.environ.get(envvar, None)
if v and v.strip():
kwargs[k] = v
- trees = portage.create_trees(trees=trees, **kwargs)
+ emerge_config.trees = portage.create_trees(trees=emerge_config.trees,
+ **portage._native_kwargs(kwargs))
- for root_trees in trees.values():
+ for root_trees in emerge_config.trees.values():
settings = root_trees["vartree"].settings
settings._init_dirs()
setconfig = load_default_config(settings, root_trees)
root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
- settings = trees[trees._target_eroot]['vartree'].settings
- mtimedbfile = os.path.join(settings['EROOT'], portage.CACHE_PATH, "mtimedb")
- mtimedb = portage.MtimeDB(mtimedbfile)
- QueryCommand._db = trees
- return settings, trees, mtimedb
-
-def chk_updated_cfg_files(eroot, config_protect):
- target_root = eroot
- result = list(
- portage.util.find_updated_config_files(target_root, config_protect))
-
- for x in result:
- writemsg_level("\n %s " % (colorize("WARN", "* " + _("IMPORTANT:"))),
- level=logging.INFO, noiselevel=-1)
- if not x[1]: # it's a protected file
- writemsg_level( _("config file '%s' needs updating.\n") % x[0],
- level=logging.INFO, noiselevel=-1)
- else: # it's a protected dir
- if len(x[1]) == 1:
- head, tail = os.path.split(x[1][0])
- tail = tail[len("._cfg0000_"):]
- fpath = os.path.join(head, tail)
- writemsg_level(_("config file '%s' needs updating.\n") % fpath,
- level=logging.INFO, noiselevel=-1)
- else:
- writemsg_level( _("%d config files in '%s' need updating.\n") % \
- (len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
-
- if result:
- print(" "+yellow("*")+ " See the "+colorize("INFORM", _("CONFIGURATION FILES"))\
- + " " + _("section of the") + " " + bold("emerge"))
- print(" "+yellow("*")+ " " + _("man page to learn how to update config files."))
-
+ target_eroot = emerge_config.trees._target_eroot
+ emerge_config.target_config = \
+ emerge_config.trees[target_eroot]['root_config']
+ emerge_config.target_config.mtimedb = portage.MtimeDB(
+ os.path.join(target_eroot, portage.CACHE_PATH, "mtimedb"))
+ emerge_config.running_config = emerge_config.trees[
+ emerge_config.trees._running_eroot]['root_config']
+ QueryCommand._db = emerge_config.trees
-def display_news_notification(root_config, myopts):
- if "news" not in root_config.settings.features:
- return
- portdb = root_config.trees["porttree"].dbapi
- vardb = root_config.trees["vartree"].dbapi
- news_counts = count_unread_news(portdb, vardb)
- display_news_notifications(news_counts)
+ return emerge_config
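
# Aside: _emerge_config emulates the old (settings, trees, mtimedb) return
# value of load_emerge_config() through __iter__/__getitem__/__len__, so
# existing callers can keep unpacking it as a 3-tuple, as _sync_repo() does
# above. The same trick on a plain class (SlotObject is portage-internal,
# so __slots__ is used directly here):

class Config(object):
	__slots__ = ("settings", "trees", "mtimedb")

	def __init__(self, settings, trees, mtimedb):
		self.settings, self.trees, self.mtimedb = settings, trees, mtimedb

	# Legacy-tuple protocol: iteration order defines the unpack order.
	def __iter__(self):
		yield self.settings
		yield self.trees
		yield self.mtimedb

	def __getitem__(self, index):
		return list(self)[index]

	def __len__(self):
		return 3

cfg = Config({"ROOT": "/"}, {}, {})
settings, trees, mtimedb = cfg  # new-style object, old-style unpack
assert cfg[0] is settings and len(cfg) == 3
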
def getgccversion(chost):
"""
@@ -3140,3 +3296,771 @@ def getgccversion(chost):
portage.writemsg(gcc_not_found_error, noiselevel=-1)
return "[unavailable]"
+
+# Warn about features that may confuse users and
+# lead them to report invalid bugs.
+_emerge_features_warn = frozenset(['keeptemp', 'keepwork'])
+
+def validate_ebuild_environment(trees):
+ features_warn = set()
+ for myroot in trees:
+ settings = trees[myroot]["vartree"].settings
+ settings.validate()
+ features_warn.update(
+ _emerge_features_warn.intersection(settings.features))
+
+ if features_warn:
+ msg = "WARNING: The FEATURES variable contains one " + \
+ "or more values that should be disabled under " + \
+ "normal circumstances: %s" % " ".join(features_warn)
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 65):
+ out.ewarn(line)
+
+def check_procfs():
+ procfs_path = '/proc'
+ if platform.system() not in ("Linux",) or \
+ os.path.ismount(procfs_path):
+ return os.EX_OK
+ msg = "It seems that %s is not mounted. You have been warned." % procfs_path
+ writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+def config_protect_check(trees):
+ for root, root_trees in trees.items():
+ settings = root_trees["root_config"].settings
+ if not settings.get("CONFIG_PROTECT"):
+ msg = "!!! CONFIG_PROTECT is empty"
+ if settings["ROOT"] != "/":
+ msg += " for '%s'" % root
+ msg += "\n"
+ writemsg_level(msg, level=logging.WARN, noiselevel=-1)
+
+def apply_priorities(settings):
+ ionice(settings)
+ nice(settings)
+
+def nice(settings):
+ try:
+ os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
+ except (OSError, ValueError) as e:
+ out = portage.output.EOutput()
+ out.eerror("Failed to change nice value to '%s'" % \
+ settings["PORTAGE_NICENESS"])
+ out.eerror("%s\n" % str(e))
+
+def ionice(settings):
+
+ ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
+ if ionice_cmd:
+ ionice_cmd = portage.util.shlex_split(ionice_cmd)
+ if not ionice_cmd:
+ return
+
+ variables = {"PID" : str(os.getpid())}
+ cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
+
+ try:
+ rval = portage.process.spawn(cmd, env=os.environ)
+ except portage.exception.CommandNotFound:
+ # The OS kernel probably doesn't support ionice,
+ # so return silently.
+ return
+
+ if rval != os.EX_OK:
+ out = portage.output.EOutput()
+ out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
+ out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
+
+def setconfig_fallback(root_config):
+ setconfig = root_config.setconfig
+ setconfig._create_default_config()
+ setconfig._parse(update=True)
+ root_config.sets = setconfig.getSets()
+
+def get_missing_sets(root_config):
+ # emerge requires existence of "world", "selected", and "system"
+ missing_sets = []
+
+ for s in ("selected", "system", "world",):
+ if s not in root_config.sets:
+ missing_sets.append(s)
+
+ return missing_sets
+
+def missing_sets_warning(root_config, missing_sets):
+ if len(missing_sets) > 2:
+ missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
+ missing_sets_str += ', and "%s"' % missing_sets[-1]
+ elif len(missing_sets) == 2:
+ missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
+ else:
+ missing_sets_str = '"%s"' % missing_sets[-1]
+ msg = ["emerge: incomplete set configuration, " + \
+ "missing set(s): %s" % missing_sets_str]
+ if root_config.sets:
+ msg.append(" sets defined: %s" % ", ".join(root_config.sets))
+ global_config_path = portage.const.GLOBAL_CONFIG_PATH
+ if portage.const.EPREFIX:
+ global_config_path = os.path.join(portage.const.EPREFIX,
+ portage.const.GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ msg.append(" This usually means that '%s'" % \
+ (os.path.join(global_config_path, "sets/portage.conf"),))
+ msg.append(" is missing or corrupt.")
+ msg.append(" Falling back to default world and system set configuration!!!")
+ for line in msg:
+ writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
+
+def ensure_required_sets(trees):
+ warning_shown = False
+ for root_trees in trees.values():
+ missing_sets = get_missing_sets(root_trees["root_config"])
+ if missing_sets and not warning_shown:
+ warning_shown = True
+ missing_sets_warning(root_trees["root_config"], missing_sets)
+ if missing_sets:
+ setconfig_fallback(root_trees["root_config"])
+
+def expand_set_arguments(myfiles, myaction, root_config):
+ retval = os.EX_OK
+ setconfig = root_config.setconfig
+
+ sets = setconfig.getSets()
+
+ # In order to know exactly which atoms/sets should be added to the
+ # world file, the depgraph performs set expansion later. It will get
+ # confused about where the atoms came from if it's not allowed to
+ # expand them itself.
+ do_not_expand = myaction is None
+ newargs = []
+ for a in myfiles:
+ if a in ("system", "world"):
+ newargs.append(SETPREFIX+a)
+ else:
+ newargs.append(a)
+ myfiles = newargs
+ del newargs
+ newargs = []
+
+ # separators for set arguments
+ ARG_START = "{"
+ ARG_END = "}"
+
+ for i in range(0, len(myfiles)):
+ if myfiles[i].startswith(SETPREFIX):
+ start = 0
+ end = 0
+ x = myfiles[i][len(SETPREFIX):]
+ newset = ""
+ while x:
+ start = x.find(ARG_START)
+ end = x.find(ARG_END)
+ if start > 0 and start < end:
+ namepart = x[:start]
+ argpart = x[start+1:end]
+
+ # TODO: implement proper quoting
+ args = argpart.split(",")
+ options = {}
+ for a in args:
+ if "=" in a:
+ k, v = a.split("=", 1)
+ options[k] = v
+ else:
+ options[a] = "True"
+ setconfig.update(namepart, options)
+ newset += (x[:start-len(namepart)]+namepart)
+ x = x[end+len(ARG_END):]
+ else:
+ newset += x
+ x = ""
+ myfiles[i] = SETPREFIX+newset
+
+ sets = setconfig.getSets()
+
+ # display errors that occurred while loading the SetConfig instance
+ for e in setconfig.errors:
+ print(colorize("BAD", "Error during set creation: %s" % e))
+
+ unmerge_actions = ("unmerge", "prune", "clean", "depclean")
+
+ for a in myfiles:
+ if a.startswith(SETPREFIX):
+ s = a[len(SETPREFIX):]
+ if s not in sets:
+ display_missing_pkg_set(root_config, s)
+ return (None, 1)
+ if s == "installed":
+ msg = ("The @installed set is deprecated and will soon be "
+ "removed. Please refer to bug #387059 for details.")
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 50):
+ out.ewarn(line)
+ setconfig.active.append(s)
+
+ if do_not_expand:
+ # Loading sets can be slow, so skip it here, in order
+ # to allow the depgraph to indicate progress with the
+ # spinner while sets are loading (bug #461412).
+ newargs.append(a)
+ continue
+
+ try:
+ set_atoms = setconfig.getSetAtoms(s)
+ except portage.exception.PackageSetNotFound as e:
+ writemsg_level(("emerge: the given set '%s' " + \
+ "contains a non-existent set named '%s'.\n") % \
+ (s, e), level=logging.ERROR, noiselevel=-1)
+ if s in ('world', 'selected') and \
+ SETPREFIX + e.value in sets['selected']:
+ writemsg_level(("Use `emerge --deselect %s%s` to "
+ "remove this set from world_sets.\n") %
+ (SETPREFIX, e,), level=logging.ERROR,
+ noiselevel=-1)
+ return (None, 1)
+ if myaction in unmerge_actions and \
+ not sets[s].supportsOperation("unmerge"):
+ writemsg_level("emerge: the given set '%s' does " % s + \
+ "not support unmerge operations\n",
+ level=logging.ERROR, noiselevel=-1)
+ retval = 1
+ elif not set_atoms:
+ writemsg_level("emerge: '%s' is an empty set\n" % s,
+ level=logging.INFO, noiselevel=-1)
+ else:
+ newargs.extend(set_atoms)
+ for error_msg in sets[s].errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ newargs.append(a)
+ return (newargs, retval)
+
+def repo_name_check(trees):
+ missing_repo_names = set()
+ for root_trees in trees.values():
+ porttree = root_trees.get("porttree")
+ if porttree:
+ portdb = porttree.dbapi
+ missing_repo_names.update(portdb.getMissingRepoNames())
+
+ # Skip warnings about missing repo_name entries for
+ # /usr/local/portage (see bug #248603).
+ try:
+ missing_repo_names.remove('/usr/local/portage')
+ except KeyError:
+ pass
+
+ if missing_repo_names:
+ msg = []
+ msg.append("WARNING: One or more repositories " + \
+ "have missing repo_name entries:")
+ msg.append("")
+ for p in missing_repo_names:
+ msg.append("\t%s/profiles/repo_name" % (p,))
+ msg.append("")
+ msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
+ "should be a plain text file containing a unique " + \
+ "name for the repository on the first line.", 70))
+ msg.append("\n")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ return bool(missing_repo_names)
+
+def repo_name_duplicate_check(trees):
+ ignored_repos = {}
+ for root, root_trees in trees.items():
+ if 'porttree' in root_trees:
+ portdb = root_trees['porttree'].dbapi
+ if portdb.settings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
+ for repo_name, paths in portdb.getIgnoredRepos():
+ k = (root, repo_name, portdb.getRepositoryPath(repo_name))
+ ignored_repos.setdefault(k, []).extend(paths)
+
+ if ignored_repos:
+ msg = []
+ msg.append('WARNING: One or more repositories ' + \
+ 'have been ignored due to duplicate')
+ msg.append(' profiles/repo_name entries:')
+ msg.append('')
+ for k in sorted(ignored_repos):
+ msg.append(' %s overrides' % ", ".join(k))
+ for path in ignored_repos[k]:
+ msg.append(' %s' % (path,))
+ msg.append('')
+ msg.extend(' ' + x for x in textwrap.wrap(
+ "All profiles/repo_name entries must be unique in order " + \
+ "to avoid having duplicates ignored. " + \
+ "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
+ "/etc/portage/make.conf if you would like to disable this warning."))
+ msg.append("\n")
+ writemsg_level(''.join('%s\n' % l for l in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ return bool(ignored_repos)
+
+def run_action(emerge_config):
+
+ # skip global updates prior to sync, since it's called after sync
+ if emerge_config.action not in ('help', 'info', 'sync', 'version') and \
+ emerge_config.opts.get('--package-moves') != 'n' and \
+ _global_updates(emerge_config.trees,
+ emerge_config.target_config.mtimedb["updates"],
+ quiet=("--quiet" in emerge_config.opts)):
+ emerge_config.target_config.mtimedb.commit()
+ # Reload the whole config from scratch.
+ load_emerge_config(emerge_config=emerge_config)
+
+ xterm_titles = "notitles" not in \
+ emerge_config.target_config.settings.features
+ if xterm_titles:
+ xtermTitle("emerge")
+
+ if "--digest" in emerge_config.opts:
+ os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
+ # Reload the whole config from scratch so that the portdbapi internal
+ # config is updated with new FEATURES.
+ load_emerge_config(emerge_config=emerge_config)
+
+ # NOTE: adjust_configs() can map options to FEATURES, so any relevant
+ # options adjustments should be made prior to calling adjust_configs().
+ if "--buildpkgonly" in emerge_config.opts:
+ emerge_config.opts["--buildpkg"] = True
+
+ if "getbinpkg" in emerge_config.target_config.settings.features:
+ emerge_config.opts["--getbinpkg"] = True
+
+ if "--getbinpkgonly" in emerge_config.opts:
+ emerge_config.opts["--getbinpkg"] = True
+
+ if "--getbinpkgonly" in emerge_config.opts:
+ emerge_config.opts["--usepkgonly"] = True
+
+ if "--getbinpkg" in emerge_config.opts:
+ emerge_config.opts["--usepkg"] = True
+
+ if "--usepkgonly" in emerge_config.opts:
+ emerge_config.opts["--usepkg"] = True
+
+ if "--buildpkgonly" in emerge_config.opts:
+ # --buildpkgonly will not merge anything, so
+ # it cancels all binary package options.
+ for opt in ("--getbinpkg", "--getbinpkgonly",
+ "--usepkg", "--usepkgonly"):
+ emerge_config.opts.pop(opt, None)
+
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+ apply_priorities(emerge_config.target_config.settings)
+
+ for fmt in emerge_config.target_config.settings["PORTAGE_BINPKG_FORMAT"].split():
+ if not fmt in portage.const.SUPPORTED_BINPKG_FORMATS:
+ if "--pkg-format" in emerge_config.opts:
+ problematic="--pkg-format"
+ else:
+ problematic="PORTAGE_BINPKG_FORMAT"
+
+ writemsg_level(("emerge: %s is not set correctly. Format " + \
+ "'%s' is not supported.\n") % (problematic, fmt),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if emerge_config.action == 'version':
+ writemsg_stdout(getportageversion(
+ emerge_config.target_config.settings["PORTDIR"],
+ None,
+ emerge_config.target_config.settings.profile_path,
+ emerge_config.target_config.settings["CHOST"],
+ emerge_config.target_config.trees['vartree'].dbapi) + '\n',
+ noiselevel=-1)
+ return 0
+ elif emerge_config.action == 'help':
+ emerge_help()
+ return 0
+
+ spinner = stdout_spinner()
+ if "candy" in emerge_config.target_config.settings.features:
+ spinner.update = spinner.update_scroll
+
+ if "--quiet" not in emerge_config.opts:
+ portage.deprecated_profile_check(
+ settings=emerge_config.target_config.settings)
+ repo_name_check(emerge_config.trees)
+ repo_name_duplicate_check(emerge_config.trees)
+ config_protect_check(emerge_config.trees)
+ check_procfs()
+
+ for mytrees in emerge_config.trees.values():
+ mydb = mytrees["porttree"].dbapi
+ # Freeze the portdbapi for performance (memoize all xmatch results).
+ mydb.freeze()
+
+ if emerge_config.action in ('search', None) and \
+ "--usepkg" in emerge_config.opts:
+ # Populate the bintree with current --getbinpkg setting.
+ # This needs to happen before expand_set_arguments(), in case
+ # any sets use the bintree.
+ try:
+ mytrees["bintree"].populate(
+ getbinpkgs="--getbinpkg" in emerge_config.opts)
+ except ParseError as e:
+ writemsg("\n\n!!!%s.\nSee make.conf(5) for more info.\n"
+ % e, noiselevel=-1)
+ return 1
+
+ del mytrees, mydb
+
+ for x in emerge_config.args:
+ if x.endswith((".ebuild", ".tbz2")) and \
+ os.path.exists(os.path.abspath(x)):
+ print(colorize("BAD", "\n*** emerging by path is broken "
+ "and may not always work!!!\n"))
+ break
+
+ if emerge_config.action == "list-sets":
+ writemsg_stdout("".join("%s\n" % s for s in
+ sorted(emerge_config.target_config.sets)))
+ return os.EX_OK
+ elif emerge_config.action == "check-news":
+ news_counts = count_unread_news(
+ emerge_config.target_config.trees["porttree"].dbapi,
+ emerge_config.target_config.trees["vartree"].dbapi)
+ if any(news_counts.values()):
+ display_news_notifications(news_counts)
+ elif "--quiet" not in emerge_config.opts:
+ print("", colorize("GOOD", "*"), "No news items were found.")
+ return os.EX_OK
+
+ ensure_required_sets(emerge_config.trees)
+
+ if emerge_config.action is None and \
+ "--resume" in emerge_config.opts and emerge_config.args:
+ writemsg("emerge: unexpected argument(s) for --resume: %s\n" %
+ " ".join(emerge_config.args), noiselevel=-1)
+ return 1
+
+ # only expand sets for actions taking package arguments
+ oldargs = emerge_config.args[:]
+ if emerge_config.action in ("clean", "config", "depclean",
+ "info", "prune", "unmerge", None):
+ newargs, retval = expand_set_arguments(
+ emerge_config.args, emerge_config.action,
+ emerge_config.target_config)
+ if retval != os.EX_OK:
+ return retval
+
+ # Need to handle empty sets specially, otherwise emerge will react
+ # with the help message for empty argument lists
+ if oldargs and not newargs:
+ print("emerge: no targets left after set expansion")
+ return 0
+
+ emerge_config.args = newargs
+
+ if "--tree" in emerge_config.opts and \
+ "--columns" in emerge_config.opts:
+ print("emerge: can't specify both of \"--tree\" and \"--columns\".")
+ return 1
+
+ if '--emptytree' in emerge_config.opts and \
+ '--noreplace' in emerge_config.opts:
+ writemsg_level("emerge: can't specify both of " + \
+ "\"--emptytree\" and \"--noreplace\".\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if ("--quiet" in emerge_config.opts):
+ spinner.update = spinner.update_quiet
+ portage.util.noiselimit = -1
+
+ if "--fetch-all-uri" in emerge_config.opts:
+ emerge_config.opts["--fetchonly"] = True
+
+ if "--skipfirst" in emerge_config.opts and \
+ "--resume" not in emerge_config.opts:
+ emerge_config.opts["--resume"] = True
+
+ # Allow -p to remove --ask
+ if "--pretend" in emerge_config.opts:
+ emerge_config.opts.pop("--ask", None)
+
+ # forbid --ask when not in a terminal
+ # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
+ if ("--ask" in emerge_config.opts) and (not sys.stdin.isatty()):
+ portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
+ noiselevel=-1)
+ return 1
+
+ if emerge_config.target_config.settings.get("PORTAGE_DEBUG", "") == "1":
+ spinner.update = spinner.update_quiet
+ portage.util.noiselimit = 0
+ if "python-trace" in emerge_config.target_config.settings.features:
+ portage.debug.set_trace(True)
+
+ if not ("--quiet" in emerge_config.opts):
+ if '--nospinner' in emerge_config.opts or \
+ emerge_config.target_config.settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty():
+ spinner.update = spinner.update_basic
+
+ if "--debug" in emerge_config.opts:
+ print("myaction", emerge_config.action)
+ print("myopts", emerge_config.opts)
+
+ if not emerge_config.action and not emerge_config.args and \
+ "--resume" not in emerge_config.opts:
+ emerge_help()
+ return 1
+
+ pretend = "--pretend" in emerge_config.opts
+ fetchonly = "--fetchonly" in emerge_config.opts or \
+ "--fetch-all-uri" in emerge_config.opts
+ buildpkgonly = "--buildpkgonly" in emerge_config.opts
+
+ # check if root user is the current user for the actions where emerge needs this
+ if portage.data.secpass < 2:
+ # We've already allowed "--version" and "--help" above.
+ if "--pretend" not in emerge_config.opts and \
+ emerge_config.action not in ("search", "info"):
+ need_superuser = emerge_config.action in ('clean', 'depclean',
+ 'deselect', 'prune', 'unmerge') or not \
+ (fetchonly or \
+ (buildpkgonly and portage.data.secpass >= 1) or \
+ emerge_config.action in ("metadata", "regen", "sync"))
+ if portage.data.secpass < 1 or \
+ need_superuser:
+ if need_superuser:
+ access_desc = "superuser"
+ else:
+ access_desc = "portage group"
+ # Always show portage_group_warning() when only portage group
+ # access is required but the user is not in the portage group.
+ if "--ask" in emerge_config.opts:
+ writemsg_stdout("This action requires %s access...\n" % \
+ (access_desc,), noiselevel=-1)
+ if portage.data.secpass < 1 and not need_superuser:
+ portage.data.portage_group_warning()
+ if userquery("Would you like to add --pretend to options?",
+ "--ask-enter-invalid" in emerge_config.opts) == "No":
+ return 128 + signal.SIGINT
+ emerge_config.opts["--pretend"] = True
+ emerge_config.opts.pop("--ask")
+ else:
+ sys.stderr.write(("emerge: %s access is required\n") \
+ % access_desc)
+ if portage.data.secpass < 1 and not need_superuser:
+ portage.data.portage_group_warning()
+ return 1
+
+ # Disable emergelog for everything except build or unmerge operations.
+ # This helps minimize parallel emerge.log entries that can confuse log
+ # parsers like genlop.
+ disable_emergelog = False
+ for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
+ if x in emerge_config.opts:
+ disable_emergelog = True
+ break
+ if disable_emergelog:
+ pass
+ elif emerge_config.action in ("search", "info"):
+ disable_emergelog = True
+ elif portage.data.secpass < 1:
+ disable_emergelog = True
+
+ import _emerge.emergelog
+ _emerge.emergelog._disable = disable_emergelog
+
+ if not disable_emergelog:
+ emerge_log_dir = \
+ emerge_config.target_config.settings.get('EMERGE_LOG_DIR')
+ if emerge_log_dir:
+ try:
+ # At least the parent needs to exist for the lock file.
+ portage.util.ensure_dirs(emerge_log_dir)
+ except portage.exception.PortageException as e:
+ writemsg_level("!!! Error creating directory for " + \
+ "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
+ (emerge_log_dir, e),
+ noiselevel=-1, level=logging.ERROR)
+ portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
+ else:
+ _emerge.emergelog._emerge_log_dir = emerge_log_dir
+ else:
+ _emerge.emergelog._emerge_log_dir = os.path.join(os.sep,
+ portage.const.EPREFIX.lstrip(os.sep), "var", "log")
+ portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
+
+ if not "--pretend" in emerge_config.opts:
+ time_fmt = "%b %d, %Y %H:%M:%S"
+ if sys.hexversion < 0x3000000:
+ time_fmt = portage._unicode_encode(time_fmt)
+ time_str = time.strftime(time_fmt, time.localtime(time.time()))
+ # Avoid potential UnicodeDecodeError in Python 2, since strftime
+ # returns bytes in Python 2, and %b may contain non-ascii chars.
+ time_str = _unicode_decode(time_str,
+ encoding=_encodings['content'], errors='replace')
+ emergelog(xterm_titles, "Started emerge on: %s" % time_str)
+ myelogstr=""
+ if emerge_config.opts:
+ opt_list = []
+ for opt, arg in emerge_config.opts.items():
+ if arg is True:
+ opt_list.append(opt)
+ elif isinstance(arg, list):
+ # arguments like --exclude that use 'append' action
+ for x in arg:
+ opt_list.append("%s=%s" % (opt, x))
+ else:
+ opt_list.append("%s=%s" % (opt, arg))
+ myelogstr=" ".join(opt_list)
+ if emerge_config.action:
+ myelogstr += " --" + emerge_config.action
+ if oldargs:
+ myelogstr += " " + " ".join(oldargs)
+ emergelog(xterm_titles, " *** emerge " + myelogstr)
+
+ oldargs = None
+
+ def emergeexitsig(signum, frame):
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg(
+ "\n\nExiting on signal %(signal)s\n" % {"signal":signum})
+ sys.exit(128 + signum)
+
+ signal.signal(signal.SIGTERM, emergeexitsig)
+
+ def emergeexit():
+		"""This gets our final log message in before we quit."""
+ if "--pretend" not in emerge_config.opts:
+ emergelog(xterm_titles, " *** terminating.")
+ if xterm_titles:
+ xtermTitleReset()
+ portage.atexit_register(emergeexit)
+
+ if emerge_config.action in ("config", "metadata", "regen", "sync"):
+ if "--pretend" in emerge_config.opts:
+ sys.stderr.write(("emerge: The '%s' action does " + \
+ "not support '--pretend'.\n") % emerge_config.action)
+ return 1
+
+ if "sync" == emerge_config.action:
+ return action_sync(emerge_config)
+ elif "metadata" == emerge_config.action:
+ action_metadata(emerge_config.target_config.settings,
+ emerge_config.target_config.trees['porttree'].dbapi,
+ emerge_config.opts)
+ elif emerge_config.action=="regen":
+ validate_ebuild_environment(emerge_config.trees)
+ return action_regen(emerge_config.target_config.settings,
+ emerge_config.target_config.trees['porttree'].dbapi,
+ emerge_config.opts.get("--jobs"),
+ emerge_config.opts.get("--load-average"))
+ # HELP action
+ elif "config" == emerge_config.action:
+ validate_ebuild_environment(emerge_config.trees)
+ action_config(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.opts, emerge_config.args)
+
+ # SEARCH action
+ elif "search" == emerge_config.action:
+ validate_ebuild_environment(emerge_config.trees)
+ action_search(emerge_config.target_config,
+ emerge_config.opts, emerge_config.args, spinner)
+
+ elif emerge_config.action in \
+ ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
+ validate_ebuild_environment(emerge_config.trees)
+ rval = action_uninstall(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.target_config.mtimedb["ldpath"],
+ emerge_config.opts, emerge_config.action,
+ emerge_config.args, spinner)
+ if not (emerge_config.action == 'deselect' or
+ buildpkgonly or fetchonly or pretend):
+ post_emerge(emerge_config.action, emerge_config.opts,
+ emerge_config.args, emerge_config.target_config.root,
+ emerge_config.trees, emerge_config.target_config.mtimedb, rval)
+ return rval
+
+ elif emerge_config.action == 'info':
+
+ # Ensure atoms are valid before calling unmerge().
+ vardb = emerge_config.target_config.trees['vartree'].dbapi
+ portdb = emerge_config.target_config.trees['porttree'].dbapi
+ bindb = emerge_config.target_config.trees['bintree'].dbapi
+ valid_atoms = []
+ for x in emerge_config.args:
+ if is_valid_package_atom(x, allow_repo=True):
+ try:
+ #look at the installed files first, if there is no match
+ #look at the ebuilds, since EAPI 4 allows running pkg_info
+ #on non-installed packages
+ valid_atom = dep_expand(x, mydb=vardb)
+ if valid_atom.cp.split("/")[0] == "null":
+ valid_atom = dep_expand(x, mydb=portdb)
+
+ if valid_atom.cp.split("/")[0] == "null" and \
+ "--usepkg" in emerge_config.opts:
+ valid_atom = dep_expand(x, mydb=bindb)
+
+ valid_atoms.append(valid_atom)
+
+ except portage.exception.AmbiguousPackageName as e:
+ msg = "The short ebuild name \"" + x + \
+ "\" is ambiguous. Please specify " + \
+ "one of the following " + \
+ "fully-qualified ebuild names instead:"
+ for line in textwrap.wrap(msg, 70):
+ writemsg_level("!!! %s\n" % (line,),
+ level=logging.ERROR, noiselevel=-1)
+ for i in e.args[0]:
+ writemsg_level(" %s\n" % colorize("INFORM", i),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
+ return 1
+ continue
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ return action_info(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.opts, valid_atoms)
+
+ # "update", "system", or just process files:
+ else:
+ validate_ebuild_environment(emerge_config.trees)
+
+ for x in emerge_config.args:
+ if x.startswith(SETPREFIX) or \
+ is_valid_package_atom(x, allow_repo=True):
+ continue
+ if x[:1] == os.sep:
+ continue
+ try:
+ os.lstat(x)
+ continue
+ except OSError:
+ pass
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ # GLEP 42 says to display news *after* an emerge --pretend
+ if "--pretend" not in emerge_config.opts:
+ display_news_notification(
+ emerge_config.target_config, emerge_config.opts)
+ retval = action_build(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.target_config.mtimedb,
+ emerge_config.opts, emerge_config.action,
+ emerge_config.args, spinner)
+ post_emerge(emerge_config.action, emerge_config.opts,
+ emerge_config.args, emerge_config.target_config.root,
+ emerge_config.trees, emerge_config.target_config.mtimedb, retval)
+
+ return retval
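
# Aside: run_action() normalizes the binary-package options before
# adjust_configs() runs: --getbinpkgonly implies --getbinpkg and
# --usepkgonly, --getbinpkg and --usepkgonly each imply --usepkg, and
# --buildpkgonly cancels the whole group since nothing will be merged.
# The cascade, condensed into a sketch over a plain dict (the feature-flag
# and --buildpkg handling above are omitted):

def normalize_binpkg_opts(opts):
	if "--getbinpkgonly" in opts:
		opts["--getbinpkg"] = True
		opts["--usepkgonly"] = True
	if "--getbinpkg" in opts:
		opts["--usepkg"] = True
	if "--usepkgonly" in opts:
		opts["--usepkg"] = True
	if "--buildpkgonly" in opts:
		# --buildpkgonly merges nothing, so binary options are moot.
		for opt in ("--getbinpkg", "--getbinpkgonly",
				"--usepkg", "--usepkgonly"):
			opts.pop(opt, None)
	return opts

opts = normalize_binpkg_opts({"--getbinpkgonly": True})
assert opts["--getbinpkg"] and opts["--usepkg"]
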
diff --git a/pym/_emerge/chk_updated_cfg_files.py b/pym/_emerge/chk_updated_cfg_files.py
new file mode 100644
index 000000000..9f2ab6f3e
--- /dev/null
+++ b/pym/_emerge/chk_updated_cfg_files.py
@@ -0,0 +1,42 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+
+import portage
+from portage import os
+from portage.localization import _
+from portage.output import bold, colorize, yellow
+from portage.util import writemsg_level
+
+def chk_updated_cfg_files(eroot, config_protect):
+ target_root = eroot
+ result = list(
+ portage.util.find_updated_config_files(target_root, config_protect))
+
+ for x in result:
+ writemsg_level("\n %s " % (colorize("WARN", "* " + _("IMPORTANT:"))),
+ level=logging.INFO, noiselevel=-1)
+ if not x[1]: # it's a protected file
+ writemsg_level( _("config file '%s' needs updating.\n") % x[0],
+ level=logging.INFO, noiselevel=-1)
+ else: # it's a protected dir
+ if len(x[1]) == 1:
+ head, tail = os.path.split(x[1][0])
+ tail = tail[len("._cfg0000_"):]
+ fpath = os.path.join(head, tail)
+ writemsg_level(_("config file '%s' needs updating.\n") % fpath,
+ level=logging.INFO, noiselevel=-1)
+ else:
+ writemsg_level(
+ _("%d config files in '%s' need updating.\n") % \
+ (len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
+
+ if result:
+ print(" " + yellow("*") + " See the " +
+ colorize("INFORM", _("CONFIGURATION FILES")) +
+ " " + _("section of the") + " " + bold("emerge"))
+ print(" " + yellow("*") + " " +
+ _("man page to learn how to update config files."))
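
# Aside: for reference, the call site added in action_sync() above passes
# the target root and the shlex-split CONFIG_PROTECT value:
#
#   chk_updated_cfg_files(emerge_config.target_config.root,
#       portage.util.shlex_split(
#           emerge_config.target_config.settings.get("CONFIG_PROTECT", "")))
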
diff --git a/pym/_emerge/clear_caches.py b/pym/_emerge/clear_caches.py
index 7b7c5eced..513df626f 100644
--- a/pym/_emerge/clear_caches.py
+++ b/pym/_emerge/clear_caches.py
@@ -1,8 +1,7 @@
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import gc
-from portage.util.listdir import dircache
def clear_caches(trees):
for d in trees.values():
@@ -15,5 +14,4 @@ def clear_caches(trees):
pass
else:
d["vartree"].dbapi._linkmap._clear_cache()
- dircache.clear()
gc.collect()
diff --git a/pym/_emerge/countdown.py b/pym/_emerge/countdown.py
index 5abdc8a96..62e3c8dea 100644
--- a/pym/_emerge/countdown.py
+++ b/pym/_emerge/countdown.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -8,15 +8,15 @@ import time
from portage.output import colorize
-def countdown(secs=5, doing="Starting"):
+
+def countdown(secs=5, doing='Starting'):
if secs:
- print(">>> Waiting",secs,"seconds before starting...")
- print(">>> (Control-C to abort)...\n"+doing+" in: ", end=' ')
- ticks=list(range(secs))
- ticks.reverse()
- for sec in ticks:
- sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
+ print(
+ '>>> Waiting %s seconds before starting...\n'
+ '>>> (Control-C to abort)...\n'
+ '%s in:' % (secs, doing), end='')
+ for sec in range(secs, 0, -1):
+ sys.stdout.write(colorize('UNMERGE_WARN', ' %i' % sec))
sys.stdout.flush()
time.sleep(1)
print()
-
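
# Aside: the rewrite keeps countdown()'s behavior while collapsing the tick
# loop into range(secs, 0, -1). For example, countdown(3, 'Unmerging')
# prints:
#
#   >>> Waiting 3 seconds before starting...
#   >>> (Control-C to abort)...
#   Unmerging in: 3 2 1
#
# with a one-second pause between ticks and UNMERGE_WARN coloring on each
# number.
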
diff --git a/pym/_emerge/create_depgraph_params.py b/pym/_emerge/create_depgraph_params.py
index 2838e93c3..225b792b6 100644
--- a/pym/_emerge/create_depgraph_params.py
+++ b/pym/_emerge/create_depgraph_params.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import logging
@@ -15,11 +15,11 @@ def create_depgraph_params(myopts, myaction):
# complete: completely account for all known dependencies
# remove: build graph for use in removing packages
# rebuilt_binaries: replace installed packages with rebuilt binaries
- # rebuild_if_new_slot_abi: rebuild or reinstall packages when
- # SLOT/ABI := operator dependencies can be satisfied by a newer
- # SLOT/ABI, so that older packages slots will become eligible for
+ # rebuild_if_new_slot: rebuild or reinstall packages when
+ # slot/sub-slot := operator dependencies can be satisfied by a newer
+	# slot/sub-slot, so that older package slots will become eligible for
# removal by the --depclean action as soon as possible
- # ignore_built_slot_abi_deps: ignore the SLOT/ABI := operator parts
+ # ignore_built_slot_operator_deps: ignore the slot/sub-slot := operator parts
	# of dependencies that have been recorded when packages were built
myparams = {"recurse" : True}
@@ -27,9 +27,9 @@ def create_depgraph_params(myopts, myaction):
if bdeps is not None:
myparams["bdeps"] = bdeps
- ignore_built_slot_abi_deps = myopts.get("--ignore-built-slot-abi-deps")
- if ignore_built_slot_abi_deps is not None:
- myparams["ignore_built_slot_abi_deps"] = ignore_built_slot_abi_deps
+ ignore_built_slot_operator_deps = myopts.get("--ignore-built-slot-operator-deps")
+ if ignore_built_slot_operator_deps is not None:
+ myparams["ignore_built_slot_operator_deps"] = ignore_built_slot_operator_deps
dynamic_deps = myopts.get("--dynamic-deps")
if dynamic_deps is not None:
@@ -41,11 +41,12 @@ def create_depgraph_params(myopts, myaction):
myparams["selective"] = True
return myparams
- rebuild_if_new_slot_abi = myopts.get('--rebuild-if-new-slot-abi')
- if rebuild_if_new_slot_abi is not None:
- myparams['rebuild_if_new_slot_abi'] = rebuild_if_new_slot_abi
+ rebuild_if_new_slot = myopts.get('--rebuild-if-new-slot')
+ if rebuild_if_new_slot is not None:
+ myparams['rebuild_if_new_slot'] = rebuild_if_new_slot
if "--update" in myopts or \
+ "--newrepo" in myopts or \
"--newuse" in myopts or \
"--reinstall" in myopts or \
"--noreplace" in myopts or \
diff --git a/pym/_emerge/create_world_atom.py b/pym/_emerge/create_world_atom.py
index 35fb7c4bd..ac994cc04 100644
--- a/pym/_emerge/create_world_atom.py
+++ b/pym/_emerge/create_world_atom.py
@@ -1,7 +1,15 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import sys
+
from portage.dep import _repo_separator
+from portage.exception import InvalidData
+
+if sys.hexversion >= 0x3000000:
+ _unicode = str
+else:
+ _unicode = unicode
def create_world_atom(pkg, args_set, root_config):
"""Create a new atom for the world file if one does not exist. If the
@@ -35,16 +43,15 @@ def create_world_atom(pkg, args_set, root_config):
for cpv in portdb.match(cp):
for repo in repos:
try:
- available_slots.add(portdb.aux_get(cpv, ["SLOT"],
- myrepo=repo)[0])
- except KeyError:
+ available_slots.add(portdb._pkg_str(_unicode(cpv), repo).slot)
+ except (KeyError, InvalidData):
pass
slotted = len(available_slots) > 1 or \
(len(available_slots) == 1 and "0" not in available_slots)
if not slotted:
# check the vdb in case this is multislot
- available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
+ available_slots = set(vardb._pkg_str(cpv, None).slot \
for cpv in vardb.match(cp))
slotted = len(available_slots) > 1 or \
(len(available_slots) == 1 and "0" not in available_slots)
@@ -83,14 +90,14 @@ def create_world_atom(pkg, args_set, root_config):
matched_slots = set()
if mydb is vardb:
for cpv in matches:
- matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
+ matched_slots.add(mydb._pkg_str(cpv, None).slot)
else:
for cpv in matches:
for repo in repos:
try:
- matched_slots.add(portdb.aux_get(cpv, ["SLOT"],
- myrepo=repo)[0])
- except KeyError:
+ matched_slots.add(
+ portdb._pkg_str(_unicode(cpv), repo).slot)
+ except (KeyError, InvalidData):
pass
if len(matched_slots) == 1:
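
The slotted test that these hunks feed is worth spelling out: a package counts as slotted when more than one SLOT is visible, or when its only SLOT is something other than the default "0". Isolated (is_slotted is an invented name):

    def is_slotted(available_slots):
        # available_slots: set of SLOT strings, e.g. {"0"} or {"2", "3.5"}
        return len(available_slots) > 1 or \
            (len(available_slots) == 1 and "0" not in available_slots)

    assert not is_slotted({"0"})   # single default slot: unslotted
    assert is_slotted({"2"})       # single non-default slot
    assert is_slotted({"0", "1"})  # multiple slots
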
diff --git a/pym/_emerge/depgraph.py b/pym/_emerge/depgraph.py
index 0f3bc9389..abb70a769 100644
--- a/pym/_emerge/depgraph.py
+++ b/pym/_emerge/depgraph.py
@@ -1,34 +1,38 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
-import difflib
+import collections
import errno
import io
import logging
import stat
import sys
import textwrap
+import warnings
from collections import deque
from itertools import chain
import portage
from portage import os, OrderedDict
from portage import _unicode_decode, _unicode_encode, _encodings
-from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
+from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS
from portage.dbapi import dbapi
from portage.dbapi.dep_expand import dep_expand
+from portage.dbapi._similar_name_search import similar_name_search
from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
check_required_use, human_readable_required_use, match_from_list, \
_repo_separator
-from portage.dep._slot_abi import ignore_built_slot_abi_deps
-from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
-from portage.exception import (InvalidAtom, InvalidDependString,
+from portage.dep._slot_operator import ignore_built_slot_operator_deps
+from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \
+ _get_eapi_attrs
+from portage.exception import (InvalidAtom, InvalidData, InvalidDependString,
PackageNotFound, PortageException)
from portage.output import colorize, create_color_func, \
darkgreen, green
bad = create_color_func("BAD")
+from portage.package.ebuild.config import _get_feature_flags
from portage.package.ebuild.getmaskingstatus import \
_getmaskingstatus, _MaskReason
from portage._sets import SETPREFIX
@@ -38,13 +42,16 @@ from portage.util import cmp_sort_key, writemsg, writemsg_stdout
from portage.util import ensure_dirs
from portage.util import writemsg_level, write_atomic
from portage.util.digraph import digraph
-from portage.util.listdir import _ignorecvs_dirs
+from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
from portage.versions import catpkgsplit
from _emerge.AtomArg import AtomArg
from _emerge.Blocker import Blocker
from _emerge.BlockerCache import BlockerCache
from _emerge.BlockerDepPriority import BlockerDepPriority
+from .chk_updated_cfg_files import chk_updated_cfg_files
from _emerge.countdown import countdown
from _emerge.create_world_atom import create_world_atom
from _emerge.Dependency import Dependency
@@ -52,6 +59,7 @@ from _emerge.DependencyArg import DependencyArg
from _emerge.DepPriority import DepPriority
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
from _emerge.FakeVartree import FakeVartree
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge.is_valid_package_atom import insert_category_into_atom, \
@@ -68,9 +76,10 @@ from _emerge.UseFlagDisplay import pkg_use_display
from _emerge.userquery import userquery
from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
+from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper
from _emerge.resolver.slot_collision import slot_conflict_handler
from _emerge.resolver.circular_dependency import circular_dependency_handler
-from _emerge.resolver.output import Display
+from _emerge.resolver.output import Display, format_unmatched_atom
if sys.hexversion >= 0x3000000:
basestring = str
@@ -115,8 +124,8 @@ class _frozen_depgraph_config(object):
self._pkg_cache = {}
self._highest_license_masked = {}
dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
- ignore_built_slot_abi_deps = myopts.get(
- "--ignore-built-slot-abi-deps", "n") == "y"
+ ignore_built_slot_operator_deps = myopts.get(
+ "--ignore-built-slot-operator-deps", "n") == "y"
for myroot in trees:
self.trees[myroot] = {}
# Create a RootConfig instance that references
@@ -132,7 +141,7 @@ class _frozen_depgraph_config(object):
pkg_cache=self._pkg_cache,
pkg_root_config=self.roots[myroot],
dynamic_deps=dynamic_deps,
- ignore_built_slot_abi_deps=ignore_built_slot_abi_deps)
+ ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
self.pkgsettings[myroot] = portage.config(
clone=self.trees[myroot]["vartree"].settings)
@@ -267,13 +276,12 @@ class _rebuild_config(object):
return True
elif (parent.installed and
root_slot not in self.reinstall_list):
- inst_build_time = parent.metadata.get("BUILD_TIME")
try:
bin_build_time, = bindb.aux_get(parent.cpv,
["BUILD_TIME"])
except KeyError:
continue
- if bin_build_time != inst_build_time:
+ if bin_build_time != _unicode(parent.build_time):
# 2) Remote binary package is valid, and local package
# is not up to date. Force reinstall.
reinstall = True
@@ -335,11 +343,8 @@ class _dynamic_depgraph_config(object):
self.myparams = myparams.copy()
self._vdb_loaded = False
self._allow_backtracking = allow_backtracking
- # Maps slot atom to package for each Package added to the graph.
- self._slot_pkg_map = {}
# Maps nodes to the reasons they were selected for reinstallation.
self._reinstall_nodes = {}
- self.mydbapi = {}
# Contains a filtered view of preferred packages that are selected
# from available repositories.
self._filtered_trees = {}
@@ -374,14 +379,6 @@ class _dynamic_depgraph_config(object):
		# This is used to check whether we have accounted for blockers
# relevant to a package.
self._traversed_pkg_deps = set()
- # This should be ordered such that the backtracker will
- # attempt to solve conflicts which occurred earlier first,
- # since an earlier conflict can be the cause of a conflict
- # which occurs later.
- self._slot_collision_info = OrderedDict()
- # Slot collision nodes are not allowed to block other packages since
- # blocker validation is only able to account for one package per slot.
- self._slot_collision_nodes = set()
self._parent_atoms = {}
self._slot_conflict_handler = None
self._circular_dependency_handler = None
@@ -412,28 +409,31 @@ class _dynamic_depgraph_config(object):
self._needed_license_changes = backtrack_parameters.needed_license_changes
self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
- self._slot_abi_replace_installed = backtrack_parameters.slot_abi_replace_installed
+ self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
+ self._prune_rebuilds = backtrack_parameters.prune_rebuilds
self._need_restart = False
# For conditions that always require user intervention, such as
# unsatisfied REQUIRED_USE (currently has no autounmask support).
self._skip_restart = False
self._backtrack_infos = {}
+ self._buildpkgonly_deps_unsatisfied = False
self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
self._success_without_autounmask = False
self._traverse_ignored_deps = False
self._complete_mode = False
- self._slot_abi_deps = {}
+ self._slot_operator_deps = {}
+ self._package_tracker = PackageTracker()
+ # Track missed updates caused by solved conflicts.
+ self._conflict_missed_update = collections.defaultdict(dict)
for myroot in depgraph._frozen_config.trees:
self.sets[myroot] = _depgraph_sets()
- self._slot_pkg_map[myroot] = {}
vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
# This dbapi instance will model the state that the vdb will
# have after new packages have been installed.
- fakedb = PackageVirtualDbapi(vardb.settings)
+ fakedb = PackageTrackerDbapiWrapper(myroot, self._package_tracker)
- self.mydbapi[myroot] = fakedb
def graph_tree():
pass
graph_tree.dbapi = fakedb
@@ -446,6 +446,7 @@ class _dynamic_depgraph_config(object):
self._graph_trees[myroot]["vartree"] = graph_tree
self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
self._graph_trees[myroot]["graph"] = self.digraph
+ self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
def filtered_tree():
pass
filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
@@ -472,6 +473,7 @@ class _dynamic_depgraph_config(object):
self._filtered_trees[myroot]["graph"] = self.digraph
self._filtered_trees[myroot]["vartree"] = \
depgraph._frozen_config.trees[myroot]["vartree"]
+ self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
dbs = []
# (db, pkg_type, built, installed, db_keys)
@@ -502,8 +504,6 @@ class depgraph(object):
pkg_tree_map = RootConfig.pkg_tree_map
- _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
-
def __init__(self, settings, trees, myopts, myparams, spinner,
frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
if frozen_config is None:
@@ -517,6 +517,9 @@ class depgraph(object):
self._select_atoms = self._select_atoms_highest_available
self._select_package = self._select_pkg_highest_available
+ self._event_loop = (portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
+
def _load_vdb(self):
"""
Load installed package metadata if appropriate. This used to be called
@@ -535,10 +538,6 @@ class depgraph(object):
preload_installed_pkgs = \
"--nodeps" not in self._frozen_config.myopts
- if self._frozen_config.myopts.get("--root-deps") is not None and \
- myroot != self._frozen_config.target_root:
- continue
-
fake_vartree = self._frozen_config.trees[myroot]["vartree"]
if not fake_vartree.dbapi:
# This needs to be called for the first depgraph, but not for
@@ -552,24 +551,157 @@ class depgraph(object):
if preload_installed_pkgs:
vardb = fake_vartree.dbapi
- fakedb = self._dynamic_config._graph_trees[
- myroot]["vartree"].dbapi
- for pkg in vardb:
- self._spinner_update()
- if dynamic_deps:
- # This causes FakeVartree to update the
- # Package instance dependencies via
- # PackageVirtualDbapi.aux_update()
- vardb.aux_get(pkg.cpv, [])
- fakedb.cpv_inject(pkg)
+ if not dynamic_deps:
+ for pkg in vardb:
+ self._dynamic_config._package_tracker.add_installed_pkg(pkg)
+ else:
+ max_jobs = self._frozen_config.myopts.get("--jobs")
+ max_load = self._frozen_config.myopts.get("--load-average")
+ scheduler = TaskScheduler(
+ self._dynamic_deps_preload(fake_vartree),
+ max_jobs=max_jobs,
+ max_load=max_load,
+ event_loop=fake_vartree._portdb._event_loop)
+ scheduler.start()
+ scheduler.wait()
self._dynamic_config._vdb_loaded = True
+ def _dynamic_deps_preload(self, fake_vartree):
+ portdb = fake_vartree._portdb
+ for pkg in fake_vartree.dbapi:
+ self._spinner_update()
+ self._dynamic_config._package_tracker.add_installed_pkg(pkg)
+ ebuild_path, repo_path = \
+ portdb.findname2(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path is None:
+ fake_vartree.dynamic_deps_preload(pkg, None)
+ continue
+ metadata, ebuild_hash = portdb._pull_valid_cache(
+ pkg.cpv, ebuild_path, repo_path)
+ if metadata is not None:
+ fake_vartree.dynamic_deps_preload(pkg, metadata)
+ else:
+ proc = EbuildMetadataPhase(cpv=pkg.cpv,
+ ebuild_hash=ebuild_hash,
+ portdb=portdb, repo_path=repo_path,
+ settings=portdb.doebuild_settings)
+ proc.addExitListener(
+ self._dynamic_deps_proc_exit(pkg, fake_vartree))
+ yield proc
+
+ class _dynamic_deps_proc_exit(object):
+
+ __slots__ = ('_pkg', '_fake_vartree')
+
+ def __init__(self, pkg, fake_vartree):
+ self._pkg = pkg
+ self._fake_vartree = fake_vartree
+
+ def __call__(self, proc):
+ metadata = None
+ if proc.returncode == os.EX_OK:
+ metadata = proc.metadata
+ self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
+
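
The preload turns the old synchronous vardb walk into a generator of metadata-regeneration jobs that a TaskScheduler drains under the --jobs and --load-average limits, with a small callable recording each result as its process exits. A portage-free sketch of the same shape (Task, _proc_exit and jobs are invented stand-ins, and a plain loop stands in for the scheduler):

    class Task(object):
        """A job that reports completion to registered listeners."""
        def __init__(self, pkg):
            self.pkg = pkg
            self.returncode = None
            self._listeners = []
        def addExitListener(self, cb):
            self._listeners.append(cb)
        def run(self):
            self.returncode = 0              # pretend the work succeeded
            self.metadata = {"DEPEND": ""}
            for cb in self._listeners:
                cb(self)

    class _proc_exit(object):
        """Record the task's result, like _dynamic_deps_proc_exit."""
        __slots__ = ('_pkg', '_results')
        def __init__(self, pkg, results):
            self._pkg = pkg
            self._results = results
        def __call__(self, proc):
            self._results[self._pkg] = (
                proc.metadata if proc.returncode == 0 else None)

    def jobs(pkgs, results):
        # Generator of tasks; the consumer decides when each one runs.
        for pkg in pkgs:
            task = Task(pkg)
            task.addExitListener(_proc_exit(pkg, results))
            yield task

    results = {}
    for task in jobs(["app-misc/foo-1", "app-misc/bar-2"], results):
        task.run()
    print(results)
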
def _spinner_update(self):
if self._frozen_config.spinner:
self._frozen_config.spinner.update()
+ def _compute_abi_rebuild_info(self):
+ """
+ Fill self._forced_rebuilds with packages that cause rebuilds.
+ """
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ # Get all atoms that might have caused a forced rebuild.
+ atoms = {}
+ for s in self._dynamic_config._initial_arg_list:
+ if s.force_reinstall:
+ root = s.root_config.root
+ atoms.setdefault(root, set()).update(s.pset)
+
+ if debug:
+ writemsg_level("forced reinstall atoms:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for root in atoms:
+ writemsg_level(" root: %s\n" % root,
+ level=logging.DEBUG, noiselevel=-1)
+ for atom in atoms[root]:
+ writemsg_level(" atom: %s\n" % atom,
+ level=logging.DEBUG, noiselevel=-1)
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ # Go through all slot operator deps and check if one of these deps
+ # has a parent that is matched by one of the atoms from above.
+ forced_rebuilds = {}
+ for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items():
+ rebuild_atoms = atoms.get(root, set())
+
+ for dep in deps:
+ if getattr(dep.parent, "installed", False) or dep.child.installed or \
+ dep.parent.slot_atom not in rebuild_atoms:
+ continue
+
+			# Make sure the child's slot/sub-slot has changed. If it hasn't,
+ # then another child has forced this rebuild.
+ installed_pkg = self._select_pkg_from_installed(root, dep.child.slot_atom)[0]
+ if installed_pkg and installed_pkg.slot == dep.child.slot and \
+ installed_pkg.sub_slot == dep.child.sub_slot:
+ continue
+
+ # The child has forced a rebuild of the parent
+ forced_rebuilds.setdefault(root, {}).setdefault(dep.child, set()).add(dep.parent)
+
+ if debug:
+ writemsg_level("slot operator dependencies:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items():
+ writemsg_level(" (%s, %s)\n" % \
+ (root, slot_atom), level=logging.DEBUG, noiselevel=-1)
+ for dep in deps:
+ writemsg_level(" parent: %s\n" % dep.parent, level=logging.DEBUG, noiselevel=-1)
+ writemsg_level(" child: %s (%s)\n" % (dep.child, dep.priority), level=logging.DEBUG, noiselevel=-1)
+
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+
+ writemsg_level("forced rebuilds:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for root in forced_rebuilds:
+ writemsg_level(" root: %s\n" % root,
+ level=logging.DEBUG, noiselevel=-1)
+ for child in forced_rebuilds[root]:
+ writemsg_level(" child: %s\n" % child,
+ level=logging.DEBUG, noiselevel=-1)
+ for parent in forced_rebuilds[root][child]:
+ writemsg_level(" parent: %s\n" % parent,
+ level=logging.DEBUG, noiselevel=-1)
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ self._forced_rebuilds = forced_rebuilds
+
+ def _show_abi_rebuild_info(self):
+
+ if not self._forced_rebuilds:
+ return
+
+ writemsg_stdout("\nThe following packages are causing rebuilds:\n\n", noiselevel=-1)
+
+ for root in self._forced_rebuilds:
+ for child in self._forced_rebuilds[root]:
+ writemsg_stdout(" %s causes rebuilds for:\n" % (child,), noiselevel=-1)
+ for parent in self._forced_rebuilds[root][child]:
+ writemsg_stdout(" %s\n" % (parent,), noiselevel=-1)
+
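
_compute_abi_rebuild_info boils down to building a root -> child -> {parents} mapping with chained setdefault calls, which _show_abi_rebuild_info then simply walks. The mapping idiom, isolated (package names are illustrative):

    forced_rebuilds = {}

    def record(root, child, parent):
        # root -> child -> set of parents rebuilt because of that child
        forced_rebuilds.setdefault(root, {}).setdefault(
            child, set()).add(parent)

    record("/", "dev-libs/icu-52", "app-office/libreoffice-4.2.3")
    record("/", "dev-libs/icu-52", "www-client/chromium-33.0")

    for root, children in forced_rebuilds.items():
        for child, parents in children.items():
            print(" %s causes rebuilds for:" % child)
            for parent in sorted(parents):
                print("   %s" % parent)
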
def _show_ignored_binaries(self):
"""
Show binaries that have been ignored because their USE didn't
@@ -583,26 +715,23 @@ class depgraph(object):
for pkg in list(self._dynamic_config.ignored_binaries):
- selected_pkg = self._dynamic_config.mydbapi[pkg.root
- ].match_pkgs(pkg.slot_atom)
+ selected_pkg = list()
- if not selected_pkg:
- continue
+ for selected_pkg in self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom):
- selected_pkg = selected_pkg[-1]
- if selected_pkg > pkg:
- self._dynamic_config.ignored_binaries.pop(pkg)
- continue
+ if selected_pkg > pkg:
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ break
- if selected_pkg.installed and \
- selected_pkg.cpv == pkg.cpv and \
- selected_pkg.metadata.get('BUILD_TIME') == \
- pkg.metadata.get('BUILD_TIME'):
- # We don't care about ignored binaries when an
- # identical installed instance is selected to
- # fill the slot.
- self._dynamic_config.ignored_binaries.pop(pkg)
- continue
+ if selected_pkg.installed and \
+ selected_pkg.cpv == pkg.cpv and \
+ selected_pkg.build_time == pkg.build_time:
+ # We don't care about ignored binaries when an
+ # identical installed instance is selected to
+ # fill the slot.
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ break
if not self._dynamic_config.ignored_binaries:
return
@@ -613,11 +742,17 @@ class depgraph(object):
"due to non matching USE:\n\n", noiselevel=-1)
for pkg, flags in self._dynamic_config.ignored_binaries.items():
- writemsg(" =%s" % pkg.cpv, noiselevel=-1)
+ flag_display = []
+ for flag in sorted(flags):
+ if flag not in pkg.use.enabled:
+ flag = "-" + flag
+ flag_display.append(flag)
+ flag_display = " ".join(flag_display)
+ # The user can paste this line into package.use
+ writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
if pkg.root_config.settings["ROOT"] != "/":
- writemsg(" for %s" % (pkg.root,), noiselevel=-1)
- writemsg("\n use flag(s): %s\n" % ", ".join(sorted(flags)),
- noiselevel=-1)
+ writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
msg = [
"",
@@ -631,31 +766,44 @@ class depgraph(object):
line = colorize("INFORM", line)
writemsg(line + "\n", noiselevel=-1)
- def _show_missed_update(self):
+ def _get_missed_updates(self):
# In order to minimize noise, show only the highest
# missed update from each SLOT.
missed_updates = {}
for pkg, mask_reasons in \
- self._dynamic_config._runtime_pkg_mask.items():
+ chain(self._dynamic_config._runtime_pkg_mask.items(),
+ self._dynamic_config._conflict_missed_update.items()):
if pkg.installed:
# Exclude installed here since we only
# want to show available updates.
continue
- chosen_pkg = self._dynamic_config.mydbapi[pkg.root
- ].match_pkgs(pkg.slot_atom)
- if not chosen_pkg or chosen_pkg[-1] >= pkg:
- continue
- k = (pkg.root, pkg.slot_atom)
- if k in missed_updates:
- other_pkg, mask_type, parent_atoms = missed_updates[k]
- if other_pkg > pkg:
- continue
- for mask_type, parent_atoms in mask_reasons.items():
- if not parent_atoms:
- continue
- missed_updates[k] = (pkg, mask_type, parent_atoms)
- break
+ missed_update = True
+ any_selected = False
+ for chosen_pkg in self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom):
+ any_selected = True
+ if chosen_pkg > pkg or (not chosen_pkg.installed and \
+ chosen_pkg.version == pkg.version):
+ missed_update = False
+ break
+ if any_selected and missed_update:
+ k = (pkg.root, pkg.slot_atom)
+ if k in missed_updates:
+ other_pkg, mask_type, parent_atoms = missed_updates[k]
+ if other_pkg > pkg:
+ continue
+ for mask_type, parent_atoms in mask_reasons.items():
+ if not parent_atoms:
+ continue
+ missed_updates[k] = (pkg, mask_type, parent_atoms)
+ break
+
+ return missed_updates
+
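
_get_missed_updates keeps at most one entry per (root, slot_atom) key, always the highest version seen, so the report shows a single line per slot. Reduced to plain comparables, the keep-the-max reduction looks like this (keys and versions are illustrative):

    def highest_per_key(candidates):
        # candidates: iterable of (key, version) pairs; keep the highest
        # version per key, as missed_updates does per slot atom.
        best = {}
        for key, version in candidates:
            if key in best and best[key] >= version:
                continue
            best[key] = version
        return best

    print(highest_per_key([("dev-lang/python:2.7", 6),
                           ("dev-lang/python:2.7", 9),
                           ("sys-apps/portage:0", 3)]))
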
+ def _show_missed_update(self):
+
+ missed_updates = self._get_missed_updates()
if not missed_updates:
return
@@ -726,7 +874,7 @@ class depgraph(object):
self._show_merge_list()
msg = []
- msg.append("\nWARNING: One or more updates have been " + \
+ msg.append("\nWARNING: One or more updates/rebuilds have been " + \
"skipped due to a dependency conflict:\n\n")
indent = " "
@@ -736,22 +884,29 @@ class depgraph(object):
msg.append(" for %s" % (pkg.root,))
msg.append("\n\n")
- for parent, atom in parent_atoms:
- msg.append(indent)
- msg.append(str(pkg))
+ msg.append(indent)
+ msg.append(str(pkg))
+ msg.append(" conflicts with\n")
- msg.append(" conflicts with\n")
- msg.append(2*indent)
+ for parent, atom in parent_atoms:
if isinstance(parent,
(PackageArg, AtomArg)):
# For PackageArg and AtomArg types, it's
# redundant to display the atom attribute.
+ msg.append(2*indent)
msg.append(str(parent))
+ msg.append("\n")
else:
# Display the specific atom from SetArg or
# Package types.
- msg.append("%s required by %s" % (atom, parent))
- msg.append("\n")
+ atom, marker = format_unmatched_atom(
+ pkg, atom, self._pkg_use_enabled)
+
+ msg.append(2*indent)
+ msg.append("%s required by %s\n" % (atom, parent))
+ msg.append(2*indent)
+ msg.append(marker)
+ msg.append("\n")
msg.append("\n")
writemsg("".join(msg), noiselevel=-1)
@@ -764,7 +919,7 @@ class depgraph(object):
cases.
"""
- if not self._dynamic_config._slot_collision_info:
+ if not any(self._dynamic_config._package_tracker.slot_conflicts()):
return
self._show_merge_list()
@@ -774,7 +929,7 @@ class depgraph(object):
conflict = handler.get_conflict()
writemsg(conflict, noiselevel=-1)
-
+
explanation = handler.get_explanation()
if explanation:
writemsg(explanation, noiselevel=-1)
@@ -813,6 +968,239 @@ class depgraph(object):
writemsg(line + '\n', noiselevel=-1)
writemsg('\n', noiselevel=-1)
+ def _solve_non_slot_operator_slot_conflicts(self):
+ """
+ This function solves slot conflicts which can
+	be solved by simply choosing one of the conflicting
+	packages and removing all the other ones.
+	It is also able to solve somewhat more complex cases where
+	several conflicts can only be solved simultaneously.
+ """
+ debug = "--debug" in self._frozen_config.myopts
+
+ # List all conflicts. Ignore those that involve slot operator rebuilds
+ # as the logic there needs special slot conflict behavior which isn't
+ # provided by this function.
+ conflicts = []
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ slot_key = conflict.root, conflict.atom
+ if slot_key not in self._dynamic_config._slot_operator_replace_installed:
+ conflicts.append(conflict)
+
+ if not conflicts:
+ return
+
+ # Get a set of all conflicting packages.
+ conflict_pkgs = set()
+ for conflict in conflicts:
+ conflict_pkgs.update(conflict)
+
+ # Get the list of other packages which are only
+ # required by conflict packages.
+ indirect_conflict_candidates = set()
+ for pkg in conflict_pkgs:
+ indirect_conflict_candidates.update(self._dynamic_config.digraph.child_nodes(pkg))
+ indirect_conflict_candidates.difference_update(conflict_pkgs)
+
+ indirect_conflict_pkgs = set()
+ while indirect_conflict_candidates:
+ pkg = indirect_conflict_candidates.pop()
+
+ only_conflict_parents = True
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if parent not in conflict_pkgs and parent not in indirect_conflict_pkgs:
+ only_conflict_parents = False
+ break
+ if not only_conflict_parents:
+ continue
+
+ indirect_conflict_pkgs.add(pkg)
+ for child in self._dynamic_config.digraph.child_nodes(pkg):
+ if child in conflict_pkgs or child in indirect_conflict_pkgs:
+ continue
+ indirect_conflict_candidates.add(child)
+
+ # Create a graph containing the conflict packages
+ # and a special 'non_conflict_node' that represents
+ # all non-conflict packages.
+ conflict_graph = digraph()
+
+ non_conflict_node = "(non-conflict package)"
+ conflict_graph.add(non_conflict_node, None)
+
+ for pkg in chain(conflict_pkgs, indirect_conflict_pkgs):
+ conflict_graph.add(pkg, None)
+
+ # Add parent->child edges for each conflict package.
+		# Parents that aren't conflict packages are represented
+ # by 'non_conflict_node'.
+ # If several conflicting packages are matched, but not all,
+ # add a tuple with the matched packages to the graph.
+ class or_tuple(tuple):
+ """
+ Helper class for debug printing.
+ """
+ def __str__(self):
+ return "(%s)" % ",".join(str(pkg) for pkg in self)
+
+ for conflict in conflicts:
+ all_parent_atoms = set()
+ for pkg in conflict:
+ all_parent_atoms.update(
+ self._dynamic_config._parent_atoms.get(pkg, []))
+
+ for parent, atom in all_parent_atoms:
+ is_arg_parent = isinstance(parent, AtomArg)
+
+ if parent not in conflict_pkgs and \
+ parent not in indirect_conflict_pkgs:
+ parent = non_conflict_node
+
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom,), allow_repo=True)
+
+ matched = []
+ for pkg in conflict:
+ if atom_set.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)) and \
+ not (is_arg_parent and pkg.installed):
+ matched.append(pkg)
+ if len(matched) == len(conflict):
+ # All packages match.
+ continue
+ elif len(matched) == 1:
+ conflict_graph.add(matched[0], parent)
+ else:
+					# More than one package matched, but not all.
+ conflict_graph.add(or_tuple(matched), parent)
+
+ for pkg in indirect_conflict_pkgs:
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if parent not in conflict_pkgs and \
+ parent not in indirect_conflict_pkgs:
+ parent = non_conflict_node
+ conflict_graph.add(pkg, parent)
+
+ if debug:
+ writemsg_level(
+ "\n!!! Slot conflict graph:\n",
+ level=logging.DEBUG, noiselevel=-1)
+ conflict_graph.debug_print()
+
+ # Now select required packages. Collect them in the
+ # 'forced' set.
+ forced = set([non_conflict_node])
+ unexplored = set([non_conflict_node])
+ # or_tuples get special handling. We first explore
+ # all packages in the hope of having forced one of
+ # the packages in the tuple. This way we don't have
+ # to choose one.
+ unexplored_tuples = set()
+
+ while unexplored:
+ # Handle all unexplored packages.
+ while unexplored:
+ node = unexplored.pop()
+ for child in conflict_graph.child_nodes(node):
+ if child in forced:
+ continue
+ forced.add(child)
+ if isinstance(child, Package):
+ unexplored.add(child)
+ else:
+ unexplored_tuples.add(child)
+
+ # Now handle unexplored or_tuples. Move on with packages
+ # once we had to choose one.
+ while unexplored_tuples:
+ nodes = unexplored_tuples.pop()
+ if any(node in forced for node in nodes):
+ # At least one of the packages in the
+ # tuple is already forced, which means the
+ # dependency represented by this tuple
+ # is satisfied.
+ continue
+
+				# We now have to choose one of the packages in the tuple.
+ # In theory one could solve more conflicts if we'd be
+ # able to try different choices here, but that has lots
+ # of other problems. For now choose the package that was
+ # pulled first, as this should be the most desirable choice
+ # (otherwise it wouldn't have been the first one).
+ forced.add(nodes[0])
+ unexplored.add(nodes[0])
+ break
+
+ # Remove 'non_conflict_node' and or_tuples from 'forced'.
+ forced = set(pkg for pkg in forced if isinstance(pkg, Package))
+ non_forced = set(pkg for pkg in conflict_pkgs if pkg not in forced)
+
+ if debug:
+ writemsg_level(
+ "\n!!! Slot conflict solution:\n",
+ level=logging.DEBUG, noiselevel=-1)
+ for conflict in conflicts:
+ writemsg_level(
+ " Conflict: (%s, %s)\n" % (conflict.root, conflict.atom),
+ level=logging.DEBUG, noiselevel=-1)
+ for pkg in conflict:
+ if pkg in forced:
+ writemsg_level(
+ " keep: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+ else:
+ writemsg_level(
+ " remove: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+
+ broken_packages = set()
+ for pkg in non_forced:
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if isinstance(parent, Package) and parent not in non_forced:
+ # Non-forcing set args are expected to be a parent of all
+ # packages in the conflict.
+ broken_packages.add(parent)
+ self._remove_pkg(pkg)
+
+		# Process the dependencies of chosen conflict packages
+ # again to properly account for blockers.
+ broken_packages.update(forced)
+
+ # Filter out broken packages which have been removed during
+ # recursive removal in self._remove_pkg.
+		broken_packages = list(pkg for pkg in broken_packages \
+ if self._dynamic_config._package_tracker.contains(pkg, installed=False))
+
+ self._dynamic_config._dep_stack.extend(broken_packages)
+
+ if broken_packages:
+ # Process dependencies. This cannot fail because we just ensured that
+ # the remaining packages satisfy all dependencies.
+ self._create_graph()
+
+ # Record missed updates.
+ for conflict in conflicts:
+ if not any(pkg in non_forced for pkg in conflict):
+ continue
+ for pkg in conflict:
+ if pkg not in non_forced:
+ continue
+
+ for other in conflict:
+ if other is pkg:
+ continue
+
+ for parent, atom in self._dynamic_config._parent_atoms.get(other, []):
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom,), allow_repo=True)
+ if not atom_set.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ self._dynamic_config._conflict_missed_update[pkg].setdefault(
+ "slot conflict", set())
+ self._dynamic_config._conflict_missed_update[pkg]["slot conflict"].add(
+ (parent, atom))
+
+
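
The heart of the solver is a reachability walk: everything reachable from the synthetic non-conflict node through parent->child edges is forced to stay, and whatever conflict packages remain unreached get removed. A toy version over a dict-of-children graph, leaving out the or_tuple handling (forced_set and the sample graph are illustrative):

    def forced_set(children, start):
        # children: node -> list of child nodes; an edge means the
        # parent requires that child, so reachable nodes must be kept.
        forced = {start}
        unexplored = {start}
        while unexplored:
            node = unexplored.pop()
            for child in children.get(node, []):
                if child not in forced:
                    forced.add(child)
                    unexplored.add(child)
        return forced

    graph = {
        "(non-conflict package)": ["foo-2"],  # only foo-2 is required
        "foo-2": ["bar-1"],                   # foo-2 needs bar-1
        "foo-1": ["bar-2"],                   # foo-1 would need bar-2
    }
    keep = forced_set(graph, "(non-conflict package)")
    print(sorted(keep - {"(non-conflict package)"}))  # ['bar-1', 'foo-2']
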
def _process_slot_conflicts(self):
"""
If there are any slot conflicts and backtracking is enabled,
@@ -820,16 +1208,21 @@ class depgraph(object):
is called, so that all relevant reverse dependencies are
available for use in backtracking decisions.
"""
- for (slot_atom, root), slot_nodes in \
- self._dynamic_config._slot_collision_info.items():
- self._process_slot_conflict(root, slot_atom, slot_nodes)
- def _process_slot_conflict(self, root, slot_atom, slot_nodes):
+ self._solve_non_slot_operator_slot_conflicts()
+
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ self._process_slot_conflict(conflict)
+
+ def _process_slot_conflict(self, conflict):
"""
Process slot conflict data to identify specific atoms which
lead to conflict. These atoms only match a subset of the
packages that have been pulled into a given slot.
"""
+ root = conflict.root
+ slot_atom = conflict.atom
+ slot_nodes = conflict.pkgs
debug = "--debug" in self._frozen_config.myopts
@@ -897,21 +1290,13 @@ class depgraph(object):
all_parents, conflict_pkgs):
debug = "--debug" in self._frozen_config.myopts
- existing_node = self._dynamic_config._slot_pkg_map[root][slot_atom]
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ root, slot_atom, installed=False))
+ # In order to avoid a missed update, first mask lower versions
+ # that conflict with higher versions (the backtracker visits
+ # these in reverse order).
+ conflict_pkgs.sort(reverse=True)
backtrack_data = []
- # The ordering of backtrack_data can make
- # a difference here, because both mask actions may lead
- # to valid, but different, solutions and the one with
- # 'existing_node' masked is usually the better one. Because
- # of that, we choose an order such that
- # the backtracker will first explore the choice with
- # existing_node masked. The backtracker reverses the
- # order, so the order it uses is the reverse of the
- # order shown here. See bug #339606.
- if existing_node in conflict_pkgs and \
- existing_node is not conflict_pkgs[-1]:
- conflict_pkgs.remove(existing_node)
- conflict_pkgs.append(existing_node)
for to_be_masked in conflict_pkgs:
# For missed update messages, find out which
# atoms matched to_be_selected that did not
@@ -922,19 +1307,6 @@ class depgraph(object):
if parent_atom not in parent_atoms)
backtrack_data.append((to_be_masked, conflict_atoms))
- if len(backtrack_data) > 1:
- # NOTE: Generally, we prefer to mask the higher
- # version since this solves common cases in which a
- # lower version is needed so that all dependencies
- # will be satisfied (bug #337178). However, if
- # existing_node happens to be installed then we
- # mask that since this is a common case that is
- # triggered when --update is not enabled.
- if existing_node.installed:
- pass
- elif any(pkg > existing_node for pkg in conflict_pkgs):
- backtrack_data.reverse()
-
to_be_masked = backtrack_data[-1][0]
self._dynamic_config._backtrack_infos.setdefault(
@@ -956,7 +1328,7 @@ class depgraph(object):
def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
"""
- If one or more conflict atoms have a SLOT/ABI dep that can be resolved
+ If one or more conflict atoms have a slot/sub-slot dep that can be resolved
by rebuilding the parent package, then schedule the rebuild via
backtracking, and return True. Otherwise, return False.
"""
@@ -964,7 +1336,7 @@ class depgraph(object):
found_update = False
for parent_atom, conflict_pkgs in conflict_atoms.items():
parent, atom = parent_atom
- if atom.slot_abi_op != "=" or not parent.built:
+ if atom.slot_operator != "=" or not parent.built:
continue
if pkg not in conflict_pkgs:
@@ -977,13 +1349,96 @@ class depgraph(object):
dep = Dependency(atom=atom, child=other_pkg,
parent=parent, root=pkg.root)
- if self._slot_abi_update_probe(dep):
- self._slot_abi_update_backtrack(dep)
+ new_dep = \
+ self._slot_operator_update_probe_slot_conflict(dep)
+ if new_dep is not None:
+ self._slot_operator_update_backtrack(dep,
+ new_dep=new_dep)
found_update = True
return found_update
- def _slot_abi_update_backtrack(self, dep, new_child_slot=None):
+ def _slot_change_probe(self, dep):
+ """
+ @rtype: bool
+ @return: True if dep.child should be rebuilt due to a change
+ in sub-slot (without revbump, as in bug #456208).
+ """
+ if not (isinstance(dep.parent, Package) and \
+ not dep.parent.built and dep.child.built):
+ return None
+
+ root_config = self._frozen_config.roots[dep.root]
+ matches = []
+ try:
+ matches.append(self._pkg(dep.child.cpv, "ebuild",
+ root_config, myrepo=dep.child.repo))
+ except PackageNotFound:
+ pass
+
+ for unbuilt_child in chain(matches,
+ self._iter_match_pkgs(root_config, "ebuild",
+ Atom("=%s" % (dep.child.cpv,)))):
+ if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
+ continue
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(
+ unbuilt_child,
+ modified_use=self._pkg_use_enabled(unbuilt_child)):
+ continue
+ if not self._pkg_visibility_check(unbuilt_child):
+ continue
+ break
+ else:
+ return None
+
+ if unbuilt_child.slot == dep.child.slot and \
+ unbuilt_child.sub_slot == dep.child.sub_slot:
+ return None
+
+ return unbuilt_child
+
+ def _slot_change_backtrack(self, dep, new_child_slot):
+ child = dep.child
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to slot/sub-slot change:")
+ msg.append(" child package: %s" % child)
+ msg.append(" child slot: %s/%s" %
+ (child.slot, child.sub_slot))
+ msg.append(" new child: %s" % new_child_slot)
+ msg.append(" new child slot: %s/%s" %
+ (new_child_slot.slot, new_child_slot.sub_slot))
+ msg.append(" parent package: %s" % dep.parent)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+
+ # mask unwanted binary packages if necessary
+ masks = {}
+ if not child.installed:
+ masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None
+ if masks:
+ config.setdefault("slot_operator_mask_built", {}).update(masks)
+
+ # trigger replacement of installed packages if necessary
+ reinstalls = set()
+ if child.installed:
+ replacement_atom = self._replace_installed_atom(child)
+ if replacement_atom is not None:
+ reinstalls.add((child.root, replacement_atom))
+ if reinstalls:
+ config.setdefault("slot_operator_replace_installed",
+ set()).update(reinstalls)
+
+ self._dynamic_config._need_restart = True
+
+ def _slot_operator_update_backtrack(self, dep, new_child_slot=None,
+ new_dep=None):
if new_child_slot is None:
child = dep.child
else:
@@ -997,6 +1452,8 @@ class depgraph(object):
if new_child_slot is not None:
msg.append(" new child slot package: %s" % new_child_slot)
msg.append(" parent package: %s" % dep.parent)
+ if new_dep is not None:
+ msg.append(" new parent pkg: %s" % new_dep.parent)
msg.append(" atom: %s" % dep.atom)
msg.append("")
writemsg_level("\n".join(msg),
@@ -1008,28 +1465,54 @@ class depgraph(object):
abi_masks = {}
if new_child_slot is None:
if not child.installed:
- abi_masks.setdefault(child, {})["slot_abi_mask_built"] = None
+ abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
if not dep.parent.installed:
- abi_masks.setdefault(dep.parent, {})["slot_abi_mask_built"] = None
+ abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
if abi_masks:
- config.setdefault("slot_abi_mask_built", {}).update(abi_masks)
+ config.setdefault("slot_operator_mask_built", {}).update(abi_masks)
# trigger replacement of installed packages if necessary
abi_reinstalls = set()
if dep.parent.installed:
- abi_reinstalls.add((dep.parent.root, dep.parent.slot_atom))
+ if new_dep is not None:
+ replacement_atom = new_dep.parent.slot_atom
+ else:
+ replacement_atom = self._replace_installed_atom(dep.parent)
+ if replacement_atom is not None:
+ abi_reinstalls.add((dep.parent.root, replacement_atom))
if new_child_slot is None and child.installed:
- abi_reinstalls.add((child.root, child.slot_atom))
+ replacement_atom = self._replace_installed_atom(child)
+ if replacement_atom is not None:
+ abi_reinstalls.add((child.root, replacement_atom))
if abi_reinstalls:
- config.setdefault("slot_abi_replace_installed",
+ config.setdefault("slot_operator_replace_installed",
set()).update(abi_reinstalls)
self._dynamic_config._need_restart = True
- def _slot_abi_update_probe(self, dep, new_child_slot=False):
+ def _slot_operator_update_probe_slot_conflict(self, dep):
+ new_dep = self._slot_operator_update_probe(dep, slot_conflict=True)
+
+ if new_dep is not None:
+ return new_dep
+
+ if self._dynamic_config._autounmask is True:
+
+ for autounmask_level in self._autounmask_levels():
+
+ new_dep = self._slot_operator_update_probe(dep,
+ slot_conflict=True, autounmask_level=autounmask_level)
+
+ if new_dep is not None:
+ return new_dep
+
+ return None
+
+ def _slot_operator_update_probe(self, dep, new_child_slot=False,
+ slot_conflict=False, autounmask_level=None):
"""
- SLOT/ABI := operators tend to prevent updates from getting pulled in,
- since installed packages pull in packages with the SLOT/ABI that they
+ slot/sub-slot := operators tend to prevent updates from getting pulled in,
+ since installed packages pull in packages with the slot/sub-slot that they
were built against. Detect this case so that we can schedule rebuilds
and reinstalls when appropriate.
NOTE: This function only searches for updates that involve upgrades
@@ -1048,20 +1531,70 @@ class depgraph(object):
return None
debug = "--debug" in self._frozen_config.myopts
+ selective = "selective" in self._dynamic_config.myparams
want_downgrade = None
+ def check_reverse_dependencies(existing_pkg, candidate_pkg):
+ """
+ Check if candidate_pkg satisfies all of existing_pkg's non-
+ slot operator parents.
+ """
+ for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
+ if atom.slot_operator == "=" and parent.built:
+ continue
+
+ atom_set = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ if not atom_set.findAtomForPackage(candidate_pkg,
+ modified_use=self._pkg_use_enabled(candidate_pkg)):
+ return False
+ return True
+
+
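
check_reverse_dependencies asks: if we swap candidate_pkg in for existing_pkg, does every parent atom other than the built "=" slot-operator deps we are trying to relax still match? The shape of the check, with plain dicts and a callable standing in for InternalPackageSet matching (all names here are illustrative):

    def check_reverse_deps(parent_atoms, matches):
        # parent_atoms: list of (parent, atom) pairs; matches(atom)
        # stands in for findAtomForPackage(candidate_pkg, ...).
        for parent, atom in parent_atoms:
            if atom.get("slot_operator") == "=" and parent["built"]:
                continue  # the dep we are trying to relax
            if not matches(atom):
                return False
        return True

    parents = [({"built": True}, {"slot_operator": "="}),
               ({"built": False}, {"slot_operator": None})]
    print(check_reverse_deps(parents, lambda atom: True))   # True
    print(check_reverse_deps(parents, lambda atom: False))  # False
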
for replacement_parent in self._iter_similar_available(dep.parent,
- dep.parent.slot_atom):
+ dep.parent.slot_atom, autounmask_level=autounmask_level):
- for atom in replacement_parent.validated_atoms:
- if not atom.slot_abi_op == "=" or \
- atom.blocker or \
+ if not check_reverse_dependencies(dep.parent, replacement_parent):
+ continue
+
+ selected_atoms = None
+
+ atoms = set()
+ invalid_metadata = False
+ for dep_key in ("DEPEND", "HDEPEND", "RDEPEND", "PDEPEND"):
+ dep_string = replacement_parent._metadata[dep_key]
+ if not dep_string:
+ continue
+
+ try:
+ dep_string = portage.dep.use_reduce(dep_string,
+ uselist=self._pkg_use_enabled(replacement_parent),
+ is_valid_flag=replacement_parent.iuse.is_valid_flag,
+ flat=True, token_class=Atom,
+ eapi=replacement_parent.eapi)
+ except portage.exception.InvalidDependString:
+ invalid_metadata = True
+ break
+
+ atoms.update(token for token in dep_string if isinstance(token, Atom))
+
+ if invalid_metadata:
+ continue
+
+ # List of list of child,atom pairs for each atom.
+ replacement_candidates = []
+ # Set of all packages all atoms can agree on.
+ all_candidate_pkgs = None
+
+ for atom in atoms:
+ if atom.blocker or \
atom.cp != dep.atom.cp:
continue
# Discard USE deps, we're only searching for an approximate
# pattern, and dealing with USE states is too complex for
# this purpose.
+ unevaluated_atom = atom.unevaluated_atom
atom = atom.without_use
if replacement_parent.built and \
@@ -1071,11 +1604,13 @@ class depgraph(object):
# parent and search for another.
break
+ candidate_pkg_atoms = []
+ candidate_pkgs = []
for pkg in self._iter_similar_available(
dep.child, atom):
if pkg.slot == dep.child.slot and \
- pkg.slot_abi == dep.child.slot_abi:
- # If SLOT/ABI is identical, then there's
+ pkg.sub_slot == dep.child.sub_slot:
+ # If slot/sub-slot is identical, then there's
# no point in updating.
continue
if new_child_slot:
@@ -1093,39 +1628,192 @@ class depgraph(object):
want_downgrade = self._downgrade_probe(dep.child)
# be careful not to trigger a rebuild when
# the only version available with a
- # different slot_abi is an older version
+ # different slot_operator is an older version
if not want_downgrade:
continue
+ insignificant = False
+ if not slot_conflict and \
+ selective and \
+ dep.parent.installed and \
+ dep.child.installed and \
+ dep.parent >= replacement_parent and \
+ dep.child.cpv == pkg.cpv:
+						# This can happen if the child's sub-slot changed
+ # without a revision bump. The sub-slot change is
+ # considered insignificant until one of its parent
+ # packages needs to be rebuilt (which may trigger a
+ # slot conflict).
+ insignificant = True
+
+ if not insignificant:
+ # Evaluate USE conditionals and || deps, in order
+ # to see if this atom is really desirable, since
+ # otherwise we may trigger an undesirable rebuild
+ # as in bug #460304.
+ if selected_atoms is None:
+ selected_atoms = self._select_atoms_probe(
+ dep.child.root, replacement_parent)
+ if unevaluated_atom not in selected_atoms:
+ continue
+
+ if not insignificant and \
+ check_reverse_dependencies(dep.child, pkg):
+
+ candidate_pkg_atoms.append((pkg, unevaluated_atom))
+ candidate_pkgs.append(pkg)
+ replacement_candidates.append(candidate_pkg_atoms)
+ if all_candidate_pkgs is None:
+ all_candidate_pkgs = set(candidate_pkgs)
+ else:
+ all_candidate_pkgs.intersection_update(candidate_pkgs)
+
+ if not all_candidate_pkgs:
+ # If the atoms that connect parent and child can't agree on
+ # any replacement child, we can't do anything.
+ continue
+
+ # Now select one of the pkgs as replacement. This is as easy as
+ # selecting the highest version.
+ # The more complicated part is to choose an atom for the
+ # new Dependency object. Choose the one which ranked the selected
+ # parent highest.
+ selected = None
+ for candidate_pkg_atoms in replacement_candidates:
+ for i, (pkg, atom) in enumerate(candidate_pkg_atoms):
+ if pkg not in all_candidate_pkgs:
+ continue
+ if selected is None or \
+ selected[0] < pkg or \
+ (selected[0] is pkg and i < selected[2]):
+ selected = (pkg, atom, i)
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_update_probe:")
+ msg.append(" existing child package: %s" % dep.child)
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" new child package: %s" % selected[0])
+ msg.append(" new parent package: %s" % replacement_parent)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return Dependency(parent=replacement_parent,
+ child=selected[0], atom=selected[1])
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_update_probe:")
+ msg.append(" existing child package: %s" % dep.child)
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" new child package: %s" % None)
+ msg.append(" new parent package: %s" % None)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return None
+
+ def _slot_operator_unsatisfied_probe(self, dep):
+
+ if dep.parent.installed and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
+ modified_use=self._pkg_use_enabled(dep.parent)):
+ return False
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ for replacement_parent in self._iter_similar_available(dep.parent,
+ dep.parent.slot_atom):
+
+ for atom in replacement_parent.validated_atoms:
+ if not atom.slot_operator == "=" or \
+ atom.blocker or \
+ atom.cp != dep.atom.cp:
+ continue
+
+ # Discard USE deps, we're only searching for an approximate
+ # pattern, and dealing with USE states is too complex for
+ # this purpose.
+ atom = atom.without_use
+
+ pkg, existing_node = self._select_package(dep.root, atom,
+ onlydeps=dep.onlydeps)
+
+ if pkg is not None:
+
if debug:
msg = []
msg.append("")
msg.append("")
- msg.append("slot_abi_update_probe:")
- msg.append(" existing child package: %s" % dep.child)
+ msg.append("slot_operator_unsatisfied_probe:")
msg.append(" existing parent package: %s" % dep.parent)
- msg.append(" new child package: %s" % pkg)
+ msg.append(" existing parent atom: %s" % dep.atom)
msg.append(" new parent package: %s" % replacement_parent)
+ msg.append(" new child package: %s" % pkg)
msg.append("")
writemsg_level("\n".join(msg),
noiselevel=-1, level=logging.DEBUG)
- return pkg
+ return True
if debug:
msg = []
msg.append("")
msg.append("")
- msg.append("slot_abi_update_probe:")
- msg.append(" existing child package: %s" % dep.child)
+ msg.append("slot_operator_unsatisfied_probe:")
msg.append(" existing parent package: %s" % dep.parent)
- msg.append(" new child package: %s" % None)
+ msg.append(" existing parent atom: %s" % dep.atom)
msg.append(" new parent package: %s" % None)
+ msg.append(" new child package: %s" % None)
msg.append("")
writemsg_level("\n".join(msg),
noiselevel=-1, level=logging.DEBUG)
- return None
+ return False
+
+ def _slot_operator_unsatisfied_backtrack(self, dep):
+
+ parent = dep.parent
+
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to unsatisfied "
+ "built slot-operator dep:")
+ msg.append(" parent package: %s" % parent)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+
+ # mask unwanted binary packages if necessary
+ masks = {}
+ if not parent.installed:
+ masks.setdefault(parent, {})["slot_operator_mask_built"] = None
+ if masks:
+ config.setdefault("slot_operator_mask_built", {}).update(masks)
+
+ # trigger replacement of installed packages if necessary
+ reinstalls = set()
+ if parent.installed:
+ replacement_atom = self._replace_installed_atom(parent)
+ if replacement_atom is not None:
+ reinstalls.add((parent.root, replacement_atom))
+ if reinstalls:
+ config.setdefault("slot_operator_replace_installed",
+ set()).update(reinstalls)
+
+ self._dynamic_config._need_restart = True
def _downgrade_probe(self, pkg):
"""
@@ -1142,7 +1830,19 @@ class depgraph(object):
return available_pkg is not None
- def _iter_similar_available(self, graph_pkg, atom):
+ def _select_atoms_probe(self, root, pkg):
+ selected_atoms = []
+ use = self._pkg_use_enabled(pkg)
+ for k in pkg._dep_keys:
+ v = pkg._metadata.get(k)
+ if not v:
+ continue
+ selected_atoms.extend(self._select_atoms(
+ root, v, myuse=use, parent=pkg)[pkg])
+ return frozenset(x.unevaluated_atom for
+ x in selected_atoms)
+
+ def _iter_similar_available(self, graph_pkg, atom, autounmask_level=None):
"""
Given a package that's in the graph, do a rough check to
see if a similar package is available to install. The given
@@ -1166,49 +1866,91 @@ class depgraph(object):
if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
modified_use=self._pkg_use_enabled(pkg)):
continue
- if not self._pkg_visibility_check(pkg):
- continue
if pkg.built:
if self._equiv_binary_installed(pkg):
continue
if not (not use_ebuild_visibility and
(usepkgonly or useoldpkg_atoms.findAtomForPackage(
pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
- not self._equiv_ebuild_visible(pkg):
+ not self._equiv_ebuild_visible(pkg,
+ autounmask_level=autounmask_level):
continue
+ if not self._pkg_visibility_check(pkg,
+ autounmask_level=autounmask_level):
+ continue
yield pkg
- def _slot_abi_trigger_reinstalls(self):
+ def _replace_installed_atom(self, inst_pkg):
+ """
+ Given an installed package, generate an atom suitable for
+ slot_operator_replace_installed backtracking info. The replacement
+ SLOT may differ from the installed SLOT, so first search by cpv.
"""
- Search for packages with slot-abi deps on older slots, and schedule
+ built_pkgs = []
+ for pkg in self._iter_similar_available(inst_pkg,
+ Atom("=%s" % inst_pkg.cpv)):
+ if not pkg.built:
+ return pkg.slot_atom
+ elif not pkg.installed:
+ # avoid using SLOT from a built instance
+ built_pkgs.append(pkg)
+
+ for pkg in self._iter_similar_available(inst_pkg, inst_pkg.slot_atom):
+ if not pkg.built:
+ return pkg.slot_atom
+ elif not pkg.installed:
+ # avoid using SLOT from a built instance
+ built_pkgs.append(pkg)
+
+ if built_pkgs:
+ best_version = None
+ for pkg in built_pkgs:
+ if best_version is None or pkg > best_version:
+ best_version = pkg
+ return best_version.slot_atom
+
+ return None
+
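
_replace_installed_atom prefers the slot_atom of a visible ebuild and only falls back to the best built instance; since Package objects compare by version, the trailing loop is just a guarded max(). Isolated:

    def best_version(built_pkgs):
        # Equivalent to: max(built_pkgs) if built_pkgs else None
        best = None
        for pkg in built_pkgs:
            if best is None or pkg > best:
                best = pkg
        return best

    print(best_version([1, 3, 2]))  # 3 (ints stand in for Package objects)
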
+ def _slot_operator_trigger_reinstalls(self):
+ """
+ Search for packages with slot-operator deps on older slots, and schedule
rebuilds if they can link to a newer slot that's in the graph.
"""
- rebuild_if_new_slot_abi = self._dynamic_config.myparams.get(
- "rebuild_if_new_slot_abi", "y") == "y"
+ rebuild_if_new_slot = self._dynamic_config.myparams.get(
+ "rebuild_if_new_slot", "y") == "y"
- for slot_key, slot_info in self._dynamic_config._slot_abi_deps.items():
+ for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():
for dep in slot_info:
- if not (dep.child.built and dep.parent and
+
+ atom = dep.atom
+ if atom.slot_operator is None:
+ continue
+
+ if not atom.slot_operator_built:
+ new_child_slot = self._slot_change_probe(dep)
+ if new_child_slot is not None:
+ self._slot_change_backtrack(dep, new_child_slot)
+ continue
+
+ if not (dep.parent and
isinstance(dep.parent, Package) and dep.parent.built):
continue
# Check for slot update first, since we don't want to
# trigger reinstall of the child package when a newer
# slot will be used instead.
- if rebuild_if_new_slot_abi:
- new_child = self._slot_abi_update_probe(dep,
+ if rebuild_if_new_slot:
+ new_dep = self._slot_operator_update_probe(dep,
new_child_slot=True)
- if new_child:
- self._slot_abi_update_backtrack(dep,
- new_child_slot=new_child)
- break
+ if new_dep is not None:
+ self._slot_operator_update_backtrack(dep,
+ new_child_slot=new_dep.child)
if dep.want_update:
- if self._slot_abi_update_probe(dep):
- self._slot_abi_update_backtrack(dep)
- break
+ if self._slot_operator_update_probe(dep):
+ self._slot_operator_update_backtrack(dep)
def _reinstall_for_flags(self, pkg, forced_flags,
orig_use, orig_iuse, cur_use, cur_iuse):
@@ -1222,18 +1964,22 @@ class depgraph(object):
in ("y", "auto"))
newuse = "--newuse" in self._frozen_config.myopts
changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
+ feature_flags = _get_feature_flags(
+ _get_eapi_attrs(pkg.eapi))
if newuse or (binpkg_respect_use and not changed_use):
flags = set(orig_iuse.symmetric_difference(
cur_iuse).difference(forced_flags))
flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
cur_iuse.intersection(cur_use)))
+ flags.difference_update(feature_flags)
if flags:
return flags
elif changed_use or binpkg_respect_use:
- flags = orig_iuse.intersection(orig_use).symmetric_difference(
- cur_iuse.intersection(cur_use))
+ flags = set(orig_iuse.intersection(orig_use).symmetric_difference(
+ cur_iuse.intersection(cur_use)))
+ flags.difference_update(feature_flags)
if flags:
return flags
return None
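
The new feature_flags subtraction keeps USE flags that the package manager injects by itself for EAPI features from ever forcing a reinstall. The set arithmetic for the changed-use branch, isolated (the sample flags, including "prefix" as a feature flag, are illustrative):

    def changed_use(orig_iuse, orig_use, cur_iuse, cur_use, feature_flags):
        # Flags whose effective state differs between old and new build,
        # minus flags injected by the package manager itself.
        flags = set(
            orig_iuse.intersection(orig_use).symmetric_difference(
                cur_iuse.intersection(cur_use)))
        flags.difference_update(feature_flags)
        return flags

    print(changed_use(orig_iuse={"X", "jpeg"}, orig_use={"jpeg"},
                      cur_iuse={"X", "jpeg"}, cur_use={"X", "jpeg"},
                      feature_flags={"prefix"}))
    # {'X'}
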
@@ -1319,11 +2065,16 @@ class depgraph(object):
buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
nodeps = "--nodeps" in self._frozen_config.myopts
if dep.blocker:
+
+ # Slot collision nodes are not allowed to block other packages since
+ # blocker validation is only able to account for one package per slot.
+ is_slot_conflict_parent = any(dep.parent in conflict.pkgs[1:] for conflict in \
+ self._dynamic_config._package_tracker.slot_conflicts())
if not buildpkgonly and \
not nodeps and \
not dep.collapsed_priority.ignored and \
not dep.collapsed_priority.optional and \
- dep.parent not in self._dynamic_config._slot_collision_nodes:
+ not is_slot_conflict_parent:
if dep.parent.onlydeps:
# It's safe to ignore blockers if the
# parent is an --onlydeps node.
@@ -1331,7 +2082,7 @@ class depgraph(object):
# The blocker applies to the root where
# the parent is or will be installed.
blocker = Blocker(atom=dep.atom,
- eapi=dep.parent.metadata["EAPI"],
+ eapi=dep.parent.eapi,
priority=dep.priority, root=dep.parent.root)
self._dynamic_config._blocker_parents.add(blocker, dep.parent)
return 1
@@ -1343,8 +2094,8 @@ class depgraph(object):
# The caller has selected a specific package
# via self._minimize_packages().
dep_pkg = dep.child
- existing_node = self._dynamic_config._slot_pkg_map[
- dep.root].get(dep_pkg.slot_atom)
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ dep.root, dep_pkg.slot_atom, installed=False), None)
if not dep_pkg:
if (dep.collapsed_priority.optional or
@@ -1368,9 +2119,17 @@ class depgraph(object):
(dep.parent,
self._dynamic_config._runtime_pkg_mask[
dep.parent]), noiselevel=-1)
- elif not self.need_restart():
+ elif dep.atom.slot_operator_built and \
+ self._slot_operator_unsatisfied_probe(dep):
+ self._slot_operator_unsatisfied_backtrack(dep)
+ return 1
+ else:
# Do not backtrack if only USE have to be changed in
- # order to satisfy the dependency.
+ # order to satisfy the dependency. Note that when
+ # want_restart_for_use_change sets the need_restart
+ # flag, it causes _select_pkg_highest_available to
+ # return None, and eventually we come through here
+ # and skip the "missing dependency" backtracking path.
dep_pkg, existing_node = \
self._select_package(dep.root, dep.atom.without_use,
onlydeps=dep.onlydeps)
@@ -1401,7 +2160,9 @@ class depgraph(object):
return 1
def _check_slot_conflict(self, pkg, atom):
- existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom, installed=False), None)
+
matches = None
if existing_node:
matches = pkg.cpv == existing_node.cpv
@@ -1477,12 +2238,13 @@ class depgraph(object):
# package selection, since we want to prompt the user
# for USE adjustment rather than have REQUIRED_USE
# affect package selection and || dep choices.
- if not pkg.built and pkg.metadata.get("REQUIRED_USE") and \
- eapi_has_required_use(pkg.metadata["EAPI"]):
+ if not pkg.built and pkg._metadata.get("REQUIRED_USE") and \
+ eapi_has_required_use(pkg.eapi):
required_use_is_sat = check_required_use(
- pkg.metadata["REQUIRED_USE"],
+ pkg._metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
- pkg.iuse.is_valid_flag)
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi)
if not required_use_is_sat:
if dep.atom is not None and dep.parent is not None:
self._add_parent_atom(pkg, (dep.parent, dep.atom))
@@ -1505,30 +2267,29 @@ class depgraph(object):
existing_node, existing_node_matches = \
self._check_slot_conflict(pkg, dep.atom)
- slot_collision = False
if existing_node:
if existing_node_matches:
# The existing node can be reused.
- if arg_atoms:
- for parent_atom in arg_atoms:
- parent, atom = parent_atom
- self._dynamic_config.digraph.add(existing_node, parent,
- priority=priority)
- self._add_parent_atom(existing_node, parent_atom)
- # If a direct circular dependency is not an unsatisfied
- # buildtime dependency then drop it here since otherwise
- # it can skew the merge order calculation in an unwanted
- # way.
- if existing_node != myparent or \
- (priority.buildtime and not priority.satisfied):
- self._dynamic_config.digraph.addnode(existing_node, myparent,
- priority=priority)
- if dep.atom is not None and dep.parent is not None:
- self._add_parent_atom(existing_node,
- (dep.parent, dep.atom))
- return 1
+ if pkg != existing_node:
+ pkg = existing_node
+ previously_added = True
+ try:
+ arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+ except InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before
+ # it was selected
+ raise
+
+ if debug:
+ writemsg_level(
+ "%s%s %s\n" % ("Re-used Child:".ljust(15),
+ pkg, pkg_use_display(pkg,
+ self._frozen_config.myopts,
+ modified_use=self._pkg_use_enabled(pkg))),
+ level=logging.DEBUG, noiselevel=-1)
+
else:
- self._add_slot_conflict(pkg)
if debug:
writemsg_level(
"%s%s %s\n" % ("Slot Conflict:".ljust(15),
@@ -1537,23 +2298,8 @@ class depgraph(object):
modified_use=self._pkg_use_enabled(existing_node))),
level=logging.DEBUG, noiselevel=-1)
- slot_collision = True
-
- if slot_collision:
- # Now add this node to the graph so that self.display()
- # can show use flags and --tree portage.output. This node is
- # only being partially added to the graph. It must not be
- # allowed to interfere with the other nodes that have been
- # added. Do not overwrite data for existing nodes in
- # self._dynamic_config.mydbapi since that data will be used for blocker
- # validation.
- # Even though the graph is now invalid, continue to process
- # dependencies so that things like --fetchonly can still
- # function despite collisions.
- pass
- elif not previously_added:
- self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
- self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
+ if not previously_added:
+ self._dynamic_config._package_tracker.add_pkg(pkg)
self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
self._dynamic_config._highest_pkg_cache.clear()
self._check_masks(pkg)
@@ -1563,11 +2309,11 @@ class depgraph(object):
# doesn't already. Any pre-existing providers will be preferred
# over this one.
try:
- pkgsettings.setinst(pkg.cpv, pkg.metadata)
+ pkgsettings.setinst(pkg.cpv, pkg._metadata)
# For consistency, also update the global virtuals.
settings = self._frozen_config.roots[pkg.root].settings
settings.unlock()
- settings.setinst(pkg.cpv, pkg.metadata)
+ settings.setinst(pkg.cpv, pkg._metadata)
settings.lock()
except portage.exception.InvalidDependString:
if not pkg.installed:
@@ -1577,12 +2323,19 @@ class depgraph(object):
if arg_atoms:
self._dynamic_config._set_nodes.add(pkg)
- # Do this even when addme is False (--onlydeps) so that the
+ # Do this even for onlydeps, so that the
# parent/child relationship is always known in case
# self._show_slot_collision_notice() needs to be called later.
- self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
- if dep.atom is not None and dep.parent is not None:
- self._add_parent_atom(pkg, (dep.parent, dep.atom))
+ # If a direct circular dependency is not an unsatisfied
+ # buildtime dependency then drop it here since otherwise
+ # it can skew the merge order calculation in an unwanted
+ # way.
+ if pkg != dep.parent or \
+ (priority.buildtime and not priority.satisfied):
+ self._dynamic_config.digraph.add(pkg,
+ dep.parent, priority=priority)
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(pkg, (dep.parent, dep.atom))
if arg_atoms:
for parent_atom in arg_atoms:
@@ -1612,9 +2365,9 @@ class depgraph(object):
not (deep is not True and depth > deep))
dep.child = pkg
- if (not pkg.onlydeps and pkg.built and
- dep.atom and dep.atom.slot_abi_built):
- self._add_slot_abi_dep(dep)
+ if (not pkg.onlydeps and
+ dep.atom and dep.atom.slot_operator is not None):
+ self._add_slot_operator_dep(dep)
recurse = deep is True or depth + 1 <= deep
dep_stack = self._dynamic_config._dep_stack
@@ -1629,6 +2382,64 @@ class depgraph(object):
dep_stack.append(pkg)
return 1
+
+ def _remove_pkg(self, pkg):
+ """
+ Remove a package and all of its then-parentless digraph
+ children from all depgraph data structures.
+ """
+ debug = "--debug" in self._frozen_config.myopts
+ if debug:
+ writemsg_level(
+ "Removing package: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+
+ try:
+ children = [child for child in self._dynamic_config.digraph.child_nodes(pkg) \
+ if child is not pkg]
+ self._dynamic_config.digraph.remove(pkg)
+ except KeyError:
+ children = []
+
+ self._dynamic_config._package_tracker.discard_pkg(pkg)
+
+ self._dynamic_config._parent_atoms.pop(pkg, None)
+ self._dynamic_config._set_nodes.discard(pkg)
+
+ for child in children:
+ try:
+ self._dynamic_config._parent_atoms[child] = set((parent, atom) \
+ for (parent, atom) in self._dynamic_config._parent_atoms[child] \
+ if parent is not pkg)
+ except KeyError:
+ pass
+
+ # Remove slot operator dependencies.
+ slot_key = (pkg.root, pkg.slot_atom)
+ if slot_key in self._dynamic_config._slot_operator_deps:
+ self._dynamic_config._slot_operator_deps[slot_key] = \
+ [dep for dep in self._dynamic_config._slot_operator_deps[slot_key] \
+ if dep.child is not pkg]
+ if not self._dynamic_config._slot_operator_deps[slot_key]:
+ del self._dynamic_config._slot_operator_deps[slot_key]
+
+ # Remove blockers.
+ self._dynamic_config._blocker_parents.discard(pkg)
+ self._dynamic_config._irrelevant_blockers.discard(pkg)
+ self._dynamic_config._unsolvable_blockers.discard(pkg)
+ self._dynamic_config._blocked_pkgs.discard(pkg)
+ self._dynamic_config._blocked_world_pkgs.pop(pkg, None)
+
+ for child in children:
+ if child in self._dynamic_config.digraph and \
+ not self._dynamic_config.digraph.parent_nodes(child):
+ self._remove_pkg(child)
+
+ # Clear caches.
+ self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
+ self._dynamic_config._highest_pkg_cache.clear()
+
+
def _check_masks(self, pkg):
slot_key = (pkg.root, pkg.slot_atom)
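
The new _remove_pkg method above drops a package and then recursively
removes any digraph child left without parents. The same pruning strategy
on a plain dict-based digraph (illustrative only; portage's digraph class
has its own API):

    def remove_node(graph, node):
        """graph maps node -> set of children; an edge implies parenthood."""
        children = graph.pop(node, set())
        for edges in graph.values():
            edges.discard(node)
        for child in children:
            still_parented = any(child in edges for edges in graph.values())
            if child in graph and not still_parented:
                remove_node(graph, child)

    g = {"app": {"libfoo"}, "libfoo": {"libbar"}, "libbar": set()}
    remove_node(g, "app")
    print(g)  # {} -- libfoo and libbar became parentless and were pruned
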
@@ -1647,33 +2458,23 @@ class depgraph(object):
self._dynamic_config._parent_atoms[pkg] = parent_atoms
parent_atoms.add(parent_atom)
- def _add_slot_abi_dep(self, dep):
+ def _add_slot_operator_dep(self, dep):
slot_key = (dep.root, dep.child.slot_atom)
- slot_info = self._dynamic_config._slot_abi_deps.get(slot_key)
+ slot_info = self._dynamic_config._slot_operator_deps.get(slot_key)
if slot_info is None:
slot_info = []
- self._dynamic_config._slot_abi_deps[slot_key] = slot_info
+ self._dynamic_config._slot_operator_deps[slot_key] = slot_info
slot_info.append(dep)
- def _add_slot_conflict(self, pkg):
- self._dynamic_config._slot_collision_nodes.add(pkg)
- slot_key = (pkg.slot_atom, pkg.root)
- slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
- if slot_nodes is None:
- slot_nodes = set()
- slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
- self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
- slot_nodes.add(pkg)
-
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
myroot = pkg.root
- metadata = pkg.metadata
+ metadata = pkg._metadata
removal_action = "remove" in self._dynamic_config.myparams
+ eapi_attrs = _get_eapi_attrs(pkg.eapi)
edepend={}
- depkeys = ["DEPEND","RDEPEND","PDEPEND"]
- for k in depkeys:
+ for k in Package._dep_keys:
edepend[k] = metadata[k]
if not pkg.built and \
@@ -1700,31 +2501,44 @@ class depgraph(object):
# Removal actions never traverse ignored buildtime
# dependencies, so it's safe to discard them early.
edepend["DEPEND"] = ""
+ edepend["HDEPEND"] = ""
ignore_build_time_deps = True
+ ignore_depend_deps = ignore_build_time_deps
+ ignore_hdepend_deps = ignore_build_time_deps
+
if removal_action:
depend_root = myroot
else:
- depend_root = self._frozen_config._running_root.root
- root_deps = self._frozen_config.myopts.get("--root-deps")
- if root_deps is not None:
- if root_deps is True:
- depend_root = myroot
- elif root_deps == "rdeps":
- ignore_build_time_deps = True
+ if eapi_attrs.hdepend:
+ depend_root = myroot
+ else:
+ depend_root = self._frozen_config._running_root.root
+ root_deps = self._frozen_config.myopts.get("--root-deps")
+ if root_deps is not None:
+ if root_deps is True:
+ depend_root = myroot
+ elif root_deps == "rdeps":
+ ignore_depend_deps = True
# If rebuild mode is not enabled, it's safe to discard ignored
# build-time dependencies. If you want these deps to be traversed
# in "complete" mode then you need to specify --with-bdeps=y.
- if ignore_build_time_deps and \
- not self._rebuild.rebuild:
- edepend["DEPEND"] = ""
+ if not self._rebuild.rebuild:
+ if ignore_depend_deps:
+ edepend["DEPEND"] = ""
+ if ignore_hdepend_deps:
+ edepend["HDEPEND"] = ""
deps = (
(depend_root, edepend["DEPEND"],
self._priority(buildtime=True,
- optional=(pkg.built or ignore_build_time_deps),
- ignored=ignore_build_time_deps)),
+ optional=(pkg.built or ignore_depend_deps),
+ ignored=ignore_depend_deps)),
+ (self._frozen_config._running_root.root, edepend["HDEPEND"],
+ self._priority(buildtime=True,
+ optional=(pkg.built or ignore_hdepend_deps),
+ ignored=ignore_hdepend_deps)),
(myroot, edepend["RDEPEND"],
self._priority(runtime=True)),
(myroot, edepend["PDEPEND"],
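
The root-selection logic above is the heart of the HDEPEND handling: when
the package's EAPI supports HDEPEND, DEPEND resolves against the target
ROOT and HDEPEND against the running (host) root; otherwise DEPEND keeps
its classic host-root behavior, subject to --root-deps. A sketch of just
that decision (the function and parameter names are illustrative):

    # Returns (root DEPEND resolves against, whether DEPEND is ignored).
    def pick_depend_root(target_root, running_root,
                         eapi_hdepend, root_deps, removal_action=False):
        if removal_action:
            return target_root, False
        if eapi_hdepend:
            # HDEPEND carries the host deps, so DEPEND targets ROOT.
            return target_root, False
        depend_root, ignore_depend = running_root, False
        if root_deps is True:
            depend_root = target_root
        elif root_deps == "rdeps":
            ignore_depend = True
        return depend_root, ignore_depend

    print(pick_depend_root("/build", "/", eapi_hdepend=False, root_deps=None))
    # ('/', False) -- classic behavior: build deps resolve on the host
    print(pick_depend_root("/build", "/", eapi_hdepend=True, root_deps=None))
    # ('/build', False) -- with HDEPEND, DEPEND follows the target root
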
@@ -1749,7 +2563,7 @@ class depgraph(object):
uselist=self._pkg_use_enabled(pkg),
is_valid_flag=pkg.iuse.is_valid_flag,
opconvert=True, token_class=Atom,
- eapi=pkg.metadata['EAPI'])
+ eapi=pkg.eapi)
except portage.exception.InvalidDependString as e:
if not pkg.installed:
# should have been masked before it was selected
@@ -1763,7 +2577,7 @@ class depgraph(object):
dep_string = portage.dep.use_reduce(dep_string,
uselist=self._pkg_use_enabled(pkg),
opconvert=True, token_class=Atom,
- eapi=pkg.metadata['EAPI'])
+ eapi=pkg.eapi)
except portage.exception.InvalidDependString as e:
self._dynamic_config._masked_installed.add(pkg)
del e
@@ -1806,6 +2620,37 @@ class depgraph(object):
finally:
self._dynamic_config._autounmask = _autounmask_backup
+ def _ignore_dependency(self, atom, pkg, child, dep, mypriority, recurse_satisfied):
+ """
+ In some cases, dep_check will return deps that shouldn't
+ be processed any further, so they are identified and
+ discarded here. Try to discard as few as possible since
+ discarded dependencies reduce the amount of information
+ available for optimization of merge order.
+ Don't ignore a dependency if pkg has a slot operator
+ dependency on the child and the child's slot/sub_slot
+ has changed.
+ """
+ if not mypriority.satisfied:
+ return False
+ slot_operator_rebuild = False
+ if atom.slot_operator == '=' and \
+ (pkg.root, pkg.slot_atom) in self._dynamic_config._slot_operator_replace_installed and \
+ mypriority.satisfied is not child and \
+ mypriority.satisfied.installed and \
+ child and \
+ not child.installed and \
+ (child.slot != mypriority.satisfied.slot or child.sub_slot != mypriority.satisfied.sub_slot):
+ slot_operator_rebuild = True
+
+ return not atom.blocker and \
+ not recurse_satisfied and \
+ mypriority.satisfied.visible and \
+ dep.child is not None and \
+ not dep.child.installed and \
+ not any(self._dynamic_config._package_tracker.match(
+ dep.child.root, dep.child.slot_atom, installed=False)) and \
+ not slot_operator_rebuild
+
def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
dep_string, allow_unsatisfied):
depth = pkg.depth + 1
@@ -1864,6 +2709,13 @@ class depgraph(object):
mypriority = dep_priority.copy()
if not atom.blocker:
+
+ if atom.slot_operator == "=":
+ if mypriority.buildtime:
+ mypriority.buildtime_slot_op = True
+ if mypriority.runtime:
+ mypriority.runtime_slot_op = True
+
inst_pkgs = [inst_pkg for inst_pkg in
reversed(vardb.match_pkgs(atom))
if not reinstall_atoms.findAtomForPackage(inst_pkg,
@@ -1883,19 +2735,12 @@ class depgraph(object):
priority=mypriority, root=dep_root)
# In some cases, dep_check will return deps that shouldn't
- # be proccessed any further, so they are identified and
+ # be processed any further, so they are identified and
# discarded here. Try to discard as few as possible since
# discarded dependencies reduce the amount of information
# available for optimization of merge order.
ignored = False
- if not atom.blocker and \
- not recurse_satisfied and \
- mypriority.satisfied and \
- mypriority.satisfied.visible and \
- dep.child is not None and \
- not dep.child.installed and \
- self._dynamic_config._slot_pkg_map[dep.child.root].get(
- dep.child.slot_atom) is None:
+ if self._ignore_dependency(atom, pkg, child, dep, mypriority, recurse_satisfied):
myarg = None
try:
myarg = next(self._iter_atoms_for_pkg(dep.child), None)
@@ -1998,14 +2843,7 @@ class depgraph(object):
collapsed_parent=pkg, collapsed_priority=dep_priority)
ignored = False
- if not atom.blocker and \
- not recurse_satisfied and \
- mypriority.satisfied and \
- mypriority.satisfied.visible and \
- dep.child is not None and \
- not dep.child.installed and \
- self._dynamic_config._slot_pkg_map[dep.child.root].get(
- dep.child.slot_atom) is None:
+ if self._ignore_dependency(atom, pkg, child, dep, mypriority, recurse_satisfied):
myarg = None
try:
myarg = next(self._iter_atoms_for_pkg(dep.child), None)
@@ -2053,7 +2891,7 @@ class depgraph(object):
yield (atom, None)
continue
dep_pkg, existing_node = self._select_package(
- root_config.root, atom)
+ root_config.root, atom, parent=parent)
if dep_pkg is None:
yield (atom, None)
continue
@@ -2105,12 +2943,12 @@ class depgraph(object):
# Yield ~, =*, < and <= atoms first, since those are more likely to
# cause slot conflicts, and we want those atoms to be displayed
# in the resulting slot conflict message (see bug #291142).
- # Give similar treatment to SLOT/ABI atoms.
+ # Give similar treatment to slot/sub-slot atoms.
conflict_atoms = []
normal_atoms = []
abi_atoms = []
for atom in cp_atoms:
- if atom.slot_abi_built:
+ if atom.slot_operator_built:
abi_atoms.append(atom)
continue
conflict = False
@@ -2135,7 +2973,7 @@ class depgraph(object):
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
"""
Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
- Yields non-disjunctive deps. Raises InvalidDependString when
+ Yields non-disjunctive deps. Raises InvalidDependString when
necessary.
"""
for x in dep_struct:
@@ -2242,9 +3080,24 @@ class depgraph(object):
continue
yield arg, atom
- def select_files(self, myfiles):
+ def select_files(self, args):
+ # Use the global event loop for spinner progress
+ # indication during file owner lookups (bug #461412).
+ spinner_id = None
+ try:
+ spinner = self._frozen_config.spinner
+ if spinner is not None and \
+ spinner.update is not spinner.update_quiet:
+ spinner_id = self._event_loop.idle_add(
+ self._frozen_config.spinner.update)
+ return self._select_files(args)
+ finally:
+ if spinner_id is not None:
+ self._event_loop.source_remove(spinner_id)
+
+ def _select_files(self, myfiles):
"""Given a list of .tbz2s, .ebuilds sets, and deps, populate
- self._dynamic_config._initial_arg_list and call self._resolve to create the
+ self._dynamic_config._initial_arg_list and call self._resolve to create the
appropriate depgraph and return a favorite list."""
self._load_vdb()
debug = "--debug" in self._frozen_config.myopts
@@ -2277,8 +3130,18 @@ class depgraph(object):
writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
return 0, myfavorites
mytbz2=portage.xpak.tbz2(x)
- mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
- if os.path.realpath(x) != \
+ mykey = None
+ cat = mytbz2.getfile("CATEGORY")
+ if cat is not None:
+ cat = _unicode_decode(cat.strip(),
+ encoding=_encodings['repo.content'])
+ mykey = cat + "/" + os.path.basename(x)[:-5]
+
+ if mykey is None:
+ writemsg(colorize("BAD", "\n*** Package is missing CATEGORY metadata: %s.\n\n" % x), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, myfavorites
+ elif os.path.realpath(x) != \
os.path.realpath(bindb.bintree.getname(mykey)):
writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
self._dynamic_config._skip_restart = True
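
The replacement above derives the binary package key from the CATEGORY
xpak field plus the file name with the ".tbz2" suffix sliced off, and
reports a clear error when CATEGORY is missing instead of indexing into a
getelements() result that may be empty. A sketch of the key construction
(the bytes argument stands in for the getfile("CATEGORY") result):

    import os

    def binpkg_key(path, category_bytes):
        if category_bytes is None:
            return None                    # missing CATEGORY metadata
        cat = category_bytes.decode("utf-8").strip()
        return cat + "/" + os.path.basename(path)[:-5]  # strip ".tbz2"

    print(binpkg_key("/pkgdir/app-misc/foo-1.0.tbz2", b"app-misc\n"))
    # app-misc/foo-1.0
    print(binpkg_key("/pkgdir/broken.tbz2", None))
    # None -> caller prints the "missing CATEGORY metadata" error
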
@@ -2293,15 +3156,16 @@ class depgraph(object):
pkgdir = os.path.dirname(ebuild_path)
tree_root = os.path.dirname(os.path.dirname(pkgdir))
cp = pkgdir[len(tree_root)+1:]
- e = portage.exception.PackageNotFound(
- ("%s is not in a valid portage tree " + \
- "hierarchy or does not exist") % x)
+ error_msg = ("\n\n!!! '%s' is not in a valid portage tree "
+ "hierarchy or does not exist\n") % x
if not portage.isvalidatom(cp):
- raise e
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
cat = portage.catsplit(cp)[0]
mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
if not portage.isvalidatom("="+mykey):
- raise e
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
ebuild_path = portdb.findname(mykey)
if ebuild_path:
if ebuild_path != os.path.join(os.path.realpath(tree_root),
@@ -2317,8 +3181,8 @@ class depgraph(object):
countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
"Continuing...")
else:
- raise portage.exception.PackageNotFound(
- "%s is not in a valid portage tree hierarchy or does not exist" % x)
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
pkg = self._pkg(mykey, "ebuild", root_config,
onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
@@ -2351,6 +3215,30 @@ class depgraph(object):
raise portage.exception.PackageSetNotFound(s)
if s in depgraph_sets.sets:
continue
+
+ try:
+ set_atoms = root_config.setconfig.getSetAtoms(s)
+ except portage.exception.PackageSetNotFound as e:
+ writemsg_level("\n\n", level=logging.ERROR,
+ noiselevel=-1)
+ for pset in list(depgraph_sets.sets.values()) + [sets[s]]:
+ for error_msg in pset.errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+
+ writemsg_level(("emerge: the given set '%s' "
+ "contains a non-existent set named '%s'.\n") % \
+ (s, e), level=logging.ERROR, noiselevel=-1)
+ if s in ('world', 'selected') and \
+ SETPREFIX + e.value in sets['selected']:
+ writemsg_level(("Use `emerge --deselect %s%s` to "
+ "remove this set from world_sets.\n") %
+ (SETPREFIX, e,), level=logging.ERROR,
+ noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR,
+ noiselevel=-1)
+ return False, myfavorites
+
pset = sets[s]
depgraph_sets.sets[s] = pset
args.append(SetArg(arg=x, pset=pset,
@@ -2370,7 +3258,7 @@ class depgraph(object):
# came from, if any.
# 2) It takes away freedom from the resolver to choose other
# possible expansions when necessary.
- if "/" in x:
+ if "/" in x.split(":")[0]:
args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
root_config=root_config))
continue
@@ -2471,13 +3359,8 @@ class depgraph(object):
return 0, []
for cpv in owners:
- slot = vardb.aux_get(cpv, ["SLOT"])[0]
- if not slot:
- # portage now masks packages with missing slot, but it's
- # possible that one was installed by an older version
- atom = Atom(portage.cpv_getkey(cpv))
- else:
- atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
+ pkg = vardb._pkg_str(cpv, None)
+ atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
args.append(AtomArg(arg=atom, atom=atom,
root_config=root_config))
@@ -2542,7 +3425,7 @@ class depgraph(object):
# Order needs to be preserved since a feature of --nodeps
# is to allow the user to force a specific merge order.
self._dynamic_config._initial_arg_list = args[:]
-
+
return self._resolve(myfavorites)
def _gen_reinstall_sets(self):
@@ -2552,8 +3435,8 @@ class depgraph(object):
atom_list.append((root, '__auto_rebuild__', atom))
for root, atom in self._rebuild.reinstall_list:
atom_list.append((root, '__auto_reinstall__', atom))
- for root, atom in self._dynamic_config._slot_abi_replace_installed:
- atom_list.append((root, '__auto_slot_abi_replace_installed__', atom))
+ for root, atom in self._dynamic_config._slot_operator_replace_installed:
+ atom_list.append((root, '__auto_slot_operator_replace_installed__', atom))
set_dict = {}
for root, set_name, atom in atom_list:
@@ -2572,8 +3455,8 @@ class depgraph(object):
root_config=self._frozen_config.roots[root])
def _resolve(self, myfavorites):
- """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
- call self._creategraph to process theier deps and return
+ """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
+ call self._creategraph to process their deps and return
a favorite list."""
debug = "--debug" in self._frozen_config.myopts
onlydeps = "--onlydeps" in self._frozen_config.myopts
@@ -2624,6 +3507,16 @@ class depgraph(object):
if pprovided_match:
continue
+ excluded = False
+ for any_match in self._iter_match_pkgs_any(
+ self._frozen_config.roots[myroot], atom):
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(
+ any_match, modified_use=self._pkg_use_enabled(any_match)):
+ excluded = True
+ break
+ if excluded:
+ continue
+
if not (isinstance(arg, SetArg) and \
arg.name in ("selected", "system", "world")):
self._dynamic_config._unsatisfied_deps_for_display.append(
@@ -2692,7 +3585,8 @@ class depgraph(object):
except self._unknown_internal_error:
return False, myfavorites
- if (self._dynamic_config._slot_collision_info and
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if (have_slot_conflict and
not self._accept_blocker_conflicts()) or \
(self._dynamic_config._allow_backtracking and
"slot conflict" in self._dynamic_config._backtrack_infos):
@@ -2707,11 +3601,47 @@ class depgraph(object):
return False, myfavorites
if "config" in self._dynamic_config._backtrack_infos and \
- ("slot_abi_mask_built" in self._dynamic_config._backtrack_infos["config"] or
- "slot_abi_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
+ ("slot_operator_mask_built" in self._dynamic_config._backtrack_infos["config"] or
+ "slot_operator_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
self.need_restart():
return False, myfavorites
+ if not self._dynamic_config._prune_rebuilds and \
+ self._dynamic_config._slot_operator_replace_installed and \
+ self._get_missed_updates():
+ # When there are missed updates, we might have triggered
+ # some unnecessary rebuilds (see bug #439688). So, prune
+ # all the rebuilds and backtrack with the problematic
+ # updates masked. The next backtrack run should pull in
+ # any rebuilds that are really needed, and this
+ # prune_rebuilds path should never be entered more than
+ # once in a series of backtracking nodes (in order to
+ # avoid a backtracking loop).
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+ config["prune_rebuilds"] = True
+ self._dynamic_config._need_restart = True
+ return False, myfavorites
+
+ if self.need_restart():
+ # want_restart_for_use_change triggers this
+ return False, myfavorites
+
+ if "--fetchonly" not in self._frozen_config.myopts and \
+ "--buildpkgonly" in self._frozen_config.myopts:
+ graph_copy = self._dynamic_config.digraph.copy()
+ removed_nodes = set()
+ for node in graph_copy:
+ if not isinstance(node, Package) or \
+ node.operation == "nomerge":
+ removed_nodes.add(node)
+ graph_copy.difference_update(removed_nodes)
+ if not graph_copy.hasallzeros(ignore_priority = \
+ DepPrioritySatisfiedRange.ignore_medium):
+ self._dynamic_config._buildpkgonly_deps_unsatisfied = True
+ self._dynamic_config._skip_restart = True
+ return False, myfavorites
+
# Any failures except those due to autounmask *alone* should return
# before this point, since the success_without_autounmask flag that's
# set below is reserved for cases where there are *zero* other
@@ -2773,8 +3703,8 @@ class depgraph(object):
if refs is None:
refs = []
atom_arg_map[atom_key] = refs
- if arg not in refs:
- refs.append(arg)
+ if arg not in refs:
+ refs.append(arg)
for root in self._dynamic_config.sets:
depgraph_sets = self._dynamic_config.sets[root]
@@ -2804,14 +3734,15 @@ class depgraph(object):
slots = set()
for cpv in vardb.match(atom):
# don't mix new virtuals with old virtuals
- if portage.cpv_getkey(cpv) == highest_pkg.cp:
- slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
+ pkg = vardb._pkg_str(cpv, None)
+ if pkg.cp == highest_pkg.cp:
+ slots.add(pkg.slot)
- slots.add(highest_pkg.metadata["SLOT"])
+ slots.add(highest_pkg.slot)
if len(slots) == 1:
return []
greedy_pkgs = []
- slots.remove(highest_pkg.metadata["SLOT"])
+ slots.remove(highest_pkg.slot)
while slots:
slot = slots.pop()
slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
@@ -2825,9 +3756,9 @@ class depgraph(object):
return [pkg.slot_atom for pkg in greedy_pkgs]
blockers = {}
- blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
+ blocker_dep_keys = Package._dep_keys
for pkg in greedy_pkgs + [highest_pkg]:
- dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
+ dep_str = " ".join(pkg._metadata[k] for k in blocker_dep_keys)
try:
selected_atoms = self._select_atoms(
pkg.root, dep_str, self._pkg_use_enabled(pkg),
@@ -2879,7 +3810,8 @@ class depgraph(object):
not been scheduled for replacement.
"""
kwargs["trees"] = self._dynamic_config._graph_trees
- return self._select_atoms_highest_available(*pargs, **kwargs)
+ return self._select_atoms_highest_available(*pargs,
+ **portage._native_kwargs(kwargs))
def _select_atoms_highest_available(self, root, depstring,
myuse=None, parent=None, strict=True, trees=None, priority=None):
@@ -2890,7 +3822,7 @@ class depgraph(object):
eapi = None
is_valid_flag = None
if parent is not None:
- eapi = parent.metadata['EAPI']
+ eapi = parent.eapi
if not parent.installed:
is_valid_flag = parent.iuse.is_valid_flag
depstring = portage.dep.use_reduce(depstring,
@@ -2898,9 +3830,9 @@ class depgraph(object):
is_valid_flag=is_valid_flag, eapi=eapi)
if (self._dynamic_config.myparams.get(
- "ignore_built_slot_abi_deps", "n") == "y" and
+ "ignore_built_slot_operator_deps", "n") == "y" and
parent and parent.built):
- ignore_built_slot_abi_deps(depstring)
+ ignore_built_slot_operator_deps(depstring)
pkgsettings = self._frozen_config.pkgsettings[root]
if trees is None:
@@ -3005,35 +3937,37 @@ class depgraph(object):
def _expand_virt_from_graph(self, root, atom):
if not isinstance(atom, Atom):
atom = Atom(atom)
- graphdb = self._dynamic_config.mydbapi[root]
- match = graphdb.match_pkgs(atom)
- if not match:
- yield atom
- return
- pkg = match[-1]
- if not pkg.cpv.startswith("virtual/"):
- yield atom
- return
- try:
- rdepend = self._select_atoms_from_graph(
- pkg.root, pkg.metadata.get("RDEPEND", ""),
- myuse=self._pkg_use_enabled(pkg),
- parent=pkg, strict=False)
- except InvalidDependString as e:
- writemsg_level("!!! Invalid RDEPEND in " + \
- "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
- (pkg.root, pkg.cpv, e),
- noiselevel=-1, level=logging.ERROR)
+
+ if not atom.cp.startswith("virtual/"):
yield atom
return
- for atoms in rdepend.values():
- for atom in atoms:
- if hasattr(atom, "_orig_atom"):
- # Ignore virtual atoms since we're only
- # interested in expanding the real atoms.
- continue
- yield atom
+ any_match = False
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ try:
+ rdepend = self._select_atoms_from_graph(
+ pkg.root, pkg._metadata.get("RDEPEND", ""),
+ myuse=self._pkg_use_enabled(pkg),
+ parent=pkg, strict=False)
+ except InvalidDependString as e:
+ writemsg_level("!!! Invalid RDEPEND in " + \
+ "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+ (pkg.root, pkg.cpv, e),
+ noiselevel=-1, level=logging.ERROR)
+ continue
+
+ for atoms in rdepend.values():
+ for atom in atoms:
+ if hasattr(atom, "_orig_atom"):
+ # Ignore virtual atoms since we're only
+ # interested in expanding the real atoms.
+ continue
+ yield atom
+
+ any_match = True
+
+ if not any_match:
+ yield atom
def _virt_deps_visible(self, pkg, ignore_use=False):
"""
@@ -3044,7 +3978,7 @@ class depgraph(object):
"""
try:
rdepend = self._select_atoms(
- pkg.root, pkg.metadata.get("RDEPEND", ""),
+ pkg.root, pkg._metadata.get("RDEPEND", ""),
myuse=self._pkg_use_enabled(pkg),
parent=pkg, priority=self._priority(runtime=True))
except InvalidDependString as e:
@@ -3083,19 +4017,29 @@ class depgraph(object):
child = None
all_parents = self._dynamic_config._parent_atoms
graph = self._dynamic_config.digraph
+ verbose_main_repo_display = "--verbose-main-repo-display" in \
+ self._frozen_config.myopts
+
+ def format_pkg(pkg):
+ pkg_name = "%s" % (pkg.cpv,)
+ if verbose_main_repo_display or pkg.repo != \
+ pkg.root_config.settings.repositories.mainRepo().name:
+ pkg_name += _repo_separator + pkg.repo
+ return pkg_name
if target_atom is not None and isinstance(node, Package):
affecting_use = set()
- for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
+ for dep_str in Package._dep_keys:
try:
affecting_use.update(extract_affecting_use(
- node.metadata[dep_str], target_atom,
- eapi=node.metadata["EAPI"]))
+ node._metadata[dep_str], target_atom,
+ eapi=node.eapi))
except InvalidDependString:
if not node.installed:
raise
affecting_use.difference_update(node.use.mask, node.use.force)
- pkg_name = _unicode_decode("%s") % (node.cpv,)
+ pkg_name = format_pkg(node)
+
if affecting_use:
usedep = []
for flag in affecting_use:
@@ -3150,7 +4094,7 @@ class depgraph(object):
node_type = "set"
else:
node_type = "argument"
- dep_chain.append((_unicode_decode("%s") % (node,), node_type))
+ dep_chain.append(("%s" % (node,), node_type))
elif node is not start_node:
for ppkg, patom in all_parents[child]:
@@ -3167,23 +4111,23 @@ class depgraph(object):
if priorities is None:
# This edge comes from _parent_atoms and was not added to
# the graph, and _parent_atoms does not contain priorities.
- dep_strings.add(node.metadata["DEPEND"])
- dep_strings.add(node.metadata["RDEPEND"])
- dep_strings.add(node.metadata["PDEPEND"])
+ for k in Package._dep_keys:
+ dep_strings.add(node._metadata[k])
else:
for priority in priorities:
if priority.buildtime:
- dep_strings.add(node.metadata["DEPEND"])
+ for k in Package._buildtime_keys:
+ dep_strings.add(node._metadata[k])
if priority.runtime:
- dep_strings.add(node.metadata["RDEPEND"])
+ dep_strings.add(node._metadata["RDEPEND"])
if priority.runtime_post:
- dep_strings.add(node.metadata["PDEPEND"])
+ dep_strings.add(node._metadata["PDEPEND"])
affecting_use = set()
for dep_str in dep_strings:
try:
affecting_use.update(extract_affecting_use(
- dep_str, atom, eapi=node.metadata["EAPI"]))
+ dep_str, atom, eapi=node.eapi))
except InvalidDependString:
if not node.installed:
raise
@@ -3192,7 +4136,7 @@ class depgraph(object):
affecting_use.difference_update(node.use.mask, \
node.use.force)
- pkg_name = _unicode_decode("%s") % (node.cpv,)
+ pkg_name = format_pkg(node)
if affecting_use:
usedep = []
for flag in affecting_use:
@@ -3244,8 +4188,7 @@ class depgraph(object):
if self._dynamic_config.digraph.parent_nodes(parent_arg):
selected_parent = parent_arg
else:
- dep_chain.append(
- (_unicode_decode("%s") % (parent_arg,), "argument"))
+ dep_chain.append(("%s" % (parent_arg,), "argument"))
selected_parent = None
node = selected_parent
@@ -3260,7 +4203,7 @@ class depgraph(object):
else:
display_list.append("required by %s" % node)
- msg = "#" + ", ".join(display_list) + "\n"
+ msg = "# " + "\n# ".join(display_list) + "\n"
return msg
@@ -3281,7 +4224,7 @@ class depgraph(object):
if arg:
xinfo='"%s"' % arg
if isinstance(myparent, AtomArg):
- xinfo = _unicode_decode('"%s"') % (myparent,)
+ xinfo = '"%s"' % (myparent,)
# Discard null/ from failed cpv_expand category expansion.
xinfo = xinfo.replace("null/", "")
if root != self._frozen_config._running_root.root:
@@ -3326,9 +4269,9 @@ class depgraph(object):
repo = metadata.get('repository')
pkg = self._pkg(cpv, pkg_type, root_config,
installed=installed, myrepo=repo)
- # pkg.metadata contains calculated USE for ebuilds,
+ # pkg._metadata contains calculated USE for ebuilds,
# required later for getMissingLicenses.
- metadata = pkg.metadata
+ metadata = pkg._metadata
if pkg.invalid:
# Avoid doing any operations with packages that
# have invalid metadata. It would be unsafe at
@@ -3367,12 +4310,13 @@ class depgraph(object):
raise
if not mreasons and \
not pkg.built and \
- pkg.metadata.get("REQUIRED_USE") and \
- eapi_has_required_use(pkg.metadata["EAPI"]):
+ pkg._metadata.get("REQUIRED_USE") and \
+ eapi_has_required_use(pkg.eapi):
if not check_required_use(
- pkg.metadata["REQUIRED_USE"],
+ pkg._metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
- pkg.iuse.is_valid_flag):
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi):
required_use_unsatisfied.append(pkg)
continue
root_slot = (pkg.root, pkg.slot_atom)
@@ -3422,7 +4366,7 @@ class depgraph(object):
continue
missing_use_adjustable.add(pkg)
- required_use = pkg.metadata.get("REQUIRED_USE")
+ required_use = pkg._metadata.get("REQUIRED_USE")
required_use_warning = ""
if required_use:
old_use = self._pkg_use_enabled(pkg)
@@ -3431,8 +4375,10 @@ class depgraph(object):
new_use.add(flag)
for flag in need_disable:
new_use.discard(flag)
- if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
- not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
+ if check_required_use(required_use, old_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi) \
+ and not check_required_use(required_use, new_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi):
required_use_warning = ", this change violates use flag constraints " + \
"defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
@@ -3470,7 +4416,7 @@ class depgraph(object):
if any(x in untouchable_flags for x in involved_flags):
continue
- required_use = myparent.metadata.get("REQUIRED_USE")
+ required_use = myparent._metadata.get("REQUIRED_USE")
required_use_warning = ""
if required_use:
old_use = self._pkg_use_enabled(myparent)
@@ -3480,8 +4426,12 @@ class depgraph(object):
new_use.discard(flag)
else:
new_use.add(flag)
- if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
- not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
+ if check_required_use(required_use, old_use,
+ myparent.iuse.is_valid_flag,
+ eapi=myparent.eapi) and \
+ not check_required_use(required_use, new_use,
+ myparent.iuse.is_valid_flag,
+ eapi=myparent.eapi):
required_use_warning = ", this change violates use flag constraints " + \
"defined by %s: '%s'" % (myparent.cpv, \
human_readable_required_use(required_use))
@@ -3568,14 +4518,15 @@ class depgraph(object):
writemsg("\n The following REQUIRED_USE flag constraints " + \
"are unsatisfied:\n", noiselevel=-1)
reduced_noise = check_required_use(
- pkg.metadata["REQUIRED_USE"],
+ pkg._metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
- pkg.iuse.is_valid_flag).tounicode()
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi).tounicode()
writemsg(" %s\n" % \
human_readable_required_use(reduced_noise),
noiselevel=-1)
normalized_required_use = \
- " ".join(pkg.metadata["REQUIRED_USE"].split())
+ " ".join(pkg._metadata["REQUIRED_USE"].split())
if reduced_noise != normalized_required_use:
writemsg("\n The above constraints " + \
"are a subset of the following complete expression:\n",
@@ -3620,57 +4571,17 @@ class depgraph(object):
not cp_exists and \
self._frozen_config.myopts.get(
"--misspell-suggestions", "y") != "n":
- cp = myparent.atom.cp.lower()
- cat, pkg = portage.catsplit(cp)
- if cat == "null":
- cat = None
writemsg("\nemerge: searching for similar names..."
, noiselevel=-1)
- all_cp = set()
- all_cp.update(vardb.cp_all())
+ dbs = [vardb]
if "--usepkgonly" not in self._frozen_config.myopts:
- all_cp.update(portdb.cp_all())
+ dbs.append(portdb)
if "--usepkg" in self._frozen_config.myopts:
- all_cp.update(bindb.cp_all())
- # discard dir containing no ebuilds
- all_cp.discard(cp)
-
- orig_cp_map = {}
- for cp_orig in all_cp:
- orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
- all_cp = set(orig_cp_map)
-
- if cat:
- matches = difflib.get_close_matches(cp, all_cp)
- else:
- pkg_to_cp = {}
- for other_cp in list(all_cp):
- other_pkg = portage.catsplit(other_cp)[1]
- if other_pkg == pkg:
- # Check for non-identical package that
- # differs only by upper/lower case.
- identical = True
- for cp_orig in orig_cp_map[other_cp]:
- if portage.catsplit(cp_orig)[1] != \
- portage.catsplit(atom.cp)[1]:
- identical = False
- break
- if identical:
- # discard dir containing no ebuilds
- all_cp.discard(other_cp)
- continue
- pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
- pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
- matches = []
- for pkg_match in pkg_matches:
- matches.extend(pkg_to_cp[pkg_match])
+ dbs.append(bindb)
- matches_orig_case = []
- for cp in matches:
- matches_orig_case.extend(orig_cp_map[cp])
- matches = matches_orig_case
+ matches = similar_name_search(dbs, atom)
if len(matches) == 1:
writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
@@ -3691,8 +4602,7 @@ class depgraph(object):
dep_chain = self._get_dep_chain(myparent, atom)
for node, node_type in dep_chain:
msg.append('(dependency required by "%s" [%s])' % \
- (colorize('INFORM', _unicode_decode("%s") % \
- (node)), node_type))
+ (colorize('INFORM', "%s" % (node)), node_type))
if msg:
writemsg("\n".join(msg), noiselevel=-1)
@@ -3770,7 +4680,8 @@ class depgraph(object):
# the newly built package still won't have the expected slot.
# Therefore, assume that such SLOT dependencies are already
# satisfied rather than forcing a rebuild.
- if not matched_something and installed and atom.slot is not None:
+ if not matched_something and installed and \
+ atom.slot is not None and not atom.slot_operator_built:
if "remove" in self._dynamic_config.myparams:
# We need to search the portdbapi, which is not in our
@@ -3794,11 +4705,11 @@ class depgraph(object):
for other_db, other_type, other_built, \
other_installed, other_keys in dbs:
try:
- if atom.slot == \
- other_db.aux_get(cpv, ["SLOT"])[0]:
+ if portage.dep._match_slot(atom,
+ other_db._pkg_str(_unicode(cpv), None)):
slot_available = True
break
- except KeyError:
+ except (KeyError, InvalidData):
pass
if not slot_available:
continue
@@ -3810,12 +4721,12 @@ class depgraph(object):
yield inst_pkg
return
- def _select_pkg_highest_available(self, root, atom, onlydeps=False):
+ def _select_pkg_highest_available(self, root, atom, onlydeps=False, parent=None):
cache_key = (root, atom, atom.unevaluated_atom, onlydeps, self._dynamic_config._autounmask)
ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
if ret is not None:
return ret
- ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
+ ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps, parent=parent)
self._dynamic_config._highest_pkg_cache[cache_key] = ret
pkg, existing = ret
if pkg is not None:
@@ -3847,6 +4758,36 @@ class depgraph(object):
return not arg
+ def _want_update_pkg(self, parent, pkg):
+
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ return False
+
+ arg_atoms = None
+ try:
+ arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+ except InvalidDependString:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+
+ depth = parent.depth or 0
+ depth += 1
+
+ if arg_atoms:
+ for arg, atom in arg_atoms:
+ if arg.reset_depth:
+ depth = 0
+ break
+
+ deep = self._dynamic_config.myparams.get("deep", 0)
+ update = "--update" in self._frozen_config.myopts
+
+ return (not self._dynamic_config._complete_mode and
+ (arg_atoms or update) and
+ not (deep is not True and depth > deep))
+
def _equiv_ebuild_visible(self, pkg, autounmask_level=None):
try:
pkg_eb = self._pkg(
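
The new _want_update_pkg helper above gates update pull-ins on excluded
packages, argument atoms (with reset_depth handling), and the deep/depth
bound, where "deep" in myparams is either True (unbounded) or an integer
horizon. The bound itself, as a sketch:

    def within_deep(depth, deep):
        # deep is True (unbounded) or an int; a candidate at `depth`
        # qualifies only within that horizon.
        return not (deep is not True and depth > deep)

    print(within_deep(3, True))  # True  -- deep=True means unbounded
    print(within_deep(1, 1))     # True
    print(within_deep(2, 1))     # False -- beyond the --deep=1 horizon
    print(within_deep(5, 0))     # False -- deep=0: direct deps only
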
@@ -3867,7 +4808,7 @@ class depgraph(object):
return True
def _equiv_binary_installed(self, pkg):
- build_time = pkg.metadata.get('BUILD_TIME')
+ build_time = pkg.build_time
if not build_time:
return False
@@ -3877,7 +4818,7 @@ class depgraph(object):
except PackageNotFound:
return False
- return build_time == inst_pkg.metadata.get('BUILD_TIME')
+ return build_time == inst_pkg.build_time
class _AutounmaskLevel(object):
__slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
@@ -3898,8 +4839,9 @@ class depgraph(object):
1. USE + license
2. USE + ~arch + license
3. USE + ~arch + license + missing keywords
- 4. USE + ~arch + license + masks
- 5. USE + ~arch + license + missing keywords + masks
+ 4. USE + license + masks
+ 5. USE + ~arch + license + masks
+ 6. USE + ~arch + license + missing keywords + masks
Some thoughts:
* Do least invasive changes first.
@@ -3919,15 +4861,25 @@ class depgraph(object):
autounmask_level.allow_license_changes = True
yield autounmask_level
- for only_use_changes in (False,):
+ autounmask_level.allow_unstable_keywords = True
+ yield autounmask_level
- autounmask_level.allow_unstable_keywords = (not only_use_changes)
- autounmask_level.allow_license_changes = (not only_use_changes)
+ if not autounmask_keep_masks:
- for missing_keyword, unmask in ((False,False), (True, False), (False, True), (True, True)):
+ autounmask_level.allow_missing_keywords = True
+ yield autounmask_level
- if (only_use_changes or autounmask_keep_masks) and (missing_keyword or unmask):
- break
+ # 4. USE + license + masks
+ # Try to respect keywords while discarding
+ # package.mask (see bug #463394).
+ autounmask_level.allow_unstable_keywords = False
+ autounmask_level.allow_missing_keywords = False
+ autounmask_level.allow_unmasks = True
+ yield autounmask_level
+
+ autounmask_level.allow_unstable_keywords = True
+
+ for missing_keyword, unmask in ((False, True), (True, True)):
autounmask_level.allow_missing_keywords = missing_keyword
autounmask_level.allow_unmasks = unmask
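
The rewritten _autounmask_levels above unrolls the old nested loop into an
explicit escalation sequence and inserts the new level 4 (USE + license +
masks), which tries package.mask changes while still respecting keywords
(bug #463394). An illustrative enumeration of that order, with plain dicts
in place of _AutounmaskLevel:

    def autounmask_levels(keep_masks=False):
        level = {"use": True, "license": True,
                 "unstable": False, "missing_kw": False, "unmask": False}
        yield dict(level)                       # 1. USE + license
        level["unstable"] = True
        yield dict(level)                       # 2. + ~arch
        if not keep_masks:
            level["missing_kw"] = True
            yield dict(level)                   # 3. + missing keywords
            # 4. masks without ~arch (respect keywords, bug #463394)
            yield dict(level, unstable=False, missing_kw=False, unmask=True)
            # 5./6. masks with ~arch, then with missing keywords as well
            yield dict(level, missing_kw=False, unmask=True)
            yield dict(level, unmask=True)

    for i, lv in enumerate(autounmask_levels(), 1):
        print(i, lv)
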
@@ -3935,33 +4887,42 @@ class depgraph(object):
yield autounmask_level
- def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
- pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
+ def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False, parent=None):
+ pkg, existing = self._wrapped_select_pkg_highest_available_imp(
+ root, atom, onlydeps=onlydeps, parent=parent)
default_selection = (pkg, existing)
- def reset_pkg(pkg):
+ if self._dynamic_config._autounmask is True:
if pkg is not None and \
pkg.installed and \
not self._want_installed_pkg(pkg):
pkg = None
- if self._dynamic_config._autounmask is True:
- reset_pkg(pkg)
+ # Temporarily reset _need_restart state, in order to
+ # avoid interference as reported in bug #459832.
+ earlier_need_restart = self._dynamic_config._need_restart
+ self._dynamic_config._need_restart = False
+ try:
+ for autounmask_level in self._autounmask_levels():
+ if pkg is not None:
+ break
- for autounmask_level in self._autounmask_levels():
- if pkg is not None:
- break
+ pkg, existing = \
+ self._wrapped_select_pkg_highest_available_imp(
+ root, atom, onlydeps=onlydeps,
+ autounmask_level=autounmask_level, parent=parent)
- pkg, existing = \
- self._wrapped_select_pkg_highest_available_imp(
- root, atom, onlydeps=onlydeps,
- autounmask_level=autounmask_level)
+ if pkg is not None and \
+ pkg.installed and \
+ not self._want_installed_pkg(pkg):
+ pkg = None
- reset_pkg(pkg)
-
- if self._dynamic_config._need_restart:
- return None, None
+ if self._dynamic_config._need_restart:
+ return None, None
+ finally:
+ if earlier_need_restart:
+ self._dynamic_config._need_restart = True
if pkg is None:
# This ensures that we can fall back to an installed package
@@ -4091,25 +5052,29 @@ class depgraph(object):
new_changes = {}
for flag, state in target_use.items():
+ real_flag = pkg.iuse.get_real_flag(flag)
+ if real_flag is None:
+ # Triggered by use-dep defaults.
+ continue
if state:
- if flag not in old_use:
- if new_changes.get(flag) == False:
+ if real_flag not in old_use:
+ if new_changes.get(real_flag) == False:
return old_use
- new_changes[flag] = True
+ new_changes[real_flag] = True
new_use.add(flag)
else:
- if flag in old_use:
- if new_changes.get(flag) == True:
+ if real_flag in old_use:
+ if new_changes.get(real_flag) == True:
return old_use
- new_changes[flag] = False
+ new_changes[real_flag] = False
new_use.update(old_use.difference(target_use))
def want_restart_for_use_change(pkg, new_use):
if pkg not in self._dynamic_config.digraph.nodes:
return False
- for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
- dep = pkg.metadata[key]
+ for key in Package._dep_keys + ("LICENSE",):
+ dep = pkg._metadata[key]
old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
@@ -4132,9 +5097,11 @@ class depgraph(object):
if new_changes != old_changes:
#Don't do the change if it violates REQUIRED_USE.
- required_use = pkg.metadata.get("REQUIRED_USE")
- if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
- not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
+ required_use = pkg._metadata.get("REQUIRED_USE")
+ if required_use and check_required_use(required_use, old_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi) and \
+ not check_required_use(required_use, new_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi):
return old_use
if any(x in pkg.use.mask for x in new_changes) or \
@@ -4150,14 +5117,13 @@ class depgraph(object):
self._dynamic_config._need_restart = True
return new_use
- def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None):
+ def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None, parent=None):
root_config = self._frozen_config.roots[root]
pkgsettings = self._frozen_config.pkgsettings[root]
dbs = self._dynamic_config._filtered_trees[root]["dbs"]
vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
# List of acceptable packages, ordered by type preference.
matched_packages = []
- matched_pkgs_ignore_use = []
highest_version = None
if not isinstance(atom, portage.dep.Atom):
atom = portage.dep.Atom(atom)
@@ -4209,7 +5175,7 @@ class depgraph(object):
# Ignore USE deps for the initial match since we want to
# ensure that updates aren't missed solely due to the user's
# USE configuration.
- for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
+ for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
onlydeps=onlydeps):
if pkg.cp != atom_cp and have_new_virt:
# pull in a new-style virtual instead
@@ -4295,8 +5261,8 @@ class depgraph(object):
for selected_pkg in matched_packages:
if selected_pkg.type_name == "binary" and \
selected_pkg.cpv == pkg.cpv and \
- selected_pkg.metadata.get('BUILD_TIME') == \
- pkg.metadata.get('BUILD_TIME'):
+ selected_pkg.build_time == \
+ pkg.build_time:
identical_binary = True
break
@@ -4339,7 +5305,6 @@ class depgraph(object):
if atom.use:
- matched_pkgs_ignore_use.append(pkg)
if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
target_use = {}
for flag in atom.use.enabled:
@@ -4352,8 +5317,11 @@ class depgraph(object):
use_match = True
can_adjust_use = not pkg.built
- missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
- missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
+ is_valid_flag = pkg.iuse.is_valid_flag
+ missing_enabled = frozenset(x for x in
+ atom.use.missing_enabled if not is_valid_flag(x))
+ missing_disabled = frozenset(x for x in
+ atom.use.missing_disabled if not is_valid_flag(x))
if atom.use.enabled:
if any(x in atom.use.enabled for x in missing_disabled):
@@ -4406,7 +5374,9 @@ class depgraph(object):
# will always end with a break statement below
# this point.
if find_existing_node:
- e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
+ e_pkg = next(self._dynamic_config._package_tracker.match(
+ root, pkg.slot_atom, installed=False), None)
+
if not e_pkg:
break
@@ -4427,50 +5397,56 @@ class depgraph(object):
break
# Compare built package to current config and
# reject the built package if necessary.
- if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
- ("--newuse" in self._frozen_config.myopts or \
- "--reinstall" in self._frozen_config.myopts or \
- (not installed and self._dynamic_config.myparams.get(
- "binpkg_respect_use") in ("y", "auto"))):
- iuses = pkg.iuse.all
- old_use = self._pkg_use_enabled(pkg)
- if myeb:
- pkgsettings.setcpv(myeb)
- else:
- pkgsettings.setcpv(pkg)
- now_use = pkgsettings["PORTAGE_USE"].split()
- forced_flags = set()
- forced_flags.update(pkgsettings.useforce)
- forced_flags.update(pkgsettings.usemask)
- cur_iuse = iuses
- if myeb and not usepkgonly and not useoldpkg:
- cur_iuse = myeb.iuse.all
- reinstall_for_flags = self._reinstall_for_flags(pkg,
- forced_flags, old_use, iuses, now_use, cur_iuse)
- if reinstall_for_flags:
- if not pkg.installed:
- self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
+ reinstall_use = ("--newuse" in self._frozen_config.myopts or \
+ "--reinstall" in self._frozen_config.myopts)
+ respect_use = self._dynamic_config.myparams.get("binpkg_respect_use") in ("y", "auto")
+ if built and not useoldpkg and \
+ (not installed or matched_packages) and \
+ not (installed and
+ self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg))):
+ if myeb and "--newrepo" in self._frozen_config.myopts and myeb.repo != pkg.repo:
break
+ elif reinstall_use or (not installed and respect_use):
+ iuses = pkg.iuse.all
+ old_use = self._pkg_use_enabled(pkg)
+ if myeb:
+ pkgsettings.setcpv(myeb)
+ else:
+ pkgsettings.setcpv(pkg)
+ now_use = pkgsettings["PORTAGE_USE"].split()
+ forced_flags = set()
+ forced_flags.update(pkgsettings.useforce)
+ forced_flags.update(pkgsettings.usemask)
+ cur_iuse = iuses
+ if myeb and not usepkgonly and not useoldpkg:
+ cur_iuse = myeb.iuse.all
+ reinstall_for_flags = self._reinstall_for_flags(pkg,
+ forced_flags, old_use, iuses, now_use, cur_iuse)
+ if reinstall_for_flags:
+ if not pkg.installed:
+ self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
+ break
# Compare current config to installed package
# and do not reinstall if possible.
- if not installed and not useoldpkg and \
- ("--newuse" in self._frozen_config.myopts or \
- "--reinstall" in self._frozen_config.myopts) and \
- cpv in vardb.match(atom):
- forced_flags = set()
- forced_flags.update(pkg.use.force)
- forced_flags.update(pkg.use.mask)
+ if not installed and not useoldpkg and cpv in vardb.match(atom):
inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
- old_use = inst_pkg.use.enabled
- old_iuse = inst_pkg.iuse.all
- cur_use = self._pkg_use_enabled(pkg)
- cur_iuse = pkg.iuse.all
- reinstall_for_flags = \
- self._reinstall_for_flags(pkg,
- forced_flags, old_use, old_iuse,
- cur_use, cur_iuse)
- if reinstall_for_flags:
+ if "--newrepo" in self._frozen_config.myopts and pkg.repo != inst_pkg.repo:
reinstall = True
+ elif reinstall_use:
+ forced_flags = set()
+ forced_flags.update(pkg.use.force)
+ forced_flags.update(pkg.use.mask)
+ old_use = inst_pkg.use.enabled
+ old_iuse = inst_pkg.iuse.all
+ cur_use = self._pkg_use_enabled(pkg)
+ cur_iuse = pkg.iuse.all
+ reinstall_for_flags = \
+ self._reinstall_for_flags(pkg,
+ forced_flags, old_use, old_iuse,
+ cur_use, cur_iuse)
+ if reinstall_for_flags:
+ reinstall = True
if reinstall_atoms.findAtomForPackage(pkg, \
modified_use=self._pkg_use_enabled(pkg)):
reinstall = True
@@ -4512,6 +5488,26 @@ class depgraph(object):
return existing_node, existing_node
if len(matched_packages) > 1:
+ if parent is not None and \
+ (parent.root, parent.slot_atom) in self._dynamic_config._slot_operator_replace_installed:
+ # We're forcing a rebuild of the parent because we missed
+ # an update due to a slot operator dep.
+ if atom.slot_operator == "=" and atom.sub_slot is None:
+ # This one is a slot operator dep. Exclude the installed
+ # packages if a newer non-installed pkg exists.
+ highest_installed = None
+ for pkg in matched_packages:
+ if pkg.installed:
+ if highest_installed is None or pkg.version > highest_installed.version:
+ highest_installed = pkg
+
+ if highest_installed:
+ non_installed = [pkg for pkg in matched_packages \
+ if not pkg.installed and pkg.version > highest_installed.version]
+
+ if non_installed:
+ matched_packages = non_installed
+
if rebuilt_binaries:
inst_pkg = None
built_pkg = None
@@ -4529,15 +5525,8 @@ class depgraph(object):
# non-empty, in order to avoid cases like
# bug #306659 where BUILD_TIME fields are missing
# in local and/or remote Packages file.
- try:
- built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
- except (KeyError, ValueError):
- built_timestamp = 0
-
- try:
- installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
- except (KeyError, ValueError):
- installed_timestamp = 0
+ built_timestamp = built_pkg.build_time
+ installed_timestamp = inst_pkg.build_time
if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
pass
@@ -4584,7 +5573,7 @@ class depgraph(object):
# ordered by type preference ("ebuild" type is the last resort)
return matched_packages[-1], existing_node
- def _select_pkg_from_graph(self, root, atom, onlydeps=False):
+ def _select_pkg_from_graph(self, root, atom, onlydeps=False, parent=None):
"""
Select packages that have already been added to the graph or
those that are installed and have not been scheduled for
@@ -4594,11 +5583,18 @@ class depgraph(object):
matches = graph_db.match_pkgs(atom)
if not matches:
return None, None
- pkg = matches[-1] # highest match
- in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
- return pkg, in_graph
- def _select_pkg_from_installed(self, root, atom, onlydeps=False):
+ # There may be multiple matches, and they may
+ # conflict with eachother, so choose the highest
+ # version that has already been added to the graph.
+ for pkg in reversed(matches):
+ if pkg in self._dynamic_config.digraph:
+ return pkg, pkg
+
+ # Fall back to installed packages
+ return self._select_pkg_from_installed(root, atom, onlydeps=onlydeps, parent=parent)
+
+ def _select_pkg_from_installed(self, root, atom, onlydeps=False, parent=None):
"""
Select packages that are installed.
"""
@@ -4621,8 +5617,18 @@ class depgraph(object):
unmasked = [pkg for pkg in matches if not pkg.masks]
if unmasked:
matches = unmasked
+ if len(matches) > 1:
+ # Now account for packages for which existing
+ # ebuilds are masked or unavailable (bug #445506).
+ unmasked = [pkg for pkg in matches if
+ self._equiv_ebuild_visible(pkg)]
+ if unmasked:
+ matches = unmasked
+
pkg = matches[-1] # highest match
- in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
+ in_graph = next(self._dynamic_config._package_tracker.match(
+ root, pkg.slot_atom, installed=False), None)
+
return pkg, in_graph
def _complete_graph(self, required_sets=None):
@@ -4649,9 +5655,9 @@ class depgraph(object):
"complete_if_new_use", "y") == "y"
complete_if_new_ver = self._dynamic_config.myparams.get(
"complete_if_new_ver", "y") == "y"
- rebuild_if_new_slot_abi = self._dynamic_config.myparams.get(
- "rebuild_if_new_slot_abi", "y") == "y"
- complete_if_new_slot = rebuild_if_new_slot_abi
+ rebuild_if_new_slot = self._dynamic_config.myparams.get(
+ "rebuild_if_new_slot", "y") == "y"
+ complete_if_new_slot = rebuild_if_new_slot
if "complete" not in self._dynamic_config.myparams and \
(complete_if_new_use or
@@ -4670,10 +5676,16 @@ class depgraph(object):
inst_pkg = vardb.match_pkgs(node.slot_atom)
if inst_pkg and inst_pkg[0].cp == node.cp:
inst_pkg = inst_pkg[0]
- if complete_if_new_ver and \
- (inst_pkg < node or node < inst_pkg):
- version_change = True
- break
+ if complete_if_new_ver:
+ if inst_pkg < node or node < inst_pkg:
+ version_change = True
+ break
+ elif not (inst_pkg.slot == node.slot and
+ inst_pkg.sub_slot == node.sub_slot):
+ # slot/sub-slot change without revbump gets
+ # similar treatment to a version change
+ version_change = True
+ break
# Intersect enabled USE with IUSE, in order to
# ignore forced USE from implicit IUSE flags, since
@@ -4689,7 +5701,8 @@ class depgraph(object):
if complete_if_new_slot:
cp_list = vardb.match_pkgs(Atom(node.cp))
if (cp_list and cp_list[0].cp == node.cp and
- not any(node.slot == pkg.slot for pkg in cp_list)):
+ not any(node.slot == pkg.slot and
+ node.sub_slot == pkg.sub_slot for pkg in cp_list)):
version_change = True
break
@@ -4795,7 +5808,7 @@ class depgraph(object):
return 0
return 1
- def _pkg(self, cpv, type_name, root_config, installed=False,
+ def _pkg(self, cpv, type_name, root_config, installed=False,
onlydeps=False, myrepo = None):
"""
Get a package instance from the cache, or create a new
@@ -4813,10 +5826,14 @@ class depgraph(object):
installed=installed, onlydeps=onlydeps))
if pkg is None and onlydeps and not installed:
# Maybe it already got pulled in as a "merge" node.
- pkg = self._dynamic_config.mydbapi[root_config.root].get(
- Package._gen_hash_key(cpv=cpv, type_name=type_name,
- repo_name=myrepo, root_config=root_config,
- installed=installed, onlydeps=False))
+ for candidate in self._dynamic_config._package_tracker.match(
+ root_config.root, Atom("="+cpv)):
+ if candidate.type_name == type_name and \
+ candidate.repo_name == myrepo and \
+ candidate.root_config is root_config and \
+ candidate.installed == installed and \
+ not candidate.onlydeps:
+ pkg = candidate
if pkg is None:
tree_type = self.pkg_tree_map[type_name]
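
The replacement lookup above amounts to scanning tracker matches for an exact =cpv atom and keeping only an exact twin of the requested hash key; a sketch, with the tracker treated as an assumed interface:

    # `tracker` is a hypothetical stand-in for _package_tracker.
    def find_merge_twin(tracker, root, cpv, type_name, myrepo, root_config):
        for candidate in tracker.match(root, "=" + cpv):
            if (candidate.type_name == type_name
                    and candidate.repo_name == myrepo
                    and candidate.root_config is root_config
                    and not candidate.installed
                    and not candidate.onlydeps):
                return candidate
        return None
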
@@ -4866,7 +5883,7 @@ class depgraph(object):
# For installed packages, always ignore blockers from DEPEND since
# only runtime dependencies should be relevant for packages that
# are already built.
- dep_keys = ["RDEPEND", "PDEPEND"]
+ dep_keys = Package._runtime_keys
for myroot in self._frozen_config.trees:
if self._frozen_config.myopts.get("--root-deps") is not None and \
@@ -4876,7 +5893,8 @@ class depgraph(object):
vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
pkgsettings = self._frozen_config.pkgsettings[myroot]
root_config = self._frozen_config.roots[myroot]
- final_db = self._dynamic_config.mydbapi[myroot]
+ final_db = PackageTrackerDbapiWrapper(
+ myroot, self._dynamic_config._package_tracker)
blocker_cache = BlockerCache(myroot, vardb)
stale_cache = set(blocker_cache)
@@ -4893,7 +5911,7 @@ class depgraph(object):
# the merge process or by --depclean. Always warn about
# packages masked by license, since the user likely wants
# to adjust ACCEPT_LICENSE.
- if pkg in final_db:
+ if pkg in self._dynamic_config._package_tracker:
if not self._pkg_visibility_check(pkg,
trust_graph=False) and \
(pkg_in_graph or 'LICENSE' in pkg.masks):
@@ -4928,7 +5946,7 @@ class depgraph(object):
self._spinner_update()
blocker_data = blocker_cache.get(cpv)
if blocker_data is not None and \
- blocker_data.counter != long(pkg.metadata["COUNTER"]):
+ blocker_data.counter != pkg.counter:
blocker_data = None
# If blocker data from the graph is available, use
@@ -4945,9 +5963,8 @@ class depgraph(object):
blockers is not None:
# Re-use the blockers from the graph.
blocker_atoms = sorted(blockers)
- counter = long(pkg.metadata["COUNTER"])
blocker_data = \
- blocker_cache.BlockerData(counter, blocker_atoms)
+ blocker_cache.BlockerData(pkg.counter, blocker_atoms)
blocker_cache[pkg.cpv] = blocker_data
continue
@@ -4972,13 +5989,14 @@ class depgraph(object):
# matches (this can happen if an atom lacks a
# category).
show_invalid_depstring_notice(
- pkg, depstr, _unicode_decode("%s") % (e,))
+ pkg, depstr, "%s" % (e,))
del e
raise
if not success:
- replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
- if replacement_pkg and \
- replacement_pkg[0].operation == "merge":
+ replacement_pkgs = self._dynamic_config._package_tracker.match(
+ myroot, pkg.slot_atom)
+ if any(replacement_pkg.operation == "merge" for \
+ replacement_pkg in replacement_pkgs):
# This package is being replaced anyway, so
# ignore invalid dependencies so as not to
# annoy the user too much (otherwise they'd be
@@ -4989,22 +6007,20 @@ class depgraph(object):
blocker_atoms = [myatom for myatom in atoms \
if myatom.blocker]
blocker_atoms.sort()
- counter = long(pkg.metadata["COUNTER"])
blocker_cache[cpv] = \
- blocker_cache.BlockerData(counter, blocker_atoms)
+ blocker_cache.BlockerData(pkg.counter, blocker_atoms)
if blocker_atoms:
try:
for atom in blocker_atoms:
blocker = Blocker(atom=atom,
- eapi=pkg.metadata["EAPI"],
+ eapi=pkg.eapi,
priority=self._priority(runtime=True),
root=myroot)
self._dynamic_config._blocker_parents.add(blocker, pkg)
except portage.exception.InvalidAtom as e:
depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
show_invalid_depstring_notice(
- pkg, depstr,
- _unicode_decode("Invalid Atom: %s") % (e,))
+ pkg, depstr, "Invalid Atom: %s" % (e,))
return False
for cpv in stale_cache:
del blocker_cache[cpv]
@@ -5025,8 +6041,7 @@ class depgraph(object):
virtuals = root_config.settings.getvirtuals()
myroot = blocker.root
initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
- final_db = self._dynamic_config.mydbapi[myroot]
-
+
provider_virtual = False
if blocker.cp in virtuals and \
not self._have_new_virt(blocker.root, blocker.cp):
@@ -5053,7 +6068,7 @@ class depgraph(object):
blocked_final = set()
for atom in atoms:
- for pkg in final_db.match_pkgs(atom):
+ for pkg in self._dynamic_config._package_tracker.match(myroot, atom):
if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
blocked_final.add(pkg)
@@ -5137,7 +6152,7 @@ class depgraph(object):
for inst_pkg, inst_task in depends_on_order:
uninst_task = Package(built=inst_pkg.built,
cpv=inst_pkg.cpv, installed=inst_pkg.installed,
- metadata=inst_pkg.metadata,
+ metadata=inst_pkg._metadata,
operation="uninstall",
root_config=inst_pkg.root_config,
type_name=inst_pkg.type_name)
@@ -5203,7 +6218,12 @@ class depgraph(object):
mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
- def altlist(self, reversed=False):
+ def altlist(self, reversed=DeprecationWarning):
+
+ if reversed is not DeprecationWarning:
+ warnings.warn("The reversed parameter of "
+ "_emerge.depgraph.depgraph.altlist() is deprecated",
+ DeprecationWarning, stacklevel=2)
while self._dynamic_config._serialized_tasks_cache is None:
self._resolve_conflicts()
@@ -5213,9 +6233,13 @@ class depgraph(object):
except self._serialize_tasks_retry:
pass
- retlist = self._dynamic_config._serialized_tasks_cache[:]
- if reversed:
+ retlist = self._dynamic_config._serialized_tasks_cache
+ if reversed is not DeprecationWarning and reversed:
+ # TODO: remove the "reversed" parameter (builtin name collision)
+ retlist = list(retlist)
retlist.reverse()
+ retlist = tuple(retlist)
+
return retlist
def _implicit_libc_deps(self, mergelist, graph):
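
altlist() now uses the DeprecationWarning class object itself as the sentinel default, so even an explicit reversed=False still triggers the warning. A minimal sketch of that pattern, independent of depgraph:

    import warnings

    _SENTINEL = DeprecationWarning  # any unique object works as a sentinel

    def altlist(reversed=_SENTINEL):  # parameter shadows the builtin, hence the TODO
        if reversed is not _SENTINEL:
            warnings.warn("the 'reversed' parameter is deprecated",
                DeprecationWarning, stacklevel=2)
        tasks = ("a", "b", "c")
        if reversed is not _SENTINEL and reversed:
            tasks = tasks[::-1]  # builtin reversed() is shadowed here
        return tasks

    # altlist() is silent; altlist(reversed=True) warns and reverses.
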
@@ -5226,19 +6250,15 @@ class depgraph(object):
libc_pkgs = {}
implicit_libc_roots = (self._frozen_config._running_root.root,)
for root in implicit_libc_roots:
- graphdb = self._dynamic_config.mydbapi[root]
vardb = self._frozen_config.trees[root]["vartree"].dbapi
for atom in self._expand_virt_from_graph(root,
portage.const.LIBC_PACKAGE_ATOM):
if atom.blocker:
continue
- match = graphdb.match_pkgs(atom)
- if not match:
- continue
- pkg = match[-1]
- if pkg.operation == "merge" and \
- not vardb.cpv_exists(pkg.cpv):
- libc_pkgs.setdefault(pkg.root, set()).add(pkg)
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ libc_pkgs.setdefault(pkg.root, set()).add(pkg)
if not libc_pkgs:
return
@@ -5326,7 +6346,7 @@ class depgraph(object):
if "complete" not in self._dynamic_config.myparams and \
self._dynamic_config._allow_backtracking and \
- self._dynamic_config._slot_collision_nodes and \
+ any(self._dynamic_config._package_tracker.slot_conflicts()) and \
not self._accept_blocker_conflicts():
self._dynamic_config.myparams["complete"] = True
@@ -5335,10 +6355,13 @@ class depgraph(object):
self._process_slot_conflicts()
- self._slot_abi_trigger_reinstalls()
+ if self._dynamic_config._allow_backtracking:
+ self._slot_operator_trigger_reinstalls()
if not self._validate_blockers():
- self._dynamic_config._skip_restart = True
+ # Blockers don't trigger the _skip_restart flag, since
+ # backtracking may solve blockers when it solves slot
+ # conflicts (or by blind luck).
raise self._unknown_internal_error()
def _serialize_tasks(self):
@@ -5436,8 +6459,8 @@ class depgraph(object):
initial_atoms=[PORTAGE_PACKAGE_ATOM])
running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
PORTAGE_PACKAGE_ATOM)
- replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
- PORTAGE_PACKAGE_ATOM)
+ replacement_portage = list(self._dynamic_config._package_tracker.match(
+ running_root, Atom(PORTAGE_PACKAGE_ATOM)))
if running_portage:
running_portage = running_portage[0]
@@ -5455,7 +6478,7 @@ class depgraph(object):
if running_portage is not None:
try:
portage_rdepend = self._select_atoms_highest_available(
- running_root, running_portage.metadata["RDEPEND"],
+ running_root, running_portage._metadata["RDEPEND"],
myuse=self._pkg_use_enabled(running_portage),
parent=running_portage, strict=False)
except portage.exception.InvalidDependString as e:
@@ -5474,18 +6497,15 @@ class depgraph(object):
for root in implicit_libc_roots:
libc_pkgs = set()
vardb = self._frozen_config.trees[root]["vartree"].dbapi
- graphdb = self._dynamic_config.mydbapi[root]
for atom in self._expand_virt_from_graph(root,
portage.const.LIBC_PACKAGE_ATOM):
if atom.blocker:
continue
- match = graphdb.match_pkgs(atom)
- if not match:
- continue
- pkg = match[-1]
- if pkg.operation == "merge" and \
- not vardb.cpv_exists(pkg.cpv):
- libc_pkgs.add(pkg)
+
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ libc_pkgs.add(pkg)
if libc_pkgs:
# If there's also an os-headers upgrade, we need to
@@ -5494,13 +6514,11 @@ class depgraph(object):
portage.const.OS_HEADERS_PACKAGE_ATOM):
if atom.blocker:
continue
- match = graphdb.match_pkgs(atom)
- if not match:
- continue
- pkg = match[-1]
- if pkg.operation == "merge" and \
- not vardb.cpv_exists(pkg.cpv):
- asap_nodes.append(pkg)
+
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ asap_nodes.append(pkg)
asap_nodes.extend(libc_pkgs)
@@ -5803,8 +6821,7 @@ class depgraph(object):
other_version = None
for pkg in vardb.match_pkgs(atom):
if pkg.cpv == task.cpv and \
- pkg.metadata["COUNTER"] == \
- task.metadata["COUNTER"]:
+ pkg.counter == task.counter:
continue
other_version = pkg
break
@@ -5843,13 +6860,12 @@ class depgraph(object):
# For packages in the world set, go ahead and uninstall
# when necessary, as long as the atom will be satisfied
# in the final state.
- graph_db = self._dynamic_config.mydbapi[task.root]
skip = False
try:
for atom in root_config.sets[
"selected"].iterAtomsForPackage(task):
satisfied = False
- for pkg in graph_db.match_pkgs(atom):
+ for pkg in self._dynamic_config._package_tracker.match(task.root, atom):
if pkg == inst_pkg:
continue
satisfied = True
@@ -5931,12 +6947,11 @@ class depgraph(object):
# node unnecessary (due to occupying the same SLOT),
# and we want to avoid executing a separate uninstall
# task in that case.
- slot_node = self._dynamic_config.mydbapi[uninst_task.root
- ].match_pkgs(uninst_task.slot_atom)
- if slot_node and \
- slot_node[0].operation == "merge":
- mygraph.add(slot_node[0], uninst_task,
- priority=BlockerDepPriority.instance)
+ for slot_node in self._dynamic_config._package_tracker.match(
+ uninst_task.root, uninst_task.slot_atom):
+ if slot_node.operation == "merge":
+ mygraph.add(slot_node, uninst_task,
+ priority=BlockerDepPriority.instance)
# Reset the state variables for leaf node selection and
# continue trying to select leaf nodes.
@@ -6011,7 +7026,7 @@ class depgraph(object):
inst_pkg = inst_pkg[0]
uninst_task = Package(built=inst_pkg.built,
cpv=inst_pkg.cpv, installed=inst_pkg.installed,
- metadata=inst_pkg.metadata,
+ metadata=inst_pkg._metadata,
operation="uninstall",
root_config=inst_pkg.root_config,
type_name=inst_pkg.type_name)
@@ -6083,17 +7098,22 @@ class depgraph(object):
for blocker in unsolvable_blockers:
retlist.append(blocker)
+ retlist = tuple(retlist)
+
if unsolvable_blockers and \
not self._accept_blocker_conflicts():
self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
- self._dynamic_config._serialized_tasks_cache = retlist[:]
+ self._dynamic_config._serialized_tasks_cache = retlist
self._dynamic_config._scheduler_graph = scheduler_graph
- self._dynamic_config._skip_restart = True
+ # Blockers don't trigger the _skip_restart flag, since
+ # backtracking may solve blockers when it solves slot
+ # conflicts (or by blind luck).
raise self._unknown_internal_error()
- if self._dynamic_config._slot_collision_info and \
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if have_slot_conflict and \
not self._accept_blocker_conflicts():
- self._dynamic_config._serialized_tasks_cache = retlist[:]
+ self._dynamic_config._serialized_tasks_cache = retlist
self._dynamic_config._scheduler_graph = scheduler_graph
raise self._unknown_internal_error()
@@ -6147,13 +7167,8 @@ class depgraph(object):
def _show_merge_list(self):
if self._dynamic_config._serialized_tasks_cache is not None and \
not (self._dynamic_config._displayed_list is not None and \
- (self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
- self._dynamic_config._displayed_list == \
- list(reversed(self._dynamic_config._serialized_tasks_cache)))):
- display_list = self._dynamic_config._serialized_tasks_cache[:]
- if "--tree" in self._frozen_config.myopts:
- display_list.reverse()
- self.display(display_list)
+ self._dynamic_config._displayed_list is self._dynamic_config._serialized_tasks_cache):
+ self.display(self._dynamic_config._serialized_tasks_cache)
def _show_unsatisfied_blockers(self, blockers):
self._show_merge_list()
@@ -6175,6 +7190,18 @@ class depgraph(object):
for blocker in blockers:
for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
self._dynamic_config._blocker_parents.parent_nodes(blocker)):
+
+ is_slot_conflict_pkg = False
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ if conflict.root == pkg.root and conflict.atom == pkg.slot_atom:
+ is_slot_conflict_pkg = True
+ break
+ if is_slot_conflict_pkg:
+ # The slot conflict display has better noise reduction
+ # than the unsatisfied blockers display, so skip
+ # unsatisfied blockers display for packages involved
+ # directly in slot conflicts (see bug #385391).
+ continue
parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
if not parent_atoms:
atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
@@ -6232,7 +7259,14 @@ class depgraph(object):
else:
# Display the specific atom from SetArg or
# Package types.
- msg.append("%s required by %s" % (atom, parent))
+ if atom != atom.unevaluated_atom:
+ # Show the unevaluated atom, since it can reveal
+ # issues with conditional use-flags missing
+ # from IUSE.
+ msg.append("%s (%s) required by %s" %
+ (atom.unevaluated_atom, atom, parent))
+ else:
+ msg.append("%s required by %s" % (atom, parent))
msg.append("\n")
msg.append("\n")
@@ -6248,6 +7282,10 @@ class depgraph(object):
# redundantly displaying this exact same merge list
# again via _show_merge_list().
self._dynamic_config._displayed_list = mylist
+
+ if "--tree" in self._frozen_config.myopts:
+ mylist = tuple(reversed(mylist))
+
display = Display()
return display(self, mylist, favorites, verbosity)
@@ -6320,7 +7358,7 @@ class depgraph(object):
if is_latest:
unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
elif is_latest_in_slot:
- unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
+ unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, keyword))
else:
unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
else:
@@ -6343,7 +7381,7 @@ class depgraph(object):
keyword = reason.unmask_hint.value
comment, filename = portage.getmaskingreason(
- pkg.cpv, metadata=pkg.metadata,
+ pkg.cpv, metadata=pkg._metadata,
settings=pkgsettings,
portdb=pkg.root_config.trees["porttree"].dbapi,
return_location=True)
@@ -6360,7 +7398,7 @@ class depgraph(object):
if is_latest:
p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
elif is_latest_in_slot:
- p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
+ p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.slot))
else:
p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
else:
@@ -6385,7 +7423,7 @@ class depgraph(object):
if is_latest:
use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
elif is_latest_in_slot:
- use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
+ use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(adjustments)))
else:
use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
@@ -6402,7 +7440,7 @@ class depgraph(object):
if is_latest:
license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
elif is_latest_in_slot:
- license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
+ license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(sorted(missing_licenses))))
else:
license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
@@ -6442,7 +7480,7 @@ class depgraph(object):
if stat.S_ISREG(st.st_mode):
last_file_path = p
elif stat.S_ISDIR(st.st_mode):
- if os.path.basename(p) in _ignorecvs_dirs:
+ if os.path.basename(p) in VCS_DIRS:
continue
try:
contents = os.listdir(p)
@@ -6511,24 +7549,25 @@ class depgraph(object):
if len(roots) > 1:
writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
+ def _writemsg(reason, file):
+ writemsg(('\nThe following %s are necessary to proceed:\n'
+ ' (see "%s" in the portage(5) man page for more details)\n')
+ % (colorize('BAD', reason), file), noiselevel=-1)
+
if root in unstable_keyword_msg:
- writemsg("\nThe following " + colorize("BAD", "keyword changes") + \
- " are necessary to proceed:\n", noiselevel=-1)
+ _writemsg('keyword changes', 'package.accept_keywords')
writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
if root in p_mask_change_msg:
- writemsg("\nThe following " + colorize("BAD", "mask changes") + \
- " are necessary to proceed:\n", noiselevel=-1)
+ _writemsg('mask changes', 'package.unmask')
writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
if root in use_changes_msg:
- writemsg("\nThe following " + colorize("BAD", "USE changes") + \
- " are necessary to proceed:\n", noiselevel=-1)
+ _writemsg('USE changes', 'package.use')
writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
if root in license_msg:
- writemsg("\nThe following " + colorize("BAD", "license changes") + \
- " are necessary to proceed:\n", noiselevel=-1)
+ _writemsg('license changes', 'package.license')
writemsg(format_msg(license_msg[root]), noiselevel=-1)
protect_obj = {}
@@ -6542,11 +7581,12 @@ class depgraph(object):
def write_changes(root, changes, file_to_write_to):
file_contents = None
try:
- file_contents = io.open(
+ with io.open(
_unicode_encode(file_to_write_to,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'],
- errors='replace').readlines()
+ errors='replace') as f:
+ file_contents = f.readlines()
except IOError as e:
if e.errno == errno.ENOENT:
file_contents = []
@@ -6612,10 +7652,16 @@ class depgraph(object):
noiselevel=-1)
writemsg("".join(problems), noiselevel=-1)
elif write_to_file and roots:
- writemsg("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
+ writemsg("\nAutounmask changes successfully written.\n",
noiselevel=-1)
+ for root in roots:
+ chk_updated_cfg_files(root,
+ [os.path.join(os.sep, USER_CONFIG_PATH)])
elif not pretend and not autounmask_write and roots:
- writemsg("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
+ writemsg("\nUse --autounmask-write to write changes to config files (honoring\n"
+ "CONFIG_PROTECT). Carefully examine the list of proposed changes,\n"
+ "paying special attention to mask or keyword changes that may expose\n"
+ "experimental or unstable packages.\n",
noiselevel=-1)
@@ -6632,21 +7678,35 @@ class depgraph(object):
self._show_circular_deps(
self._dynamic_config._circular_deps_for_display)
- # The slot conflict display has better noise reduction than
- # the unsatisfied blockers display, so skip unsatisfied blockers
- # display if there are slot conflicts (see bug #385391).
- if self._dynamic_config._slot_collision_info:
+ unresolved_conflicts = False
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if have_slot_conflict:
+ unresolved_conflicts = True
self._show_slot_collision_notice()
- elif self._dynamic_config._unsatisfied_blockers_for_display is not None:
+ if self._dynamic_config._unsatisfied_blockers_for_display is not None:
+ unresolved_conflicts = True
self._show_unsatisfied_blockers(
self._dynamic_config._unsatisfied_blockers_for_display)
- else:
+
+ # Only show missed updates if there are no unresolved conflicts,
+ # since they may be irrelevant after the conflicts are solved.
+ if not unresolved_conflicts:
self._show_missed_update()
+ if self._frozen_config.myopts.get("--verbose-slot-rebuilds", 'y') != 'n':
+ self._compute_abi_rebuild_info()
+ self._show_abi_rebuild_info()
+
self._show_ignored_binaries()
self._display_autounmask()
+ for depgraph_sets in self._dynamic_config.sets.values():
+ for pset in depgraph_sets.sets.values():
+ for error_msg in pset.errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+
# TODO: Add generic support for "set problem" handlers so that
# the below warnings aren't special cases for world only.
@@ -6722,7 +7782,7 @@ class depgraph(object):
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
masked_packages.append((root_config, pkgsettings,
- pkg.cpv, pkg.repo, pkg.metadata, mreasons))
+ pkg.cpv, pkg.repo, pkg._metadata, mreasons))
if masked_packages:
writemsg("\n" + colorize("BAD", "!!!") + \
" The following updates are masked by LICENSE changes:\n",
@@ -6737,7 +7797,7 @@ class depgraph(object):
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
masked_packages.append((root_config, pkgsettings,
- pkg.cpv, pkg.repo, pkg.metadata, mreasons))
+ pkg.cpv, pkg.repo, pkg._metadata, mreasons))
if masked_packages:
writemsg("\n" + colorize("BAD", "!!!") + \
" The following installed packages are masked:\n",
@@ -6747,7 +7807,15 @@ class depgraph(object):
writemsg("\n", noiselevel=-1)
for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
- self._show_unsatisfied_dep(*pargs, **kwargs)
+ self._show_unsatisfied_dep(*pargs,
+ **portage._native_kwargs(kwargs))
+
+ if self._dynamic_config._buildpkgonly_deps_unsatisfied:
+ self._show_merge_list()
+ writemsg("\n!!! --buildpkgonly requires all "
+ "dependencies to be merged.\n", noiselevel=-1)
+ writemsg("!!! Cannot merge requested packages. "
+ "Merge deps and try again.\n\n", noiselevel=-1)
def saveNomergeFavorites(self):
"""Find atoms in favorites that are not in the mergelist and add them
@@ -6808,16 +7876,31 @@ class depgraph(object):
all_added.append(SETPREFIX + k)
all_added.extend(added_favorites)
all_added.sort()
- for a in all_added:
- if a.startswith(SETPREFIX):
- filename = "world_sets"
- else:
- filename = "world"
- writemsg_stdout(
- ">>> Recording %s in \"%s\" favorites file...\n" %
- (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
if all_added:
- world_set.update(all_added)
+ skip = False
+ if "--ask" in self._frozen_config.myopts:
+ writemsg_stdout("\n", noiselevel=-1)
+ for a in all_added:
+ writemsg_stdout(" %s %s\n" % (colorize("GOOD", "*"), a),
+ noiselevel=-1)
+ writemsg_stdout("\n", noiselevel=-1)
+ prompt = "Would you like to add these packages to your world " \
+ "favorites?"
+ enter_invalid = '--ask-enter-invalid' in \
+ self._frozen_config.myopts
+ if userquery(prompt, enter_invalid) == "No":
+ skip = True
+
+ if not skip:
+ for a in all_added:
+ if a.startswith(SETPREFIX):
+ filename = "world_sets"
+ else:
+ filename = "world"
+ writemsg_stdout(
+ ">>> Recording %s in \"%s\" favorites file...\n" %
+ (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
+ world_set.update(all_added)
if world_locked:
world_set.unlock()
@@ -6844,7 +7927,6 @@ class depgraph(object):
else:
args = []
- fakedb = self._dynamic_config.mydbapi
serialized_tasks = []
masked_tasks = []
for x in mergelist:
@@ -6902,7 +7984,7 @@ class depgraph(object):
self._dynamic_config._unsatisfied_deps_for_display.append(
((pkg.root, "="+pkg.cpv), {"myparent":None}))
- fakedb[myroot].cpv_inject(pkg)
+ self._dynamic_config._package_tracker.add_pkg(pkg)
serialized_tasks.append(pkg)
self._spinner_update()
@@ -7092,14 +8174,15 @@ class depgraph(object):
try:
for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
self._show_unsatisfied_dep(
- *pargs, check_autounmask_breakage=True, **kwargs)
+ *pargs, check_autounmask_breakage=True,
+ **portage._native_kwargs(kwargs))
except self._autounmask_breakage:
return True
return False
def get_backtrack_infos(self):
return self._dynamic_config._backtrack_infos
-
+
class _dep_check_composite_db(dbapi):
"""
@@ -7214,8 +8297,9 @@ class _dep_check_composite_db(dbapi):
elif not self._depgraph._equiv_ebuild_visible(pkg):
return False
- in_graph = self._depgraph._dynamic_config._slot_pkg_map[
- self._root].get(pkg.slot_atom)
+ in_graph = next(self._depgraph._dynamic_config._package_tracker.match(
+ self._root, pkg.slot_atom, installed=False), None)
+
if in_graph is None:
# Mask choices for packages which are not the highest visible
# version within their slot (since they usually trigger slot
@@ -7234,7 +8318,7 @@ class _dep_check_composite_db(dbapi):
return True
def aux_get(self, cpv, wants):
- metadata = self._cpv_pkg_map[cpv].metadata
+ metadata = self._cpv_pkg_map[cpv]._metadata
return [metadata.get(x, "") for x in wants]
def match_pkgs(self, atom):
@@ -7308,14 +8392,14 @@ def _spinner_stop(spinner):
portage.writemsg_stdout("... done!\n")
-def backtrack_depgraph(settings, trees, myopts, myparams,
+def backtrack_depgraph(settings, trees, myopts, myparams,
myaction, myfiles, spinner):
"""
Raises PackageSetNotFound if myfiles contains a missing package set.
"""
_spinner_start(spinner, myopts)
try:
- return _backtrack_depgraph(settings, trees, myopts, myparams,
+ return _backtrack_depgraph(settings, trees, myopts, myparams,
myaction, myfiles, spinner)
finally:
_spinner_stop(spinner)
@@ -7412,7 +8496,7 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
skip_masked = True
skip_unsatisfied = True
mergelist = mtimedb["resume"]["mergelist"]
- dropped_tasks = set()
+ dropped_tasks = {}
frozen_config = _frozen_depgraph_config(settings, trees,
myopts, spinner)
while True:
@@ -7426,12 +8510,21 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
raise
graph = mydepgraph._dynamic_config.digraph
- unsatisfied_parents = dict((dep.parent, dep.parent) \
- for dep in e.value)
+ unsatisfied_parents = {}
traversed_nodes = set()
- unsatisfied_stack = list(unsatisfied_parents)
+ unsatisfied_stack = [(dep.parent, dep.atom) for dep in e.value]
while unsatisfied_stack:
- pkg = unsatisfied_stack.pop()
+ pkg, atom = unsatisfied_stack.pop()
+ if atom is not None and \
+ mydepgraph._select_pkg_from_installed(
+ pkg.root, atom)[0] is not None:
+ continue
+ atoms = unsatisfied_parents.get(pkg)
+ if atoms is None:
+ atoms = []
+ unsatisfied_parents[pkg] = atoms
+ if atom is not None:
+ atoms.append(atom)
if pkg in traversed_nodes:
continue
traversed_nodes.add(pkg)
@@ -7440,7 +8533,8 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# package scheduled for merge, removing this
# package may cause the parent package's
# dependency to become unsatisfied.
- for parent_node in graph.parent_nodes(pkg):
+ for parent_node, atom in \
+ mydepgraph._dynamic_config._parent_atoms.get(pkg, []):
if not isinstance(parent_node, Package) \
or parent_node.operation not in ("merge", "nomerge"):
continue
@@ -7448,8 +8542,7 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# ensure that a package with an unsatisfied dependency
# won't get pulled in, even indirectly via a soft
# dependency.
- unsatisfied_parents[parent_node] = parent_node
- unsatisfied_stack.append(parent_node)
+ unsatisfied_stack.append((parent_node, atom))
unsatisfied_tuples = frozenset(tuple(parent_node)
for parent_node in unsatisfied_parents
@@ -7470,8 +8563,8 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
# Exclude installed packages that have been removed from the graph due
# to failure to build/install runtime dependencies after the dependent
# package has already been installed.
- dropped_tasks.update(pkg for pkg in \
- unsatisfied_parents if pkg.operation != "nomerge")
+ dropped_tasks.update((pkg, atoms) for pkg, atoms in \
+ unsatisfied_parents.items() if pkg.operation != "nomerge")
del e, graph, traversed_nodes, \
unsatisfied_parents, unsatisfied_stack
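
Schematically, the new traversal carries (parent, atom) pairs and accumulates a pkg -> [atoms] mapping rather than a bare set; helper names below are assumptions:

    def collect_unsatisfied(initial_deps, parent_atoms, still_satisfiable):
        unsatisfied_parents = {}
        traversed = set()
        stack = [(dep.parent, dep.atom) for dep in initial_deps]
        while stack:
            pkg, atom = stack.pop()
            if atom is not None and still_satisfiable(pkg, atom):
                continue  # an installed pkg can still satisfy this edge
            atoms = unsatisfied_parents.setdefault(pkg, [])
            if atom is not None:
                atoms.append(atom)
            if pkg in traversed:
                continue
            traversed.add(pkg)
            for parent, parent_atom in parent_atoms.get(pkg, []):
                stack.append((parent, parent_atom))
        return unsatisfied_parents
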
@@ -7557,9 +8650,11 @@ def show_masked_packages(masked_packages):
shown_comments.add(comment)
portdb = root_config.trees["porttree"].dbapi
for l in missing_licenses:
- l_path = portdb.findLicensePath(l)
if l in shown_licenses:
continue
+ l_path = portdb.findLicensePath(l)
+ if l_path is None:
+ continue
msg = ("A copy of the '%s' license" + \
" is located at '%s'.\n\n") % (l, l_path)
writemsg(msg, noiselevel=-1)
@@ -7586,9 +8681,9 @@ def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
if not pkg.installed:
- if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
+ if not pkgsettings._accept_chost(pkg.cpv, pkg._metadata):
mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
- pkg.metadata["CHOST"]))
+ pkg._metadata["CHOST"]))
if pkg.invalid:
for msgs in pkg.invalid.values():
@@ -7596,7 +8691,7 @@ def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
mreasons.append(
_MaskReason("invalid", "invalid: %s" % (msg,)))
- if not pkg.metadata["SLOT"]:
+ if not pkg._metadata["SLOT"]:
mreasons.append(
_MaskReason("invalid", "SLOT: undefined"))
diff --git a/pym/_emerge/emergelog.py b/pym/_emerge/emergelog.py
index b1b093f52..aea94f74e 100644
--- a/pym/_emerge/emergelog.py
+++ b/pym/_emerge/emergelog.py
@@ -1,7 +1,7 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import unicode_literals
import io
import sys
@@ -20,10 +20,6 @@ from portage.output import xtermTitle
_disable = True
_emerge_log_dir = '/var/log'
-# Coerce to unicode, in order to prevent TypeError when writing
-# raw bytes to TextIOWrapper with python2.
-_log_fmt = _unicode_decode("%.0f: %s\n")
-
def emergelog(xterm_titles, mystr, short_msg=None):
if _disable:
@@ -51,10 +47,10 @@ def emergelog(xterm_titles, mystr, short_msg=None):
mode=0o660)
mylock = portage.locks.lockfile(file_path)
try:
- mylogfile.write(_log_fmt % (time.time(), mystr))
+ mylogfile.write("%.0f: %s\n" % (time.time(), mystr))
mylogfile.close()
finally:
portage.locks.unlockfile(mylock)
except (IOError,OSError,portage.exception.PortageException) as e:
if secpass >= 1:
- print("emergelog():",e, file=sys.stderr)
+ portage.util.writemsg("emergelog(): %s\n" % (e,), noiselevel=-1)
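
With unicode_literals in effect, the format string is already a unicode object under Python 2, which is why the separate _log_fmt coercion can go away; for instance:

    from __future__ import unicode_literals
    import time

    # Under Python 2 this literal is type 'unicode' rather than bytes,
    # so writing it to a TextIOWrapper no longer raises TypeError.
    line = "%.0f: %s\n" % (time.time(), "Started emerge")
    assert isinstance(line, type(u""))
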
diff --git a/pym/_emerge/getloadavg.py b/pym/_emerge/getloadavg.py
index e9babf13e..6a2794fb1 100644
--- a/pym/_emerge/getloadavg.py
+++ b/pym/_emerge/getloadavg.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage import os
@@ -11,7 +11,8 @@ if getloadavg is None:
Raises OSError if the load average was unobtainable.
"""
try:
- loadavg_str = open('/proc/loadavg').readline()
+ with open('/proc/loadavg') as f:
+ loadavg_str = f.readline()
except IOError:
# getloadavg() is only supposed to raise OSError, so convert
raise OSError('unknown')
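
Using a with-statement here closes /proc/loadavg deterministically instead of leaving it to garbage collection; a sketch of the whole fallback, with the parsing step filled in as an assumption:

    def getloadavg():
        """Return the 1/5/15-minute load averages (Linux /proc fallback)."""
        try:
            with open('/proc/loadavg') as f:
                loadavg_str = f.readline()
        except IOError:
            # getloadavg() is only supposed to raise OSError
            raise OSError('unknown')
        loadavg_split = loadavg_str.split()
        if len(loadavg_split) < 3:
            raise OSError('unknown')
        return tuple(float(x) for x in loadavg_split[:3])
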
diff --git a/pym/_emerge/help.py b/pym/_emerge/help.py
index a1dbb37cc..8e241a85c 100644
--- a/pym/_emerge/help.py
+++ b/pym/_emerge/help.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -9,15 +9,15 @@ def help():
print(bold("emerge:")+" the other white meat (command-line interface to the Portage system)")
print(bold("Usage:"))
print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] [ "+turquoise("ebuild")+" | "+turquoise("tbz2")+" | "+turquoise("file")+" | "+turquoise("@set")+" | "+turquoise("atom")+" ] [ ... ]")
- print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] < "+turquoise("system")+" | "+turquoise("world")+" >")
+ print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] < "+turquoise("@system")+" | "+turquoise("@world")+" >")
print(" "+turquoise("emerge")+" < "+turquoise("--sync")+" | "+turquoise("--metadata")+" | "+turquoise("--info")+" >")
print(" "+turquoise("emerge")+" "+turquoise("--resume")+" [ "+green("--pretend")+" | "+green("--ask")+" | "+green("--skipfirst")+" ]")
- print(" "+turquoise("emerge")+" "+turquoise("--help")+" [ "+green("--verbose")+" ] ")
- print(bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhjkKlnNoOpPqrsStuvV")+"]")
+ print(" "+turquoise("emerge")+" "+turquoise("--help"))
+ print(bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhjkKlnNoOpPqrsStuvVw")+"]")
print(" [ " + green("--color")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ] [ "+green("--columns")+" ]")
print(" [ "+green("--complete-graph")+" ] [ "+green("--deep")+" ]")
print(" [ "+green("--jobs") + " " + turquoise("JOBS")+" ] [ "+green("--keep-going")+" ] [ " + green("--load-average")+" " + turquoise("LOAD") + " ]")
- print(" [ "+green("--newuse")+" ] [ "+green("--noconfmem")+" ] [ "+green("--nospinner")+" ]")
+ print(" [ "+green("--newrepo")+" ] [ "+green("--newuse")+" ] [ "+green("--noconfmem")+" ] [ "+green("--nospinner")+" ]")
print(" [ "+green("--oneshot")+" ] [ "+green("--onlydeps")+" ] [ "+ green("--quiet-build")+" [ " + turquoise("y") + " | "+ turquoise("n")+" ] ]")
print(" [ "+green("--reinstall ")+turquoise("changed-use")+" ] [ " + green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ]")
print(bold("Actions:")+" [ "+green("--depclean")+" | "+green("--list-sets")+" | "+green("--search")+" | "+green("--sync")+" | "+green("--version")+" ]")
diff --git a/pym/_emerge/is_valid_package_atom.py b/pym/_emerge/is_valid_package_atom.py
index 7cb2a5bb1..112afc1ec 100644
--- a/pym/_emerge/is_valid_package_atom.py
+++ b/pym/_emerge/is_valid_package_atom.py
@@ -1,11 +1,12 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import re
from portage.dep import isvalidatom
def insert_category_into_atom(atom, category):
- alphanum = re.search(r'\w', atom)
+ # Handle '*' character for "extended syntax" wildcard support.
+ alphanum = re.search(r'[\*\w]', atom, re.UNICODE)
if alphanum:
ret = atom[:alphanum.start()] + "%s/" % category + \
atom[alphanum.start():]
@@ -14,7 +15,7 @@ def insert_category_into_atom(atom, category):
return ret
def is_valid_package_atom(x, allow_repo=False):
- if "/" not in x:
+ if "/" not in x.split(":")[0]:
x2 = insert_category_into_atom(x, 'cat')
if x2 != None:
x = x2
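
Splitting on ':' first means a '/' inside a slot or sub-slot specifier no longer suppresses category insertion; for example:

    x = "dbus:0/1.3"                   # hypothetical atom with a sub-slot
    assert "/" in x                    # old test: category never inserted
    assert "/" not in x.split(":")[0]  # new test: 'cat/' still prepended
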
diff --git a/pym/_emerge/main.py b/pym/_emerge/main.py
index f19994c46..cfe133264 100644
--- a/pym/_emerge/main.py
+++ b/pym/_emerge/main.py
@@ -1,53 +1,24 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
-import logging
-import signal
-import stat
-import subprocess
-import sys
-import textwrap
import platform
+import sys
+
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.news:count_unread_news,display_news_notifications',
- 'portage.emaint.modules.logs.logs:CleanLogs',
+ 'logging',
+ 'portage.dep:Atom',
+ 'portage.util:writemsg_level',
+ 'textwrap',
+ '_emerge.actions:load_emerge_config,run_action,' + \
+ 'validate_ebuild_environment',
+ '_emerge.help:help@emerge_help',
+ '_emerge.is_valid_package_atom:insert_category_into_atom'
)
from portage import os
-from portage import _encodings
-from portage import _unicode_decode
-import _emerge.help
-import portage.xpak, errno, re, time
-from portage.output import colorize, xtermTitle, xtermTitleReset
-from portage.output import create_color_func
-good = create_color_func("GOOD")
-bad = create_color_func("BAD")
-
-from portage.const import _ENABLE_DYN_LINK_MAP
-import portage.elog
-import portage.util
-import portage.locks
-import portage.exception
-from portage.data import secpass
-from portage.dbapi.dep_expand import dep_expand
-from portage.util import normalize_path as normpath
-from portage.util import (shlex_split, varexpand,
- writemsg_level, writemsg_stdout)
-from portage._sets import SETPREFIX
-from portage._global_updates import _global_updates
-
-from _emerge.actions import action_config, action_sync, action_metadata, \
- action_regen, action_search, action_uninstall, action_info, action_build, \
- adjust_configs, chk_updated_cfg_files, display_missing_pkg_set, \
- display_news_notification, getportageversion, load_emerge_config
-import _emerge
-from _emerge.emergelog import emergelog
-from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
-from _emerge.is_valid_package_atom import is_valid_package_atom
-from _emerge.stdout_spinner import stdout_spinner
-from _emerge.userquery import userquery
+from portage.util._argparse import ArgumentParser
if sys.hexversion >= 0x3000000:
long = int
@@ -61,9 +32,11 @@ options=[
"--debug",
"--digest",
"--emptytree",
+"--verbose-conflicts",
"--fetchonly", "--fetch-all-uri",
"--ignore-default-opts",
"--noconfmem",
+"--newrepo",
"--newuse",
"--nodeps", "--noreplace",
"--nospinner", "--oneshot",
@@ -76,7 +49,6 @@ options=[
"--tree",
"--unordered-display",
"--update",
-"--verbose",
"--verbose-main-repo-display",
]
@@ -97,7 +69,7 @@ shortmapping={
"s":"--search", "S":"--searchdesc",
"t":"--tree",
"u":"--update",
-"v":"--verbose", "V":"--version"
+"V":"--version"
}
COWSAY_MOO = """
@@ -109,331 +81,12 @@ COWSAY_MOO = """
-----------------------
\ ^__^
\ (oo)\_______
- (__)\ )\/\
+ (__)\ )\/\\
||----w |
|| ||
"""
-def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
-
- if os.path.exists("/usr/bin/install-info"):
- out = portage.output.EOutput()
- regen_infodirs=[]
- for z in infodirs:
- if z=='':
- continue
- inforoot=normpath(root+z)
- if os.path.isdir(inforoot) and \
- not [x for x in os.listdir(inforoot) \
- if x.startswith('.keepinfodir')]:
- infomtime = os.stat(inforoot)[stat.ST_MTIME]
- if inforoot not in prev_mtimes or \
- prev_mtimes[inforoot] != infomtime:
- regen_infodirs.append(inforoot)
-
- if not regen_infodirs:
- portage.writemsg_stdout("\n")
- if portage.util.noiselimit >= 0:
- out.einfo("GNU info directory index is up-to-date.")
- else:
- portage.writemsg_stdout("\n")
- if portage.util.noiselimit >= 0:
- out.einfo("Regenerating GNU info directory index...")
-
- dir_extensions = ("", ".gz", ".bz2")
- icount=0
- badcount=0
- errmsg = ""
- for inforoot in regen_infodirs:
- if inforoot=='':
- continue
-
- if not os.path.isdir(inforoot) or \
- not os.access(inforoot, os.W_OK):
- continue
-
- file_list = os.listdir(inforoot)
- file_list.sort()
- dir_file = os.path.join(inforoot, "dir")
- moved_old_dir = False
- processed_count = 0
- for x in file_list:
- if x.startswith(".") or \
- os.path.isdir(os.path.join(inforoot, x)):
- continue
- if x.startswith("dir"):
- skip = False
- for ext in dir_extensions:
- if x == "dir" + ext or \
- x == "dir" + ext + ".old":
- skip = True
- break
- if skip:
- continue
- if processed_count == 0:
- for ext in dir_extensions:
- try:
- os.rename(dir_file + ext, dir_file + ext + ".old")
- moved_old_dir = True
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
- processed_count += 1
- try:
- proc = subprocess.Popen(
- ['/usr/bin/install-info',
- '--dir-file=%s' % os.path.join(inforoot, "dir"),
- os.path.join(inforoot, x)],
- env=dict(os.environ, LANG="C", LANGUAGE="C"),
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- except OSError:
- myso = None
- else:
- myso = _unicode_decode(
- proc.communicate()[0]).rstrip("\n")
- proc.wait()
- existsstr="already exists, for file `"
- if myso:
- if re.search(existsstr,myso):
- # Already exists... Don't increment the count for this.
- pass
- elif myso[:44]=="install-info: warning: no info dir entry in ":
- # This info file doesn't contain a DIR-header: install-info produces this
- # (harmless) warning (the --quiet switch doesn't seem to work).
- # Don't increment the count for this.
- pass
- else:
- badcount=badcount+1
- errmsg += myso + "\n"
- icount=icount+1
-
- if moved_old_dir and not os.path.exists(dir_file):
- # We didn't generate a new dir file, so put the old file
- # back where it was originally found.
- for ext in dir_extensions:
- try:
- os.rename(dir_file + ext + ".old", dir_file + ext)
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
-
- # Clean dir.old cruft so that they don't prevent
- # unmerge of otherwise empty directories.
- for ext in dir_extensions:
- try:
- os.unlink(dir_file + ext + ".old")
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
-
- #update mtime so we can potentially avoid regenerating.
- prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]
-
- if badcount:
- out.eerror("Processed %d info files; %d errors." % \
- (icount, badcount))
- writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
- else:
- if icount > 0 and portage.util.noiselimit >= 0:
- out.einfo("Processed %d info files." % (icount,))
-
-def display_preserved_libs(vardbapi, myopts):
- MAX_DISPLAY = 3
-
- if vardbapi._linkmap is None or \
- vardbapi._plib_registry is None:
- # preserve-libs is entirely disabled
- return
-
- # Explicitly load and prune the PreservedLibsRegistry in order
- # to ensure that we do not display stale data.
- vardbapi._plib_registry.load()
-
- if vardbapi._plib_registry.hasEntries():
- if "--quiet" in myopts:
- print()
- print(colorize("WARN", "!!!") + " existing preserved libs found")
- return
- else:
- print()
- print(colorize("WARN", "!!!") + " existing preserved libs:")
-
- plibdata = vardbapi._plib_registry.getPreservedLibs()
- linkmap = vardbapi._linkmap
- consumer_map = {}
- owners = {}
-
- try:
- linkmap.rebuild()
- except portage.exception.CommandNotFound as e:
- writemsg_level("!!! Command Not Found: %s\n" % (e,),
- level=logging.ERROR, noiselevel=-1)
- del e
- else:
- search_for_owners = set()
- for cpv in plibdata:
- internal_plib_keys = set(linkmap._obj_key(f) \
- for f in plibdata[cpv])
- for f in plibdata[cpv]:
- if f in consumer_map:
- continue
- consumers = []
- for c in linkmap.findConsumers(f):
- # Filter out any consumers that are also preserved libs
- # belonging to the same package as the provider.
- if linkmap._obj_key(c) not in internal_plib_keys:
- consumers.append(c)
- consumers.sort()
- consumer_map[f] = consumers
- search_for_owners.update(consumers[:MAX_DISPLAY+1])
-
- owners = {}
- for f in search_for_owners:
- owner_set = set()
- for owner in linkmap.getOwners(f):
- owner_dblink = vardbapi._dblink(owner)
- if owner_dblink.exists():
- owner_set.add(owner_dblink)
- if owner_set:
- owners[f] = owner_set
-
- for cpv in plibdata:
- print(colorize("WARN", ">>>") + " package: %s" % cpv)
- samefile_map = {}
- for f in plibdata[cpv]:
- obj_key = linkmap._obj_key(f)
- alt_paths = samefile_map.get(obj_key)
- if alt_paths is None:
- alt_paths = set()
- samefile_map[obj_key] = alt_paths
- alt_paths.add(f)
-
- for alt_paths in samefile_map.values():
- alt_paths = sorted(alt_paths)
- for p in alt_paths:
- print(colorize("WARN", " * ") + " - %s" % (p,))
- f = alt_paths[0]
- consumers = consumer_map.get(f, [])
- for c in consumers[:MAX_DISPLAY]:
- print(colorize("WARN", " * ") + " used by %s (%s)" % \
- (c, ", ".join(x.mycpv for x in owners.get(c, []))))
- if len(consumers) == MAX_DISPLAY + 1:
- print(colorize("WARN", " * ") + " used by %s (%s)" % \
- (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
- for x in owners.get(consumers[MAX_DISPLAY], []))))
- elif len(consumers) > MAX_DISPLAY:
- print(colorize("WARN", " * ") + " used by %d other files" % (len(consumers) - MAX_DISPLAY))
- print("Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries")
-
-def post_emerge(myaction, myopts, myfiles,
- target_root, trees, mtimedb, retval):
- """
- Misc. things to run at the end of a merge session.
-
- Update Info Files
- Update Config Files
- Update News Items
- Commit mtimeDB
- Display preserved libs warnings
-
- @param myaction: The action returned from parse_opts()
- @type myaction: String
- @param myopts: emerge options
- @type myopts: dict
- @param myfiles: emerge arguments
- @type myfiles: list
- @param target_root: The target EROOT for myaction
- @type target_root: String
- @param trees: A dictionary mapping each ROOT to it's package databases
- @type trees: dict
- @param mtimedb: The mtimeDB to store data needed across merge invocations
- @type mtimedb: MtimeDB class instance
- @param retval: Emerge's return value
- @type retval: Int
- """
-
- root_config = trees[target_root]["root_config"]
- vardbapi = trees[target_root]['vartree'].dbapi
- settings = vardbapi.settings
- info_mtimes = mtimedb["info"]
-
- # Load the most current variables from ${ROOT}/etc/profile.env
- settings.unlock()
- settings.reload()
- settings.regenerate()
- settings.lock()
-
- config_protect = shlex_split(settings.get("CONFIG_PROTECT", ""))
- infodirs = settings.get("INFOPATH","").split(":") + \
- settings.get("INFODIR","").split(":")
-
- os.chdir("/")
-
- if retval == os.EX_OK:
- exit_msg = " *** exiting successfully."
- else:
- exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
- emergelog("notitles" not in settings.features, exit_msg)
-
- _flush_elog_mod_echo()
-
- if not vardbapi._pkgs_changed:
- # GLEP 42 says to display news *after* an emerge --pretend
- if "--pretend" in myopts:
- display_news_notification(root_config, myopts)
- # If vdb state has not changed then there's nothing else to do.
- return
-
- vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
- portage.util.ensure_dirs(vdb_path)
- vdb_lock = None
- if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
- vardbapi.lock()
- vdb_lock = True
-
- if vdb_lock:
- try:
- if "noinfo" not in settings.features:
- chk_updated_info_files(target_root,
- infodirs, info_mtimes, retval)
- mtimedb.commit()
- finally:
- if vdb_lock:
- vardbapi.unlock()
-
- display_preserved_libs(vardbapi, myopts)
- chk_updated_cfg_files(settings['EROOT'], config_protect)
-
- display_news_notification(root_config, myopts)
-
- postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
- portage.USER_CONFIG_PATH, "bin", "post_emerge")
- if os.access(postemerge, os.X_OK):
- hook_retval = portage.process.spawn(
- [postemerge], env=settings.environ())
- if hook_retval != os.EX_OK:
- writemsg_level(
- " %s spawn failed of %s\n" % (bad("*"), postemerge,),
- level=logging.ERROR, noiselevel=-1)
-
- clean_logs(settings)
-
- if "--quiet" not in myopts and \
- myaction is None and "@world" in myfiles:
- show_depclean_suggestion()
-
-def show_depclean_suggestion():
- out = portage.output.EOutput()
- msg = "After world updates, it is important to remove " + \
- "obsolete packages with emerge --depclean. Refer " + \
- "to `man emerge` for more information."
- for line in textwrap.wrap(msg, 72):
- out.ewarn(line)
-
def multiple_actions(action1, action2):
sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
@@ -455,6 +108,16 @@ def insert_optional_args(args):
return False
valid_integers = valid_integers()
+
+ class valid_floats(object):
+ def __contains__(self, s):
+ try:
+ return float(s) >= 0
+ except (ValueError, OverflowError):
+ return False
+
+ valid_floats = valid_floats()
+
y_or_n = ('y', 'n',)
new_args = []
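
These membership objects implement only __contains__, so `value in valid_floats` works like a choices check over an unbounded set; for example:

    class ValidFloats(object):
        # mirrors the valid_floats helper above
        def __contains__(self, s):
            try:
                return float(s) >= 0
            except (ValueError, OverflowError):
                return False

    valid_floats = ValidFloats()
    assert "0.5" in valid_floats
    assert "-1" not in valid_floats
    assert "oops" not in valid_floats
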
@@ -468,6 +131,7 @@ def insert_optional_args(args):
'--buildpkg' : y_or_n,
'--complete-graph' : y_or_n,
'--deep' : valid_integers,
+ '--depclean-lib-check' : y_or_n,
'--deselect' : y_or_n,
'--binpkg-respect-use' : y_or_n,
'--fail-clean' : y_or_n,
@@ -475,10 +139,12 @@ def insert_optional_args(args):
'--getbinpkgonly' : y_or_n,
'--jobs' : valid_integers,
'--keep-going' : y_or_n,
+ '--load-average' : valid_floats,
'--package-moves' : y_or_n,
'--quiet' : y_or_n,
'--quiet-build' : y_or_n,
- '--rebuild-if-new-slot-abi': y_or_n,
+ '--quiet-fail' : y_or_n,
+ '--rebuild-if-new-slot': y_or_n,
'--rebuild-if-new-rev' : y_or_n,
'--rebuild-if-new-ver' : y_or_n,
'--rebuild-if-unbuilt' : y_or_n,
@@ -489,11 +155,10 @@ def insert_optional_args(args):
"--use-ebuild-visibility": y_or_n,
'--usepkg' : y_or_n,
'--usepkgonly' : y_or_n,
+ '--verbose' : y_or_n,
+ '--verbose-slot-rebuilds': y_or_n,
}
- if _ENABLE_DYN_LINK_MAP:
- default_arg_opts['--depclean-lib-check'] = y_or_n
-
short_arg_opts = {
'D' : valid_integers,
'j' : valid_integers,
@@ -509,6 +174,8 @@ def insert_optional_args(args):
'k' : y_or_n,
'K' : y_or_n,
'q' : y_or_n,
+ 'v' : y_or_n,
+ 'w' : y_or_n,
}
arg_stack = args[:]
@@ -597,14 +264,17 @@ def _find_bad_atoms(atoms, less_strict=False):
"""
bad_atoms = []
for x in ' '.join(atoms).split():
+ atom = x
+ if "/" not in x.split(":")[0]:
+ x_cat = insert_category_into_atom(x, 'dummy-category')
+ if x_cat is not None:
+ atom = x_cat
+
bad_atom = False
try:
- atom = portage.dep.Atom(x, allow_wildcard=True, allow_repo=less_strict)
+ atom = Atom(atom, allow_wildcard=True, allow_repo=less_strict)
except portage.exception.InvalidAtom:
- try:
- atom = portage.dep.Atom("*/"+x, allow_wildcard=True, allow_repo=less_strict)
- except portage.exception.InvalidAtom:
- bad_atom = True
+ bad_atom = True
if bad_atom or (atom.operator and not less_strict) or atom.blocker or atom.use:
bad_atoms.append(x)
@@ -632,31 +302,26 @@ def parse_opts(tmpcmdline, silent=False):
"--ask": {
"shortopt" : "-a",
"help" : "prompt before performing any actions",
- "type" : "choice",
"choices" : true_y_or_n
},
"--autounmask": {
"help" : "automatically unmask packages",
- "type" : "choice",
"choices" : true_y_or_n
},
"--autounmask-unrestricted-atoms": {
"help" : "write autounmask changes with >= atoms if possible",
- "type" : "choice",
"choices" : true_y_or_n
},
"--autounmask-keep-masks": {
"help" : "don't add package.unmask entries",
- "type" : "choice",
"choices" : true_y_or_n
},
"--autounmask-write": {
"help" : "write changes made by --autounmask to disk",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -665,6 +330,11 @@ def parse_opts(tmpcmdline, silent=False):
"action":"store"
},
+ "--accept-restrict": {
+ "help":"temporarily override ACCEPT_RESTRICT",
+ "action":"store"
+ },
+
"--backtrack": {
"help" : "Specifies how many times to backtrack if dependency " + \
@@ -676,7 +346,6 @@ def parse_opts(tmpcmdline, silent=False):
"--buildpkg": {
"shortopt" : "-b",
"help" : "build binary packages",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -694,25 +363,21 @@ def parse_opts(tmpcmdline, silent=False):
},
"--color": {
"help":"enable or disable color output",
- "type":"choice",
"choices":("y", "n")
},
"--complete-graph": {
"help" : "completely account for all known dependencies",
- "type" : "choice",
"choices" : true_y_or_n
},
"--complete-graph-if-new-use": {
"help" : "trigger --complete-graph behavior if USE or IUSE will change for an installed package",
- "type" : "choice",
"choices" : y_or_n
},
"--complete-graph-if-new-ver": {
"help" : "trigger --complete-graph behavior if an installed package version will change (upgrade or downgrade)",
- "type" : "choice",
"choices" : y_or_n
},
@@ -728,15 +393,18 @@ def parse_opts(tmpcmdline, silent=False):
"action" : "store"
},
+ "--depclean-lib-check": {
+ "help" : "check for consumers of libraries before removing them",
+ "choices" : true_y_or_n
+ },
+
"--deselect": {
"help" : "remove atoms/sets from the world file",
- "type" : "choice",
"choices" : true_y_or_n
},
"--dynamic-deps": {
"help": "substitute the dependencies of installed packages with the dependencies of unbuilt ebuilds",
- "type": "choice",
"choices": y_or_n
},
@@ -750,17 +418,15 @@ def parse_opts(tmpcmdline, silent=False):
"--fail-clean": {
"help" : "clean temp files after build failure",
- "type" : "choice",
"choices" : true_y_or_n
},
- "--ignore-built-slot-abi-deps": {
- "help": "Ignore the SLOT/ABI := operator parts of dependencies that have "
+ "--ignore-built-slot-operator-deps": {
+ "help": "Ignore the slot/sub-slot := operator parts of dependencies that have "
"been recorded when packages where built. This option is intended "
"only for debugging purposes, and it only affects built packages "
- "that specify SLOT/ABI := operator dependencies using the "
+ "that specify slot/sub-slot := operator dependencies using the "
"experimental \"4-slot-abi\" EAPI.",
- "type": "choice",
"choices": y_or_n
},
@@ -776,7 +442,6 @@ def parse_opts(tmpcmdline, silent=False):
"--keep-going": {
"help" : "continue as much as possible after an error",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -791,18 +456,15 @@ def parse_opts(tmpcmdline, silent=False):
"--misspell-suggestions": {
"help" : "enable package name misspell suggestions",
- "type" : "choice",
"choices" : ("y", "n")
},
"--with-bdeps": {
"help":"include unnecessary build time dependencies",
- "type":"choice",
"choices":("y", "n")
},
"--reinstall": {
"help":"specify conditions to trigger package reinstallation",
- "type":"choice",
"choices":["changed-use"]
},
@@ -817,21 +479,18 @@ def parse_opts(tmpcmdline, silent=False):
"--binpkg-respect-use": {
"help" : "discard binary packages if their use flags \
don't match the current configuration",
- "type" : "choice",
"choices" : true_y_or_n
},
"--getbinpkg": {
"shortopt" : "-g",
"help" : "fetch binary packages",
- "type" : "choice",
"choices" : true_y_or_n
},
"--getbinpkgonly": {
"shortopt" : "-G",
"help" : "fetch binary packages only",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -860,29 +519,40 @@ def parse_opts(tmpcmdline, silent=False):
"--package-moves": {
"help" : "perform package moves when necessary",
- "type" : "choice",
"choices" : true_y_or_n
},
+ "--prefix": {
+ "help" : "specify the installation prefix",
+ "action" : "store"
+ },
+
+ "--pkg-format": {
+ "help" : "format of result binary package",
+ "action" : "store",
+ },
+
"--quiet": {
"shortopt" : "-q",
"help" : "reduced or condensed output",
- "type" : "choice",
"choices" : true_y_or_n
},
"--quiet-build": {
"help" : "redirect build output to logs",
- "type" : "choice",
"choices" : true_y_or_n,
},
- "--rebuild-if-new-slot-abi": {
- "help" : ("Automatically rebuild or reinstall packages when SLOT/ABI := "
+ "--quiet-fail": {
+ "help" : "suppresses display of the build log on stdout",
+ "choices" : true_y_or_n,
+ },
+
+ "--rebuild-if-new-slot": {
+ "help" : ("Automatically rebuild or reinstall packages when slot/sub-slot := "
"operator dependencies can be satisfied by a newer slot, so that "
"older packages slots will become eligible for removal by the "
"--depclean action as soon as possible."),
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -891,7 +561,6 @@ def parse_opts(tmpcmdline, silent=False):
"used at both build-time and run-time are built, " + \
"if the dependency is not already installed with the " + \
"same version and revision.",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -900,24 +569,21 @@ def parse_opts(tmpcmdline, silent=False):
"used at both build-time and run-time are built, " + \
"if the dependency is not already installed with the " + \
"same version. Revision numbers are ignored.",
- "type" : "choice",
"choices" : true_y_or_n
},
"--rebuild-if-unbuilt": {
"help" : "Rebuild packages when dependencies that are " + \
"used at both build-time and run-time are built.",
- "type" : "choice",
"choices" : true_y_or_n
},
"--rebuilt-binaries": {
"help" : "replace installed packages with binary " + \
"packages that have been rebuilt",
- "type" : "choice",
"choices" : true_y_or_n
},
-
+
"--rebuilt-binaries-timestamp": {
"help" : "use only binaries that are newer than this " + \
"timestamp for --rebuilt-binaries",
@@ -931,26 +597,23 @@ def parse_opts(tmpcmdline, silent=False):
"--root-deps": {
"help" : "modify interpretation of depedencies",
- "type" : "choice",
"choices" :("True", "rdeps")
},
"--select": {
+ "shortopt" : "-w",
"help" : "add specified packages to the world set " + \
"(inverse of --oneshot)",
- "type" : "choice",
"choices" : true_y_or_n
},
"--selective": {
"help" : "identical to --noreplace",
- "type" : "choice",
"choices" : true_y_or_n
},
"--use-ebuild-visibility": {
"help" : "use unbuilt ebuild metadata for visibility checks on built packages",
- "type" : "choice",
"choices" : true_y_or_n
},
@@ -964,42 +627,39 @@ def parse_opts(tmpcmdline, silent=False):
"--usepkg": {
"shortopt" : "-k",
"help" : "use binary packages",
- "type" : "choice",
"choices" : true_y_or_n
},
"--usepkgonly": {
"shortopt" : "-K",
"help" : "use only binary packages",
- "type" : "choice",
"choices" : true_y_or_n
},
+ "--verbose": {
+ "shortopt" : "-v",
+ "help" : "verbose output",
+ "choices" : true_y_or_n
+ },
+ "--verbose-slot-rebuilds": {
+ "help" : "verbose slot rebuild output",
+ "choices" : true_y_or_n
+ },
}
- if _ENABLE_DYN_LINK_MAP:
- argument_options["--depclean-lib-check"] = {
- "help" : "check for consumers of libraries before removing them",
- "type" : "choice",
- "choices" : true_y_or_n
- }
-
- from optparse import OptionParser
- parser = OptionParser()
- if parser.has_option("--help"):
- parser.remove_option("--help")
+ parser = ArgumentParser(add_help=False)
for action_opt in actions:
- parser.add_option("--" + action_opt, action="store_true",
+ parser.add_argument("--" + action_opt, action="store_true",
dest=action_opt.replace("-", "_"), default=False)
for myopt in options:
- parser.add_option(myopt, action="store_true",
+ parser.add_argument(myopt, action="store_true",
dest=myopt.lstrip("--").replace("-", "_"), default=False)
for shortopt, longopt in shortmapping.items():
- parser.add_option("-" + shortopt, action="store_true",
+ parser.add_argument("-" + shortopt, action="store_true",
dest=longopt.lstrip("--").replace("-", "_"), default=False)
for myalias, myopt in longopt_aliases.items():
- parser.add_option(myalias, action="store_true",
+ parser.add_argument(myalias, action="store_true",
dest=myopt.lstrip("--").replace("-", "_"), default=False)
for myopt, kwargs in argument_options.items():
@@ -1007,12 +667,12 @@ def parse_opts(tmpcmdline, silent=False):
args = [myopt]
if shortopt is not None:
args.append(shortopt)
- parser.add_option(dest=myopt.lstrip("--").replace("-", "_"),
+ parser.add_argument(dest=myopt.lstrip("--").replace("-", "_"),
*args, **kwargs)
tmpcmdline = insert_optional_args(tmpcmdline)
- myoptions, myargs = parser.parse_args(args=tmpcmdline)
+ myoptions, myargs = parser.parse_known_args(args=tmpcmdline)
if myoptions.ask in true_y:
myoptions.ask = True
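
The hunk above completes the migration from optparse to argparse: add_option becomes add_argument, and parse_args becomes parse_known_args, which hands back unrecognized tokens (package atoms, set names) instead of aborting. A minimal standalone sketch of that pattern, stdlib only, with illustrative option names:

import argparse

parser = argparse.ArgumentParser(add_help=False)  # emerge renders its own help
parser.add_argument("--ask", choices=("True", "y", "n"))
parser.add_argument("--jobs", action="store")

# parse_known_args() tolerates unknown tokens and returns them separately,
# so package atoms can ride along with the options.
myoptions, myargs = parser.parse_known_args(["--ask", "y", "sys-apps/portage"])
print(myoptions.ask)  # 'y'
print(myargs)         # ['sys-apps/portage']
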
@@ -1058,9 +718,8 @@ def parse_opts(tmpcmdline, silent=False):
else:
myoptions.complete_graph = None
- if _ENABLE_DYN_LINK_MAP:
- if myoptions.depclean_lib_check in true_y:
- myoptions.depclean_lib_check = True
+ if myoptions.depclean_lib_check in true_y:
+ myoptions.depclean_lib_check = True
if myoptions.exclude:
bad_atoms = _find_bad_atoms(myoptions.exclude)
@@ -1127,8 +786,11 @@ def parse_opts(tmpcmdline, silent=False):
if myoptions.quiet_build in true_y:
myoptions.quiet_build = 'y'
- if myoptions.rebuild_if_new_slot_abi in true_y:
- myoptions.rebuild_if_new_slot_abi = 'y'
+ if myoptions.quiet_fail in true_y:
+ myoptions.quiet_fail = 'y'
+
+ if myoptions.rebuild_if_new_slot in true_y:
+ myoptions.rebuild_if_new_slot = 'y'
if myoptions.rebuild_if_new_ver in true_y:
myoptions.rebuild_if_new_ver = True
@@ -1215,6 +877,9 @@ def parse_opts(tmpcmdline, silent=False):
myoptions.jobs = jobs
+ if myoptions.load_average == "True":
+ myoptions.load_average = None
+
if myoptions.load_average:
try:
load_average = float(myoptions.load_average)
@@ -1228,7 +893,7 @@ def parse_opts(tmpcmdline, silent=False):
(myoptions.load_average,))
myoptions.load_average = load_average
-
+
if myoptions.rebuilt_binaries_timestamp:
try:
rebuilt_binaries_timestamp = int(myoptions.rebuilt_binaries_timestamp)
@@ -1259,6 +924,11 @@ def parse_opts(tmpcmdline, silent=False):
else:
myoptions.usepkgonly = None
+ if myoptions.verbose in true_y:
+ myoptions.verbose = True
+ else:
+ myoptions.verbose = None
+
for myopt in options:
v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
if v:
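
Most of the y/n options above share the post-processing shown in these hunks: a value in true_y collapses to True (or the string 'y'), and anything else to None, so later code can treat the option as unset. A toy sketch of that tri-state normalization (true_y is defined earlier in parse_opts; the namespace here is a stand-in for argparse's result object):

true_y = frozenset(("True", "y"))

class Namespace(object):
    pass

myoptions = Namespace()
myoptions.verbose = "y"
myoptions.usepkgonly = "n"

for opt in ("verbose", "usepkgonly"):
    if getattr(myoptions, opt) in true_y:
        setattr(myoptions, opt, True)   # explicit yes
    else:
        setattr(myoptions, opt, None)   # 'n' and unset look the same later

print(myoptions.verbose, myoptions.usepkgonly)  # True None
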
@@ -1283,309 +953,10 @@ def parse_opts(tmpcmdline, silent=False):
if myaction is None and myoptions.deselect is True:
myaction = 'deselect'
- if myargs and isinstance(myargs[0], bytes):
- for i in range(len(myargs)):
- myargs[i] = portage._unicode_decode(myargs[i])
-
myfiles += myargs
return myaction, myopts, myfiles
-# Warn about features that may confuse users and
-# lead them to report invalid bugs.
-_emerge_features_warn = frozenset(['keeptemp', 'keepwork'])
-
-def validate_ebuild_environment(trees):
- features_warn = set()
- for myroot in trees:
- settings = trees[myroot]["vartree"].settings
- settings.validate()
- features_warn.update(
- _emerge_features_warn.intersection(settings.features))
-
- if features_warn:
- msg = "WARNING: The FEATURES variable contains one " + \
- "or more values that should be disabled under " + \
- "normal circumstances: %s" % " ".join(features_warn)
- out = portage.output.EOutput()
- for line in textwrap.wrap(msg, 65):
- out.ewarn(line)
-
-def apply_priorities(settings):
- ionice(settings)
- nice(settings)
-
-def nice(settings):
- try:
- os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
- except (OSError, ValueError) as e:
- out = portage.output.EOutput()
- out.eerror("Failed to change nice value to '%s'" % \
- settings["PORTAGE_NICENESS"])
- out.eerror("%s\n" % str(e))
-
-def ionice(settings):
-
- ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
- if ionice_cmd:
- ionice_cmd = portage.util.shlex_split(ionice_cmd)
- if not ionice_cmd:
- return
-
- variables = {"PID" : str(os.getpid())}
- cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
-
- try:
- rval = portage.process.spawn(cmd, env=os.environ)
- except portage.exception.CommandNotFound:
- # The OS kernel probably doesn't support ionice,
- # so return silently.
- return
-
- if rval != os.EX_OK:
- out = portage.output.EOutput()
- out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
- out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
-
-def clean_logs(settings):
-
- if "clean-logs" not in settings.features:
- return
-
- logdir = settings.get("PORT_LOGDIR")
- if logdir is None or not os.path.isdir(logdir):
- return
-
- options = {
- 'eerror': portage.output.EOutput().eerror,
- # uncomment next line to output a succeeded message
- #'einfo': portage.output.EOutput().einfo
- }
- cleanlogs = CleanLogs()
- cleanlogs.clean(settings=settings, options=options)
-
-def setconfig_fallback(root_config):
- setconfig = root_config.setconfig
- setconfig._create_default_config()
- setconfig._parse(update=True)
- root_config.sets = setconfig.getSets()
-
-def get_missing_sets(root_config):
- # emerge requires existence of "world", "selected", and "system"
- missing_sets = []
-
- for s in ("selected", "system", "world",):
- if s not in root_config.sets:
- missing_sets.append(s)
-
- return missing_sets
-
-def missing_sets_warning(root_config, missing_sets):
- if len(missing_sets) > 2:
- missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
- missing_sets_str += ', and "%s"' % missing_sets[-1]
- elif len(missing_sets) == 2:
- missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
- else:
- missing_sets_str = '"%s"' % missing_sets[-1]
- msg = ["emerge: incomplete set configuration, " + \
- "missing set(s): %s" % missing_sets_str]
- if root_config.sets:
- msg.append(" sets defined: %s" % ", ".join(root_config.sets))
- global_config_path = portage.const.GLOBAL_CONFIG_PATH
- if root_config.settings['EPREFIX']:
- global_config_path = os.path.join(root_config.settings['EPREFIX'],
- portage.const.GLOBAL_CONFIG_PATH.lstrip(os.sep))
- msg.append(" This usually means that '%s'" % \
- (os.path.join(global_config_path, "sets/portage.conf"),))
- msg.append(" is missing or corrupt.")
- msg.append(" Falling back to default world and system set configuration!!!")
- for line in msg:
- writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
-
-def ensure_required_sets(trees):
- warning_shown = False
- for root_trees in trees.values():
- missing_sets = get_missing_sets(root_trees["root_config"])
- if missing_sets and not warning_shown:
- warning_shown = True
- missing_sets_warning(root_trees["root_config"], missing_sets)
- if missing_sets:
- setconfig_fallback(root_trees["root_config"])
-
-def expand_set_arguments(myfiles, myaction, root_config):
- retval = os.EX_OK
- setconfig = root_config.setconfig
-
- sets = setconfig.getSets()
-
- # In order to know exactly which atoms/sets should be added to the
- # world file, the depgraph performs set expansion later. It will get
- # confused about where the atoms came from if it's not allowed to
- # expand them itself.
- do_not_expand = (None, )
- newargs = []
- for a in myfiles:
- if a in ("system", "world"):
- newargs.append(SETPREFIX+a)
- else:
- newargs.append(a)
- myfiles = newargs
- del newargs
- newargs = []
-
- # separators for set arguments
- ARG_START = "{"
- ARG_END = "}"
-
- for i in range(0, len(myfiles)):
- if myfiles[i].startswith(SETPREFIX):
- start = 0
- end = 0
- x = myfiles[i][len(SETPREFIX):]
- newset = ""
- while x:
- start = x.find(ARG_START)
- end = x.find(ARG_END)
- if start > 0 and start < end:
- namepart = x[:start]
- argpart = x[start+1:end]
-
- # TODO: implement proper quoting
- args = argpart.split(",")
- options = {}
- for a in args:
- if "=" in a:
- k, v = a.split("=", 1)
- options[k] = v
- else:
- options[a] = "True"
- setconfig.update(namepart, options)
- newset += (x[:start-len(namepart)]+namepart)
- x = x[end+len(ARG_END):]
- else:
- newset += x
- x = ""
- myfiles[i] = SETPREFIX+newset
-
- sets = setconfig.getSets()
-
- # display errors that occurred while loading the SetConfig instance
- for e in setconfig.errors:
- print(colorize("BAD", "Error during set creation: %s" % e))
-
- unmerge_actions = ("unmerge", "prune", "clean", "depclean")
-
- for a in myfiles:
- if a.startswith(SETPREFIX):
- s = a[len(SETPREFIX):]
- if s not in sets:
- display_missing_pkg_set(root_config, s)
- return (None, 1)
- setconfig.active.append(s)
- try:
- set_atoms = setconfig.getSetAtoms(s)
- except portage.exception.PackageSetNotFound as e:
- writemsg_level(("emerge: the given set '%s' " + \
- "contains a non-existent set named '%s'.\n") % \
- (s, e), level=logging.ERROR, noiselevel=-1)
- if s in ('world', 'selected') and \
- SETPREFIX + e.value in sets['selected']:
- writemsg_level(("Use `emerge --deselect %s%s` to "
- "remove this set from world_sets.\n") %
- (SETPREFIX, e,), level=logging.ERROR,
- noiselevel=-1)
- return (None, 1)
- if myaction in unmerge_actions and \
- not sets[s].supportsOperation("unmerge"):
- sys.stderr.write("emerge: the given set '%s' does " % s + \
- "not support unmerge operations\n")
- retval = 1
- elif not set_atoms:
- print("emerge: '%s' is an empty set" % s)
- elif myaction not in do_not_expand:
- newargs.extend(set_atoms)
- else:
- newargs.append(SETPREFIX+s)
- for e in sets[s].errors:
- print(e)
- else:
- newargs.append(a)
- return (newargs, retval)
-
-def repo_name_check(trees):
- missing_repo_names = set()
- for root_trees in trees.values():
- porttree = root_trees.get("porttree")
- if porttree:
- portdb = porttree.dbapi
- missing_repo_names.update(portdb.getMissingRepoNames())
- if portdb.porttree_root in missing_repo_names and \
- not os.path.exists(os.path.join(
- portdb.porttree_root, "profiles")):
- # This is normal if $PORTDIR happens to be empty,
- # so don't warn about it.
- missing_repo_names.remove(portdb.porttree_root)
-
- if missing_repo_names:
- msg = []
- msg.append("WARNING: One or more repositories " + \
- "have missing repo_name entries:")
- msg.append("")
- for p in missing_repo_names:
- msg.append("\t%s/profiles/repo_name" % (p,))
- msg.append("")
- msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
- "should be a plain text file containing a unique " + \
- "name for the repository on the first line.", 70))
- msg.append("\n")
- writemsg_level("".join("%s\n" % l for l in msg),
- level=logging.WARNING, noiselevel=-1)
-
- return bool(missing_repo_names)
-
-def repo_name_duplicate_check(trees):
- ignored_repos = {}
- for root, root_trees in trees.items():
- if 'porttree' in root_trees:
- portdb = root_trees['porttree'].dbapi
- if portdb.settings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
- for repo_name, paths in portdb.getIgnoredRepos():
- k = (root, repo_name, portdb.getRepositoryPath(repo_name))
- ignored_repos.setdefault(k, []).extend(paths)
-
- if ignored_repos:
- msg = []
- msg.append('WARNING: One or more repositories ' + \
- 'have been ignored due to duplicate')
- msg.append(' profiles/repo_name entries:')
- msg.append('')
- for k in sorted(ignored_repos):
- msg.append(' %s overrides' % ", ".join(k))
- for path in ignored_repos[k]:
- msg.append(' %s' % (path,))
- msg.append('')
- msg.extend(' ' + x for x in textwrap.wrap(
- "All profiles/repo_name entries must be unique in order " + \
- "to avoid having duplicates ignored. " + \
- "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
- "/etc/make.conf if you would like to disable this warning."))
- msg.append("\n")
- writemsg_level(''.join('%s\n' % l for l in msg),
- level=logging.WARNING, noiselevel=-1)
-
- return bool(ignored_repos)
-
-def config_protect_check(trees):
- for root, root_trees in trees.items():
- settings = root_trees["root_config"].settings
- if not settings.get("CONFIG_PROTECT"):
- msg = "!!! CONFIG_PROTECT is empty"
- if settings["ROOT"] != "/":
- msg += " for '%s'" % root
- msg += "\n"
- writemsg_level(msg, level=logging.WARN, noiselevel=-1)
-
def profile_check(trees, myaction):
if myaction in ("help", "info", "search", "sync", "version"):
return os.EX_OK
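
The removed expand_set_arguments() above also parsed an optional brace suffix on set arguments, turning tokens such as @someset{a=1,b} into setconfig options (with a noted TODO that quoting is unhandled). A toy re-implementation of just that brace parsing, standalone, handling a single brace group and using an illustrative set name:

SETPREFIX = "@"
ARG_START, ARG_END = "{", "}"

def parse_set_arg(arg):
    """Split '@name{a=1,b}' into ('name', {'a': '1', 'b': 'True'})."""
    x = arg[len(SETPREFIX):]
    start, end = x.find(ARG_START), x.find(ARG_END)
    if not (0 < start < end):
        return x, {}
    name, argpart = x[:start], x[start + 1:end]
    options = {}
    for a in argpart.split(","):
        if "=" in a:
            k, v = a.split("=", 1)
            options[k] = v
        else:
            options[a] = "True"  # bare flag defaults to "True"
    return name, options

print(parse_set_arg("@mySet{a=1,b}"))  # ('mySet', {'a': '1', 'b': 'True'})
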
@@ -1603,16 +974,6 @@ def profile_check(trees, myaction):
return 1
return os.EX_OK
-def check_procfs():
- procfs_path = '/proc'
- if platform.system() not in ("Linux",) or \
- os.path.ismount(procfs_path):
- return os.EX_OK
- msg = "It seems that %s is not mounted. You have been warned." % procfs_path
- writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
- level=logging.ERROR, noiselevel=-1)
- return 1
-
def emerge_main(args=None):
"""
@param args: command arguments (default: sys.argv[1:])
@@ -1621,11 +982,12 @@ def emerge_main(args=None):
if args is None:
args = sys.argv[1:]
- portage._disable_legacy_globals()
- portage.dep._internal_warnings = True
+ args = portage._decode_argv(args)
+
# Disable color until we're sure that it should be enabled (after
# EMERGE_DEFAULT_OPTS has been parsed).
portage.output.havecolor = 0
+
# This first pass is just for options that need to be known as early as
# possible, such as --config-root. They will be parsed again later,
# together with EMERGE_DEFAULT_OPTS (which may vary depending on the
@@ -1637,428 +999,45 @@ def emerge_main(args=None):
os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
if "--root" in myopts:
os.environ["ROOT"] = myopts["--root"]
+ if "--prefix" in myopts:
+ os.environ["EPREFIX"] = myopts["--prefix"]
if "--accept-properties" in myopts:
os.environ["ACCEPT_PROPERTIES"] = myopts["--accept-properties"]
+ if "--accept-restrict" in myopts:
+ os.environ["ACCEPT_RESTRICT"] = myopts["--accept-restrict"]
+
+ # optimize --help (no need to load config / EMERGE_DEFAULT_OPTS)
+ if myaction == "help":
+ emerge_help()
+ return os.EX_OK
+ elif myaction == "moo":
+ print(COWSAY_MOO % platform.system())
+ return os.EX_OK
# Portage needs to ensure a sane umask for the files it creates.
os.umask(0o22)
- settings, trees, mtimedb = load_emerge_config()
- portdb = trees[settings['EROOT']]['porttree'].dbapi
- rval = profile_check(trees, myaction)
+ if myaction == "sync":
+ portage._sync_mode = True
+ emerge_config = load_emerge_config(
+ action=myaction, args=myfiles, opts=myopts)
+ rval = profile_check(emerge_config.trees, emerge_config.action)
if rval != os.EX_OK:
return rval
tmpcmdline = []
if "--ignore-default-opts" not in myopts:
- tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
+ tmpcmdline.extend(portage.util.shlex_split(
+ emerge_config.target_config.settings.get(
+ "EMERGE_DEFAULT_OPTS", "")))
tmpcmdline.extend(args)
- myaction, myopts, myfiles = parse_opts(tmpcmdline)
-
- # skip global updates prior to sync, since it's called after sync
- if myaction not in ('help', 'info', 'sync', 'version') and \
- myopts.get('--package-moves') != 'n' and \
- _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
- mtimedb.commit()
- # Reload the whole config from scratch.
- settings, trees, mtimedb = load_emerge_config(trees=trees)
- portdb = trees[settings['EROOT']]['porttree'].dbapi
-
- xterm_titles = "notitles" not in settings.features
- if xterm_titles:
- xtermTitle("emerge")
-
- if "--digest" in myopts:
- os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
- # Reload the whole config from scratch so that the portdbapi internal
- # config is updated with new FEATURES.
- settings, trees, mtimedb = load_emerge_config(trees=trees)
- portdb = trees[settings['EROOT']]['porttree'].dbapi
-
- # NOTE: adjust_configs() can map options to FEATURES, so any relevant
- # options adjustments should be made prior to calling adjust_configs().
- if "--buildpkgonly" in myopts:
- myopts["--buildpkg"] = True
-
- adjust_configs(myopts, trees)
- apply_priorities(settings)
-
- if myaction == 'version':
- writemsg_stdout(getportageversion(
- settings["PORTDIR"], None,
- settings.profile_path, settings["CHOST"],
- trees[settings['EROOT']]['vartree'].dbapi) + '\n', noiselevel=-1)
- return 0
- elif myaction == 'help':
- _emerge.help.help()
- return 0
-
- spinner = stdout_spinner()
- if "candy" in settings.features:
- spinner.update = spinner.update_scroll
-
- if "--quiet" not in myopts:
- portage.deprecated_profile_check(settings=settings)
- if portage.const._ENABLE_REPO_NAME_WARN:
- # Bug #248603 - Disable warnings about missing
- # repo_name entries for stable branch.
- repo_name_check(trees)
- repo_name_duplicate_check(trees)
- config_protect_check(trees)
- check_procfs()
-
- if "getbinpkg" in settings.features:
- myopts["--getbinpkg"] = True
-
- if "--getbinpkgonly" in myopts:
- myopts["--getbinpkg"] = True
-
- if "--getbinpkgonly" in myopts:
- myopts["--usepkgonly"] = True
-
- if "--getbinpkg" in myopts:
- myopts["--usepkg"] = True
-
- if "--usepkgonly" in myopts:
- myopts["--usepkg"] = True
-
- if "--buildpkgonly" in myopts:
- # --buildpkgonly will not merge anything, so
- # it cancels all binary package options.
- for opt in ("--getbinpkg", "--getbinpkgonly",
- "--usepkg", "--usepkgonly"):
- myopts.pop(opt, None)
-
- for mytrees in trees.values():
- mydb = mytrees["porttree"].dbapi
- # Freeze the portdbapi for performance (memoize all xmatch results).
- mydb.freeze()
-
- if myaction in ('search', None) and \
- "--usepkg" in myopts:
- # Populate the bintree with current --getbinpkg setting.
- # This needs to happen before expand_set_arguments(), in case
- # any sets use the bintree.
- mytrees["bintree"].populate(
- getbinpkgs="--getbinpkg" in myopts)
-
- del mytrees, mydb
-
- if "moo" in myfiles:
- print(COWSAY_MOO % platform.system())
- msg = ("The above `emerge moo` display is deprecated. "
- "Please use `emerge --moo` instead.")
- for line in textwrap.wrap(msg, 50):
- print(" %s %s" % (colorize("WARN", "*"), line))
-
- for x in myfiles:
- ext = os.path.splitext(x)[1]
- if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
- print(colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n"))
- break
-
- root_config = trees[settings['EROOT']]['root_config']
- if myaction == "moo":
- print(COWSAY_MOO % platform.system())
- return os.EX_OK
- elif myaction == "list-sets":
- writemsg_stdout("".join("%s\n" % s for s in sorted(root_config.sets)))
- return os.EX_OK
- elif myaction == "check-news":
- news_counts = count_unread_news(
- root_config.trees["porttree"].dbapi,
- root_config.trees["vartree"].dbapi)
- if any(news_counts.values()):
- display_news_notifications(news_counts)
- elif "--quiet" not in myopts:
- print("", colorize("GOOD", "*"), "No news items were found.")
- return os.EX_OK
-
- ensure_required_sets(trees)
-
- # only expand sets for actions taking package arguments
- oldargs = myfiles[:]
- if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
- myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
- if retval != os.EX_OK:
- return retval
-
- # Need to handle empty sets specially, otherwise emerge will react
- # with the help message for empty argument lists
- if oldargs and not myfiles:
- print("emerge: no targets left after set expansion")
- return 0
-
- if ("--tree" in myopts) and ("--columns" in myopts):
- print("emerge: can't specify both of \"--tree\" and \"--columns\".")
- return 1
-
- if '--emptytree' in myopts and '--noreplace' in myopts:
- writemsg_level("emerge: can't specify both of " + \
- "\"--emptytree\" and \"--noreplace\".\n",
- level=logging.ERROR, noiselevel=-1)
- return 1
-
- if ("--quiet" in myopts):
- spinner.update = spinner.update_quiet
- portage.util.noiselimit = -1
-
- if "--fetch-all-uri" in myopts:
- myopts["--fetchonly"] = True
-
- if "--skipfirst" in myopts and "--resume" not in myopts:
- myopts["--resume"] = True
+ emerge_config.action, emerge_config.opts, emerge_config.args = \
+ parse_opts(tmpcmdline)
- # Allow -p to remove --ask
- if "--pretend" in myopts:
- myopts.pop("--ask", None)
-
- # forbid --ask when not in a terminal
- # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
- if ("--ask" in myopts) and (not sys.stdin.isatty()):
- portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
- noiselevel=-1)
- return 1
-
- if settings.get("PORTAGE_DEBUG", "") == "1":
- spinner.update = spinner.update_quiet
- portage.util.noiselimit = 0
- if "python-trace" in settings.features:
- import portage.debug as portage_debug
- portage_debug.set_trace(True)
-
- if not ("--quiet" in myopts):
- if '--nospinner' in myopts or \
- settings.get('TERM') == 'dumb' or \
- not sys.stdout.isatty():
- spinner.update = spinner.update_basic
-
- if "--debug" in myopts:
- print("myaction", myaction)
- print("myopts", myopts)
-
- if not myaction and not myfiles and "--resume" not in myopts:
- _emerge.help.help()
- return 1
-
- pretend = "--pretend" in myopts
- fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
- buildpkgonly = "--buildpkgonly" in myopts
-
- # check if root user is the current user for the actions where emerge needs this
- if portage.secpass < 2:
- # We've already allowed "--version" and "--help" above.
- if "--pretend" not in myopts and myaction not in ("search","info"):
- need_superuser = myaction in ('clean', 'depclean', 'deselect',
- 'prune', 'unmerge') or not \
- (fetchonly or \
- (buildpkgonly and secpass >= 1) or \
- myaction in ("metadata", "regen", "sync"))
- if portage.secpass < 1 or \
- need_superuser:
- if need_superuser:
- access_desc = "superuser"
- else:
- access_desc = "portage group"
- # Always show portage_group_warning() when only portage group
- # access is required but the user is not in the portage group.
- from portage.data import portage_group_warning
- if "--ask" in myopts:
- writemsg_stdout("This action requires %s access...\n" % \
- (access_desc,), noiselevel=-1)
- if portage.secpass < 1 and not need_superuser:
- portage_group_warning()
- if userquery("Would you like to add --pretend to options?",
- "--ask-enter-invalid" in myopts) == "No":
- return 128 + signal.SIGINT
- myopts["--pretend"] = True
- del myopts["--ask"]
- else:
- sys.stderr.write(("emerge: %s access is required\n") \
- % access_desc)
- if portage.secpass < 1 and not need_superuser:
- portage_group_warning()
- return 1
-
- # Disable emergelog for everything except build or unmerge operations.
- # This helps minimize parallel emerge.log entries that can confuse log
- # parsers like genlop.
- disable_emergelog = False
- for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
- if x in myopts:
- disable_emergelog = True
- break
- if disable_emergelog:
- pass
- elif myaction in ("search", "info"):
- disable_emergelog = True
- elif portage.data.secpass < 1:
- disable_emergelog = True
-
- _emerge.emergelog._disable = disable_emergelog
-
- if not disable_emergelog:
- if 'EMERGE_LOG_DIR' in settings:
- try:
- # At least the parent needs to exist for the lock file.
- portage.util.ensure_dirs(settings['EMERGE_LOG_DIR'])
- except portage.exception.PortageException as e:
- writemsg_level("!!! Error creating directory for " + \
- "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
- (settings['EMERGE_LOG_DIR'], e),
- noiselevel=-1, level=logging.ERROR)
- portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
- else:
- _emerge.emergelog._emerge_log_dir = settings["EMERGE_LOG_DIR"]
- else:
- _emerge.emergelog._emerge_log_dir = os.path.join(os.sep,
- settings["EPREFIX"].lstrip(os.sep), "var", "log")
- portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
-
- if not "--pretend" in myopts:
- emergelog(xterm_titles, "Started emerge on: "+\
- _unicode_decode(
- time.strftime("%b %d, %Y %H:%M:%S", time.localtime()),
- encoding=_encodings['content'], errors='replace'))
- myelogstr=""
- if myopts:
- opt_list = []
- for opt, arg in myopts.items():
- if arg is True:
- opt_list.append(opt)
- elif isinstance(arg, list):
- # arguments like --exclude that use 'append' action
- for x in arg:
- opt_list.append("%s=%s" % (opt, x))
- else:
- opt_list.append("%s=%s" % (opt, arg))
- myelogstr=" ".join(opt_list)
- if myaction:
- myelogstr += " --" + myaction
- if myfiles:
- myelogstr += " " + " ".join(oldargs)
- emergelog(xterm_titles, " *** emerge " + myelogstr)
- del oldargs
-
- def emergeexitsig(signum, frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
- sys.exit(128 + signum)
- signal.signal(signal.SIGINT, emergeexitsig)
- signal.signal(signal.SIGTERM, emergeexitsig)
-
- def emergeexit():
- """This gets out final log message in before we quit."""
- if "--pretend" not in myopts:
- emergelog(xterm_titles, " *** terminating.")
- if xterm_titles:
- xtermTitleReset()
- portage.atexit_register(emergeexit)
-
- if myaction in ("config", "metadata", "regen", "sync"):
- if "--pretend" in myopts:
- sys.stderr.write(("emerge: The '%s' action does " + \
- "not support '--pretend'.\n") % myaction)
- return 1
-
- if "sync" == myaction:
- return action_sync(settings, trees, mtimedb, myopts, myaction)
- elif "metadata" == myaction:
- action_metadata(settings, portdb, myopts)
- elif myaction=="regen":
- validate_ebuild_environment(trees)
- return action_regen(settings, portdb, myopts.get("--jobs"),
- myopts.get("--load-average"))
- # HELP action
- elif "config"==myaction:
- validate_ebuild_environment(trees)
- action_config(settings, trees, myopts, myfiles)
-
- # SEARCH action
- elif "search"==myaction:
- validate_ebuild_environment(trees)
- action_search(trees[settings['EROOT']]['root_config'],
- myopts, myfiles, spinner)
-
- elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
- validate_ebuild_environment(trees)
- rval = action_uninstall(settings, trees, mtimedb["ldpath"],
- myopts, myaction, myfiles, spinner)
- if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
- post_emerge(myaction, myopts, myfiles, settings['EROOT'],
- trees, mtimedb, rval)
- return rval
-
- elif myaction == 'info':
-
- # Ensure atoms are valid before calling unmerge().
- vardb = trees[settings['EROOT']]['vartree'].dbapi
- portdb = trees[settings['EROOT']]['porttree'].dbapi
- bindb = trees[settings['EROOT']]["bintree"].dbapi
- valid_atoms = []
- for x in myfiles:
- if is_valid_package_atom(x, allow_repo=True):
- try:
- #look at the installed files first, if there is no match
- #look at the ebuilds, since EAPI 4 allows running pkg_info
- #on non-installed packages
- valid_atom = dep_expand(x, mydb=vardb, settings=settings)
- if valid_atom.cp.split("/")[0] == "null":
- valid_atom = dep_expand(x, mydb=portdb, settings=settings)
- if valid_atom.cp.split("/")[0] == "null" and "--usepkg" in myopts:
- valid_atom = dep_expand(x, mydb=bindb, settings=settings)
- valid_atoms.append(valid_atom)
- except portage.exception.AmbiguousPackageName as e:
- msg = "The short ebuild name \"" + x + \
- "\" is ambiguous. Please specify " + \
- "one of the following " + \
- "fully-qualified ebuild names instead:"
- for line in textwrap.wrap(msg, 70):
- writemsg_level("!!! %s\n" % (line,),
- level=logging.ERROR, noiselevel=-1)
- for i in e.args[0]:
- writemsg_level(" %s\n" % colorize("INFORM", i),
- level=logging.ERROR, noiselevel=-1)
- writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
- return 1
- continue
- msg = []
- msg.append("'%s' is not a valid package atom." % (x,))
- msg.append("Please check ebuild(5) for full details.")
- writemsg_level("".join("!!! %s\n" % line for line in msg),
- level=logging.ERROR, noiselevel=-1)
- return 1
-
- return action_info(settings, trees, myopts, valid_atoms)
-
- # "update", "system", or just process files:
- else:
- validate_ebuild_environment(trees)
-
- for x in myfiles:
- if x.startswith(SETPREFIX) or \
- is_valid_package_atom(x, allow_repo=True):
- continue
- if x[:1] == os.sep:
- continue
- try:
- os.lstat(x)
+ try:
+ return run_action(emerge_config)
+ finally:
+ # Call destructors for our portdbapi instances.
+ for x in emerge_config.trees.values():
+ if "porttree" in x.lazy_items:
continue
- except OSError:
- pass
- msg = []
- msg.append("'%s' is not a valid package atom." % (x,))
- msg.append("Please check ebuild(5) for full details.")
- writemsg_level("".join("!!! %s\n" % line for line in msg),
- level=logging.ERROR, noiselevel=-1)
- return 1
-
- # GLEP 42 says to display news *after* an emerge --pretend
- if "--pretend" not in myopts:
- display_news_notification(root_config, myopts)
- retval = action_build(settings, trees, mtimedb,
- myopts, myaction, myfiles, spinner)
- post_emerge(myaction, myopts, myfiles, settings['EROOT'],
- trees, mtimedb, retval)
-
- return retval
+ x["porttree"].dbapi.close_caches()
diff --git a/pym/_emerge/post_emerge.py b/pym/_emerge/post_emerge.py
new file mode 100644
index 000000000..d5f1ba5fa
--- /dev/null
+++ b/pym/_emerge/post_emerge.py
@@ -0,0 +1,165 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+import textwrap
+
+import portage
+from portage import os
+from portage.emaint.modules.logs.logs import CleanLogs
+from portage.news import count_unread_news, display_news_notifications
+from portage.output import colorize
+from portage.util._dyn_libs.display_preserved_libs import \
+ display_preserved_libs
+from portage.util._info_files import chk_updated_info_files
+
+from .chk_updated_cfg_files import chk_updated_cfg_files
+from .emergelog import emergelog
+from ._flush_elog_mod_echo import _flush_elog_mod_echo
+
+def clean_logs(settings):
+
+ if "clean-logs" not in settings.features:
+ return
+
+ logdir = settings.get("PORT_LOGDIR")
+ if logdir is None or not os.path.isdir(logdir):
+ return
+
+ cleanlogs = CleanLogs()
+ errors = cleanlogs.clean(settings=settings)
+ if errors:
+ out = portage.output.EOutput()
+ for msg in errors:
+ out.eerror(msg)
+
+def display_news_notification(root_config, myopts):
+ if "news" not in root_config.settings.features:
+ return
+ portdb = root_config.trees["porttree"].dbapi
+ vardb = root_config.trees["vartree"].dbapi
+ news_counts = count_unread_news(portdb, vardb)
+ display_news_notifications(news_counts)
+
+def show_depclean_suggestion():
+ out = portage.output.EOutput()
+ msg = "After world updates, it is important to remove " + \
+ "obsolete packages with emerge --depclean. Refer " + \
+ "to `man emerge` for more information."
+ for line in textwrap.wrap(msg, 72):
+ out.ewarn(line)
+
+def post_emerge(myaction, myopts, myfiles,
+ target_root, trees, mtimedb, retval):
+ """
+ Misc. things to run at the end of a merge session.
+
+ Update Info Files
+ Update Config Files
+ Update News Items
+ Commit mtimeDB
+ Display preserved libs warnings
+
+ @param myaction: The action returned from parse_opts()
+ @type myaction: String
+ @param myopts: emerge options
+ @type myopts: dict
+ @param myfiles: emerge arguments
+ @type myfiles: list
+ @param target_root: The target EROOT for myaction
+ @type target_root: String
+ @param trees: A dictionary mapping each ROOT to its package databases
+ @type trees: dict
+ @param mtimedb: The mtimeDB to store data needed across merge invocations
+ @type mtimedb: MtimeDB class instance
+ @param retval: Emerge's return value
+ @type retval: Int
+ """
+
+ root_config = trees[target_root]["root_config"]
+ vardbapi = trees[target_root]['vartree'].dbapi
+ settings = vardbapi.settings
+ info_mtimes = mtimedb["info"]
+
+ # Load the most current variables from ${ROOT}/etc/profile.env
+ settings.unlock()
+ settings.reload()
+ settings.regenerate()
+ settings.lock()
+
+ config_protect = portage.util.shlex_split(
+ settings.get("CONFIG_PROTECT", ""))
+ infodirs = settings.get("INFOPATH","").split(":") + \
+ settings.get("INFODIR","").split(":")
+
+ os.chdir("/")
+
+ if retval == os.EX_OK:
+ exit_msg = " *** exiting successfully."
+ else:
+ exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
+ emergelog("notitles" not in settings.features, exit_msg)
+
+ _flush_elog_mod_echo()
+
+ if not vardbapi._pkgs_changed:
+ # GLEP 42 says to display news *after* an emerge --pretend
+ if "--pretend" in myopts:
+ display_news_notification(root_config, myopts)
+ # If vdb state has not changed then there's nothing else to do.
+ return
+
+ vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
+ portage.util.ensure_dirs(vdb_path)
+ vdb_lock = None
+ if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
+ vardbapi.lock()
+ vdb_lock = True
+
+ if vdb_lock:
+ try:
+ if "noinfo" not in settings.features:
+ chk_updated_info_files(target_root,
+ infodirs, info_mtimes)
+ mtimedb.commit()
+ finally:
+ if vdb_lock:
+ vardbapi.unlock()
+
+ # Explicitly load and prune the PreservedLibsRegistry in order
+ # to ensure that we do not display stale data.
+ vardbapi._plib_registry.load()
+
+ if vardbapi._plib_registry.hasEntries():
+ if "--quiet" in myopts:
+ print()
+ print(colorize("WARN", "!!!") + " existing preserved libs found")
+ else:
+ print()
+ print(colorize("WARN", "!!!") + " existing preserved libs:")
+ display_preserved_libs(vardbapi)
+ print("Use " + colorize("GOOD", "emerge @preserved-rebuild") +
+ " to rebuild packages using these libraries")
+
+ chk_updated_cfg_files(settings['EROOT'], config_protect)
+
+ display_news_notification(root_config, myopts)
+
+ postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
+ portage.USER_CONFIG_PATH, "bin", "post_emerge")
+ if os.access(postemerge, os.X_OK):
+ hook_retval = portage.process.spawn(
+ [postemerge], env=settings.environ())
+ if hook_retval != os.EX_OK:
+ portage.util.writemsg_level(
+ " %s spawn failed of %s\n" %
+ (colorize("BAD", "*"), postemerge,),
+ level=logging.ERROR, noiselevel=-1)
+
+ clean_logs(settings)
+
+ if "--quiet" not in myopts and \
+ myaction is None and "@world" in myfiles:
+ show_depclean_suggestion()
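
The helpers in this new module are importable on their own. For example, the unread-news check that post_emerge() performs can be driven directly; a minimal sketch, assuming portage's legacy globals (portage.db, portage.settings) have been initialized by importing portage in a configured environment:

import portage
from portage.news import count_unread_news, display_news_notifications

trees = portage.db[portage.settings["EROOT"]]
news_counts = count_unread_news(trees["porttree"].dbapi,
    trees["vartree"].dbapi)
if any(news_counts.values()):
    display_news_notifications(news_counts)
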
diff --git a/pym/_emerge/resolver/backtracking.py b/pym/_emerge/resolver/backtracking.py
index d8f49c679..c29b9d42a 100644
--- a/pym/_emerge/resolver/backtracking.py
+++ b/pym/_emerge/resolver/backtracking.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import copy
@@ -7,8 +7,8 @@ class BacktrackParameter(object):
__slots__ = (
"needed_unstable_keywords", "runtime_pkg_mask", "needed_use_config_changes", "needed_license_changes",
- "rebuild_list", "reinstall_list", "needed_p_mask_changes",
- "slot_abi_replace_installed"
+ "prune_rebuilds", "rebuild_list", "reinstall_list", "needed_p_mask_changes",
+ "slot_operator_mask_built", "slot_operator_replace_installed"
)
def __init__(self):
@@ -19,7 +19,9 @@ class BacktrackParameter(object):
self.needed_license_changes = {}
self.rebuild_list = set()
self.reinstall_list = set()
- self.slot_abi_replace_installed = set()
+ self.slot_operator_replace_installed = set()
+ self.slot_operator_mask_built = set()
+ self.prune_rebuilds = False
def __deepcopy__(self, memo=None):
if memo is None:
@@ -35,7 +37,9 @@ class BacktrackParameter(object):
result.needed_license_changes = copy.copy(self.needed_license_changes)
result.rebuild_list = copy.copy(self.rebuild_list)
result.reinstall_list = copy.copy(self.reinstall_list)
- result.slot_abi_replace_installed = copy.copy(self.slot_abi_replace_installed)
+ result.slot_operator_replace_installed = copy.copy(self.slot_operator_replace_installed)
+ result.slot_operator_mask_built = self.slot_operator_mask_built.copy()
+ result.prune_rebuilds = self.prune_rebuilds
# runtime_pkg_mask contains nested dicts that must also be copied
result.runtime_pkg_mask = {}
@@ -52,7 +56,9 @@ class BacktrackParameter(object):
self.needed_license_changes == other.needed_license_changes and \
self.rebuild_list == other.rebuild_list and \
self.reinstall_list == other.reinstall_list and \
- self.slot_abi_replace_installed == other.slot_abi_replace_installed
+ self.slot_operator_replace_installed == other.slot_operator_replace_installed and \
+ self.slot_operator_mask_built == other.slot_operator_mask_built and \
+ self.prune_rebuilds == other.prune_rebuilds
class _BacktrackNode(object):
@@ -125,7 +131,7 @@ class Backtracker(object):
for pkg, mask_info in runtime_pkg_mask.items():
if "missing dependency" in mask_info or \
- "slot_abi_mask_built" in mask_info:
+ "slot_operator_mask_built" in mask_info:
continue
entry_is_valid = False
@@ -192,16 +198,28 @@ class Backtracker(object):
para.needed_use_config_changes[pkg] = (new_use, new_changes)
elif change == "slot_conflict_abi":
new_node.terminal = False
- elif change == "slot_abi_mask_built":
+ elif change == "slot_operator_mask_built":
+ para.slot_operator_mask_built.update(data)
for pkg, mask_reasons in data.items():
para.runtime_pkg_mask.setdefault(pkg,
{}).update(mask_reasons)
- elif change == "slot_abi_replace_installed":
- para.slot_abi_replace_installed.update(data)
+ elif change == "slot_operator_replace_installed":
+ para.slot_operator_replace_installed.update(data)
elif change == "rebuild_list":
para.rebuild_list.update(data)
elif change == "reinstall_list":
para.reinstall_list.update(data)
+ elif change == "prune_rebuilds":
+ para.prune_rebuilds = True
+ para.slot_operator_replace_installed.clear()
+ for pkg in para.slot_operator_mask_built:
+ runtime_masks = para.runtime_pkg_mask.get(pkg)
+ if runtime_masks is None:
+ continue
+ runtime_masks.pop("slot_operator_mask_built", None)
+ if not runtime_masks:
+ para.runtime_pkg_mask.pop(pkg)
+ para.slot_operator_mask_built.clear()
self._add(new_node, explore=explore)
self._current_node = new_node
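
The new "prune_rebuilds" branch above unwinds earlier slot-operator decisions: it clears slot_operator_replace_installed, strips the slot_operator_mask_built entries out of runtime_pkg_mask, and drops packages whose mask dict becomes empty. A toy sketch of that bookkeeping on plain dicts and sets (package names are made up):

runtime_pkg_mask = {
    "cat/foo-1": {"slot_operator_mask_built": ("reason",)},
    "cat/bar-2": {"missing dependency": ("reason",),
        "slot_operator_mask_built": ("reason",)},
}
slot_operator_mask_built = {"cat/foo-1", "cat/bar-2"}

for pkg in slot_operator_mask_built:
    runtime_masks = runtime_pkg_mask.get(pkg)
    if runtime_masks is None:
        continue
    runtime_masks.pop("slot_operator_mask_built", None)
    if not runtime_masks:
        runtime_pkg_mask.pop(pkg)  # nothing else masks this package
slot_operator_mask_built.clear()

print(runtime_pkg_mask)  # only cat/bar-2 remains, masked for its other reason
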
diff --git a/pym/_emerge/resolver/circular_dependency.py b/pym/_emerge/resolver/circular_dependency.py
index aca81face..b7106714a 100644
--- a/pym/_emerge/resolver/circular_dependency.py
+++ b/pym/_emerge/resolver/circular_dependency.py
@@ -1,7 +1,7 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
from itertools import chain, product
import logging
@@ -11,6 +11,7 @@ from portage.exception import InvalidDependString
from portage.output import colorize
from portage.util import writemsg_level
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.Package import Package
class circular_dependency_handler(object):
@@ -61,8 +62,7 @@ class circular_dependency_handler(object):
node = nodes[0]
display_order.append(node)
tempgraph.remove(node)
- display_order.reverse()
- return display_order
+ return tuple(display_order)
def _prepare_circular_dep_message(self):
"""
@@ -113,9 +113,10 @@ class circular_dependency_handler(object):
parent_atoms = self.all_parent_atoms.get(pkg)
if priorities[-1].buildtime:
- dep = parent.metadata["DEPEND"]
+ dep = " ".join(parent._metadata[k]
+ for k in Package._buildtime_keys)
elif priorities[-1].runtime:
- dep = parent.metadata["RDEPEND"]
+ dep = parent._metadata["RDEPEND"]
for ppkg, atom in parent_atoms:
if ppkg == parent:
@@ -125,7 +126,7 @@ class circular_dependency_handler(object):
try:
affecting_use = extract_affecting_use(dep, parent_atom,
- eapi=parent.metadata["EAPI"])
+ eapi=parent.eapi)
except InvalidDependString:
if not parent.installed:
raise
@@ -144,7 +145,8 @@ class circular_dependency_handler(object):
#If any of the flags we're going to touch is in REQUIRED_USE, add all
#other flags in REQUIRED_USE to affecting_use, to not lose any solution.
required_use_flags = get_required_use_flags(
- parent.metadata.get("REQUIRED_USE", ""))
+ parent._metadata.get("REQUIRED_USE", ""),
+ eapi=parent.eapi)
if affecting_use.intersection(required_use_flags):
# TODO: Find out exactly which REQUIRED_USE flags are
@@ -186,9 +188,11 @@ class circular_dependency_handler(object):
parent_atom not in reduced_dep:
#We found an assignment that removes the atom from 'dep'.
#Make sure it doesn't conflict with REQUIRED_USE.
- required_use = parent.metadata.get("REQUIRED_USE", "")
+ required_use = parent._metadata.get("REQUIRED_USE", "")
- if check_required_use(required_use, current_use, parent.iuse.is_valid_flag):
+ if check_required_use(required_use, current_use,
+ parent.iuse.is_valid_flag,
+ eapi=parent.eapi):
use = self.depgraph._pkg_use_enabled(parent)
solution = set()
for flag, state in zip(affecting_use, use_state):
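
Both call sites above now thread the parent package's EAPI into the REQUIRED_USE helpers. A small usage sketch of those helpers from portage.dep (flag names are illustrative; check_required_use returns an object that is truthy when the constraint is satisfied):

from portage.dep import check_required_use, get_required_use_flags

required_use = "^^ ( ssl gnutls )"
flags = get_required_use_flags(required_use, eapi="5")
print(sorted(flags))  # ['gnutls', 'ssl']

satisfied = check_required_use(required_use, ["ssl"],
    lambda flag: flag in flags, eapi="5")
print(bool(satisfied))  # True: exactly one of the two flags is enabled
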
diff --git a/pym/_emerge/resolver/output.py b/pym/_emerge/resolver/output.py
index 61cfe9e98..5f550be0d 100644
--- a/pym/_emerge/resolver/output.py
+++ b/pym/_emerge/resolver/output.py
@@ -1,26 +1,31 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""Resolver output display operation.
"""
+from __future__ import unicode_literals
+
__all__ = (
- "Display",
+ "Display", "format_unmatched_atom",
)
import sys
+import portage
from portage import os
-from portage import _unicode_decode
from portage.dbapi.dep_expand import dep_expand
-from portage.dep import cpvequal, _repo_separator
+from portage.dep import cpvequal, _repo_separator, _slot_separator
+from portage.eapi import _get_eapi_attrs
from portage.exception import InvalidDependString, SignatureException
+from portage.package.ebuild.config import _get_feature_flags
from portage.package.ebuild._spawn_nofetch import spawn_nofetch
from portage.output import ( blue, colorize, create_color_func,
- darkblue, darkgreen, green, nc_len, red, teal, turquoise, yellow )
+ darkblue, darkgreen, green, nc_len, teal)
bad = create_color_func("BAD")
+from portage._sets.base import InternalPackageSet
from portage.util import writemsg_stdout
-from portage.versions import best, catpkgsplit
+from portage.versions import best, cpv_getversion
from _emerge.Blocker import Blocker
from _emerge.create_world_atom import create_world_atom
@@ -30,7 +35,9 @@ from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
if sys.hexversion >= 0x3000000:
basestring = str
-
+ _unicode = str
+else:
+ _unicode = unicode
class Display(object):
"""Formats and outputs the depgrah supplied it for merge/re-merge, etc.
@@ -54,11 +61,6 @@ class Display(object):
self.oldlp = None
self.myfetchlist = None
self.indent = ''
- self.is_new = True
- self.cur_use = None
- self.cur_iuse = None
- self.old_use = ''
- self.old_iuse = ''
self.use_expand = None
self.use_expand_hidden = None
self.pkgsettings = None
@@ -68,93 +70,54 @@ class Display(object):
self.blocker_style = None
- def _blockers(self, pkg, fetch_symbol):
- """Processes pkg for blockers and adds colorized strings to
+ def _blockers(self, blocker):
+ """Adds colorized strings to
self.print_msg and self.blockers
- @param pkg: _emerge.Package.Package instance
- @param fetch_symbol: string
+ @param blocker: _emerge.Blocker.Blocker instance
@rtype: bool
Modifies class globals: self.blocker_style, self.resolved,
self.print_msg
"""
- if pkg.satisfied:
+ if blocker.satisfied:
self.blocker_style = "PKG_BLOCKER_SATISFIED"
- addl = "%s %s " % (colorize(self.blocker_style, "b"),
- fetch_symbol)
+ addl = "%s " % (colorize(self.blocker_style, "b"),)
else:
self.blocker_style = "PKG_BLOCKER"
- addl = "%s %s " % (colorize(self.blocker_style, "B"),
- fetch_symbol)
+ addl = "%s " % (colorize(self.blocker_style, "B"),)
addl += self.empty_space_in_brackets()
self.resolved = dep_expand(
- str(pkg.atom).lstrip("!"), mydb=self.vardb,
+ _unicode(blocker.atom).lstrip("!"), mydb=self.vardb,
settings=self.pkgsettings
)
if self.conf.columns and self.conf.quiet:
- addl += " " + colorize(self.blocker_style, str(self.resolved))
+ addl += " " + colorize(self.blocker_style, _unicode(self.resolved))
else:
addl = "[%s %s] %s%s" % \
(colorize(self.blocker_style, "blocks"),
addl, self.indent,
- colorize(self.blocker_style, str(self.resolved))
+ colorize(self.blocker_style, _unicode(self.resolved))
)
- block_parents = self.conf.blocker_parents.parent_nodes(pkg)
- block_parents = set([pnode[2] for pnode in block_parents])
+ block_parents = self.conf.blocker_parents.parent_nodes(blocker)
+ block_parents = set(_unicode(pnode.cpv) for pnode in block_parents)
block_parents = ", ".join(block_parents)
- if self.resolved != pkg[2]:
+ if blocker.atom.blocker.overlap.forbid:
+ blocking_desc = "hard blocking"
+ else:
+ blocking_desc = "blocking"
+ if self.resolved != blocker.atom:
addl += colorize(self.blocker_style,
- " (\"%s\" is blocking %s)") % \
- (str(pkg.atom).lstrip("!"), block_parents)
+ " (\"%s\" is %s %s)" %
+ (_unicode(blocker.atom).lstrip("!"),
+ blocking_desc, block_parents))
else:
addl += colorize(self.blocker_style,
- " (is blocking %s)") % block_parents
- if isinstance(pkg, Blocker) and pkg.satisfied:
- if self.conf.columns:
- return True
- self.print_msg.append(addl)
+ " (is %s %s)" % (blocking_desc, block_parents))
+ if blocker.satisfied:
+ if not self.conf.columns:
+ self.print_msg.append(addl)
else:
self.blockers.append(addl)
- return False
-
-
- def _display_use(self, pkg, myoldbest, myinslotlist):
- """ USE flag display
-
- @param pkg: _emerge.Package.Package instance
- @param myoldbest: list of installed versions
- @param myinslotlist: list of installed slots
- Modifies class globals: self.forced_flags, self.cur_iuse,
- self.old_iuse, self.old_use, self.use_expand
- """
-
- self.forced_flags = set()
- self.forced_flags.update(pkg.use.force)
- self.forced_flags.update(pkg.use.mask)
-
- self.cur_use = [flag for flag in self.conf.pkg_use_enabled(pkg) \
- if flag in pkg.iuse.all]
- self.cur_iuse = sorted(pkg.iuse.all)
-
- if myoldbest and myinslotlist:
- previous_cpv = myoldbest[0].cpv
- else:
- previous_cpv = pkg.cpv
- if self.vardb.cpv_exists(previous_cpv):
- previous_pkg = self.vardb.match_pkgs('=' + previous_cpv)[0]
- self.old_iuse = sorted(previous_pkg.iuse.all)
- self.old_use = previous_pkg.use.enabled
- self.is_new = False
- else:
- self.old_iuse = []
- self.old_use = []
- self.is_new = True
-
- self.old_use = [flag for flag in self.old_use if flag in self.old_iuse]
-
- self.use_expand = pkg.use.expand
- self.use_expand_hidden = pkg.use.expand_hidden
- return
def include_mask_str(self):
return self.conf.verbosity > 1
@@ -219,13 +182,40 @@ class Display(object):
return ret
- def recheck_hidden(self, pkg):
- """ Prevent USE_EXPAND_HIDDEN flags from being hidden if they
- are the only thing that triggered reinstallation.
+ def _display_use(self, pkg, pkg_info):
+ """ USE flag display
@param pkg: _emerge.Package.Package instance
- Modifies self.use_expand_hidden, self.use_expand, self.verboseadd
+ @param pkg_info: PkgInfo instance
+ Modifies self.use_expand_hidden, self.use_expand, self.verboseadd,
+ self.forced_flags
"""
+
+ self.forced_flags = set()
+ self.forced_flags.update(pkg.use.force)
+ self.forced_flags.update(pkg.use.mask)
+
+ cur_use = [flag for flag in self.conf.pkg_use_enabled(pkg) \
+ if flag in pkg.iuse.all]
+ cur_iuse = sorted(pkg.iuse.all)
+
+ if pkg_info.previous_pkg is not None:
+ previous_pkg = pkg_info.previous_pkg
+ old_iuse = sorted(previous_pkg.iuse.all)
+ old_use = previous_pkg.use.enabled
+ is_new = False
+ else:
+ old_iuse = []
+ old_use = []
+ is_new = True
+
+ old_use = [flag for flag in old_use if flag in old_iuse]
+
+ self.use_expand = pkg.use.expand
+ self.use_expand_hidden = pkg.use.expand_hidden
+
+ # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
+ # are the only thing that triggered reinstallation.
reinst_flags_map = {}
reinstall_for_flags = self.conf.reinstall_nodes.get(pkg)
reinst_expand_map = None
@@ -246,13 +236,14 @@ class Display(object):
reinst_expand_map)
cur_iuse_map, iuse_forced = \
- self.map_to_use_expand(self.cur_iuse, forced_flags=True)
- cur_use_map = self.map_to_use_expand(self.cur_use)
- old_iuse_map = self.map_to_use_expand(self.old_iuse)
- old_use_map = self.map_to_use_expand(self.old_use)
+ self.map_to_use_expand(cur_iuse, forced_flags=True)
+ cur_use_map = self.map_to_use_expand(cur_use)
+ old_iuse_map = self.map_to_use_expand(old_iuse)
+ old_use_map = self.map_to_use_expand(old_use)
use_expand = sorted(self.use_expand)
use_expand.insert(0, "USE")
+ feature_flags = _get_feature_flags(_get_eapi_attrs(pkg.eapi))
for key in use_expand:
if key in self.use_expand_hidden:
@@ -260,7 +251,7 @@ class Display(object):
self.verboseadd += _create_use_string(self.conf, key.upper(),
cur_iuse_map[key], iuse_forced[key],
cur_use_map[key], old_iuse_map[key],
- old_use_map[key], self.is_new,
+ old_use_map[key], is_new, feature_flags,
reinst_flags_map.get(key))
return
@@ -318,13 +309,14 @@ class Display(object):
kwargs["myrepo"] = pkg.repo
myfilesdict = None
try:
- myfilesdict = db.getfetchsizes(pkg.cpv, **kwargs)
+ myfilesdict = db.getfetchsizes(pkg.cpv,
+ **portage._native_kwargs(kwargs))
except InvalidDependString as e:
# FIXME: validate SRC_URI earlier
depstr, = db.aux_get(pkg.cpv,
["SRC_URI"], myrepo=pkg.repo)
show_invalid_depstring_notice(
- pkg, depstr, str(e))
+ pkg, depstr, _unicode(e))
raise
except SignatureException:
# missing/invalid binary package SIZE signature
@@ -343,15 +335,13 @@ class Display(object):
if self.quiet_repo_display:
# overlay verbose
# assign index for a previous version in the same slot
- slot_matches = self.vardb.match(pkg.slot_atom)
- if slot_matches:
- repo_name_prev = self.vardb.aux_get(slot_matches[0],
- ["repository"])[0]
+ if pkg_info.previous_pkg is not None:
+ repo_name_prev = pkg_info.previous_pkg.repo
else:
repo_name_prev = None
# now use the data to generate output
- if pkg.installed or not slot_matches:
+ if pkg.installed or pkg_info.previous_pkg is None:
self.repoadd = self.conf.repo_display.repoStr(
pkg_info.repo_path_real)
else:
@@ -370,58 +360,86 @@ class Display(object):
repoadd_set.add(self.repoadd)
- def convert_myoldbest(self, pkg, myoldbest):
+ def convert_myoldbest(self, pkg, pkg_info):
"""converts and colorizes a version list to a string
@param pkg: _emerge.Package.Package instance
- @param myoldbest: list
+ @param pkg_info: PkgInfo instance
@rtype string.
"""
+ myoldbest = pkg_info.oldbest_list
# Convert myoldbest from a list to a string.
myoldbest_str = ""
if myoldbest:
versions = []
for pos, old_pkg in enumerate(myoldbest):
- key = catpkgsplit(old_pkg.cpv)[2] + "-" + catpkgsplit(old_pkg.cpv)[3]
+ key = old_pkg.version
if key[-3:] == "-r0":
key = key[:-3]
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in myoldbest + [pkg])):
- key += _repo_separator + old_pkg.repo
+ if self.conf.verbosity == 3:
+ if pkg_info.attr_display.new_slot:
+ key += _slot_separator + old_pkg.slot
+ if old_pkg.slot != old_pkg.sub_slot:
+ key += "/" + old_pkg.sub_slot
+ elif any(x.slot + "/" + x.sub_slot != "0/0" for x in myoldbest + [pkg]):
+ key += _slot_separator + old_pkg.slot
+ if old_pkg.slot != old_pkg.sub_slot or \
+ old_pkg.slot == pkg.slot and old_pkg.sub_slot != pkg.sub_slot:
+ key += "/" + old_pkg.sub_slot
+ if not self.quiet_repo_display and (self.verbose_main_repo_display or
+ self.portdb.repositories.mainRepo() is None or
+ any(x.repo != self.portdb.repositories.mainRepo().name for x in myoldbest + [pkg])):
+ key += _repo_separator + old_pkg.repo
versions.append(key)
myoldbest_str = blue("["+", ".join(versions)+"]")
return myoldbest_str
+ def _append_slot(self, pkg_str, pkg, pkg_info):
+ """Potentially appends slot and subslot to package string.
- def set_interactive(self, pkg, ordered, addl):
- """Increments counters.interactive if the pkg is to
- be merged and its metadata has interactive set True
+ @param pkg_str: string
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: PkgInfo instance
+ @rtype string
+ """
+ if pkg_info.attr_display.new_slot:
+ pkg_str += _slot_separator + pkg_info.slot
+ if pkg_info.slot != pkg_info.sub_slot:
+ pkg_str += "/" + pkg_info.sub_slot
+ elif any(x.slot + "/" + x.sub_slot != "0/0" for x in pkg_info.oldbest_list + [pkg]):
+ pkg_str += _slot_separator + pkg_info.slot
+ if pkg_info.slot != pkg_info.sub_slot or \
+ any(x.slot == pkg_info.slot and x.sub_slot != pkg_info.sub_slot for x in pkg_info.oldbest_list):
+ pkg_str += "/" + pkg_info.sub_slot
+ return pkg_str
+
+ def _append_repository(self, pkg_str, pkg, pkg_info):
+ """Potentially appends repository to package string.
+ @param pkg_str: string
@param pkg: _emerge.Package.Package instance
- @param ordered: boolean
- @param addl: already defined string to add to
+ @param pkg_info: PkgInfo instance
+ @rtype string
"""
- if 'interactive' in pkg.metadata.properties and \
- pkg.operation == 'merge':
- addl = colorize("WARN", "I") + addl[1:]
- if ordered:
- self.counters.interactive += 1
- return addl
-
- def _set_non_root_columns(self, addl, pkg_info, pkg):
+ if not self.quiet_repo_display and (self.verbose_main_repo_display or
+ self.portdb.repositories.mainRepo() is None or
+ any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
+ pkg_str += _repo_separator + pkg.repo
+ return pkg_str
+
+ def _set_non_root_columns(self, pkg, pkg_info):
"""sets the indent level and formats the output
- @param addl: already defined string to add to
- @param pkg_info: dictionary
@param pkg: _emerge.Package.Package instance
+ @param pkg_info: PkgInfo instance
@rtype string
"""
ver_str = pkg_info.ver
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
- ver_str += _repo_separator + pkg.repo
+ if self.conf.verbosity == 3:
+ ver_str = self._append_slot(ver_str, pkg, pkg_info)
+ ver_str = self._append_repository(ver_str, pkg, pkg_info)
if self.conf.quiet:
- myprint = addl + " " + self.indent + \
+ myprint = _unicode(pkg_info.attr_display) + " " + self.indent + \
self.pkgprint(pkg_info.cp, pkg_info)
myprint = myprint+darkblue(" "+ver_str)+" "
myprint = myprint+pkg_info.oldbest
@@ -434,7 +452,8 @@ class Display(object):
self.indent, self.pkgprint(pkg.cp, pkg_info))
else:
myprint = "[%s %s] %s%s" % \
- (self.pkgprint(pkg.type_name, pkg_info), addl,
+ (self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display,
self.indent, self.pkgprint(pkg.cp, pkg_info))
if (self.newlp-nc_len(myprint)) > 0:
myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
@@ -446,21 +465,20 @@ class Display(object):
return myprint
- def _set_root_columns(self, addl, pkg_info, pkg):
+ def _set_root_columns(self, pkg, pkg_info):
"""sets the indent level and formats the output
- @param addl: already defined string to add to
- @param pkg_info: dictionary
@param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
@rtype string
Modifies self.verboseadd
"""
ver_str = pkg_info.ver
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
- ver_str += _repo_separator + pkg.repo
+ if self.conf.verbosity == 3:
+ ver_str = self._append_slot(ver_str, pkg, pkg_info)
+ ver_str = self._append_repository(ver_str, pkg, pkg_info)
if self.conf.quiet:
- myprint = addl + " " + self.indent + \
+ myprint = _unicode(pkg_info.attr_display) + " " + self.indent + \
self.pkgprint(pkg_info.cp, pkg_info)
myprint = myprint+" "+green(ver_str)+" "
myprint = myprint+pkg_info.oldbest
@@ -473,7 +491,8 @@ class Display(object):
addl, self.indent, self.pkgprint(pkg.cp, pkg_info))
else:
myprint = "[%s %s] %s%s" % \
- (self.pkgprint(pkg.type_name, pkg_info), addl,
+ (self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display,
self.indent, self.pkgprint(pkg.cp, pkg_info))
if (self.newlp-nc_len(myprint)) > 0:
myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
@@ -484,18 +503,17 @@ class Display(object):
return myprint
- def _set_no_columns(self, pkg, pkg_info, addl):
+ def _set_no_columns(self, pkg, pkg_info):
"""prints pkg info without column indentation.
@param pkg: _emerge.Package.Package instance
@param pkg_info: dictionary
- @param addl: the current text to add for the next line to output
@rtype the updated addl
"""
pkg_str = pkg.cpv
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
- pkg_str += _repo_separator + pkg.repo
+ if self.conf.verbosity == 3:
+ pkg_str = self._append_slot(pkg_str, pkg, pkg_info)
+ pkg_str = self._append_repository(pkg_str, pkg, pkg_info)
if not pkg_info.merge:
addl = self.empty_space_in_brackets()
myprint = "[%s%s] %s%s %s" % \
@@ -506,46 +524,10 @@ class Display(object):
else:
myprint = "[%s %s] %s%s %s" % \
(self.pkgprint(pkg.type_name, pkg_info),
- addl, self.indent,
+ pkg_info.attr_display, self.indent,
self.pkgprint(pkg_str, pkg_info), pkg_info.oldbest)
return myprint
-
- def _insert_slot(self, pkg, pkg_info, myinslotlist):
- """Adds slot info to the message
-
- @return addl: formatted slot info
- @return myoldbest: installed version list
- Modifies self.counters.downgrades, self.counters.upgrades
- """
- addl = " " + pkg_info.fetch_symbol
- if not cpvequal(pkg.cpv,
- best([pkg.cpv] + [x.cpv for x in myinslotlist])):
- # Downgrade in slot
- addl += turquoise("U")+blue("D")
- if pkg_info.ordered:
- self.counters.downgrades += 1
- else:
- # Update in slot
- addl += turquoise("U") + " "
- if pkg_info.ordered:
- self.counters.upgrades += 1
- return addl
-
-
- def _new_slot(self, pkg, pkg_info):
- """New slot, mark it new.
-
- @return addl: formatted slot info
- @return myoldbest: installed version list
- Modifies self.counters.newslot
- """
- addl = " " + green("NS") + pkg_info.fetch_symbol + " "
- if pkg_info.ordered:
- self.counters.newslot += 1
- return addl
-
-
def print_messages(self, show_repos):
"""Performs the actual output printing of the pre-formatted
messages
@@ -581,9 +563,9 @@ class Display(object):
"""
writemsg_stdout('\n%s\n' % (self.counters,), noiselevel=-1)
if show_repos:
- # Use _unicode_decode() to force unicode format string so
+ # Use unicode_literals to force unicode format string so
# that RepoDisplay.__unicode__() is called in python2.
- writemsg_stdout(_unicode_decode("%s") % (self.conf.repo_display,),
+ writemsg_stdout("%s" % (self.conf.repo_display,),
noiselevel=-1)
return
@@ -635,15 +617,18 @@ class Display(object):
self.counters.restrict_fetch_satisfied
"""
pkg_info = PkgInfo()
+ pkg_info.cp = pkg.cp
+ pkg_info.ver = self.get_ver_str(pkg)
+ pkg_info.slot = pkg.slot
+ pkg_info.sub_slot = pkg.sub_slot
+ pkg_info.repo_name = pkg.repo
pkg_info.ordered = ordered
- pkg_info.fetch_symbol = " "
pkg_info.operation = pkg.operation
pkg_info.merge = ordered and pkg_info.operation == "merge"
if not pkg_info.merge and pkg_info.operation == "merge":
pkg_info.operation = "nomerge"
pkg_info.built = pkg.type_name != "ebuild"
pkg_info.ebuild_path = None
- pkg_info.repo_name = pkg.repo
if ordered:
if pkg_info.merge:
if pkg.type_name == "binary":
@@ -659,22 +644,30 @@ class Display(object):
pkg_info.repo_path_real = os.path.dirname(os.path.dirname(
os.path.dirname(pkg_info.ebuild_path)))
else:
- pkg_info.repo_path_real = \
- self.portdb.getRepositoryPath(pkg.metadata["repository"])
+ pkg_info.repo_path_real = self.portdb.getRepositoryPath(pkg.repo)
pkg_info.use = list(self.conf.pkg_use_enabled(pkg))
if not pkg.built and pkg.operation == 'merge' and \
- 'fetch' in pkg.metadata.restrict:
+ 'fetch' in pkg.restrict:
if pkg_info.ordered:
self.counters.restrict_fetch += 1
+ pkg_info.attr_display.fetch_restrict = True
if not self.portdb.getfetchsizes(pkg.cpv,
useflags=pkg_info.use, myrepo=pkg.repo):
- pkg_info.fetch_symbol = green("f")
+ pkg_info.attr_display.fetch_restrict_satisfied = True
if pkg_info.ordered:
self.counters.restrict_fetch_satisfied += 1
else:
- pkg_info.fetch_symbol = red("F")
if pkg_info.ebuild_path is not None:
self.restrict_fetch_list[pkg] = pkg_info
+
+ if self.vardb.cpv_exists(pkg.cpv):
+ # Do a cpv match first, in case the SLOT has changed.
+ pkg_info.previous_pkg = self.vardb.match_pkgs('=' + pkg.cpv)[0]
+ else:
+ slot_matches = self.vardb.match_pkgs(pkg.slot_atom)
+ if slot_matches:
+ pkg_info.previous_pkg = slot_matches[0]
+
return pkg_info
@@ -685,15 +678,14 @@ class Display(object):
		@param pkg_info: dictionary
Modifies self.changelogs
"""
- inst_matches = self.vardb.match(pkg.slot_atom)
- if inst_matches:
+ if pkg_info.previous_pkg is not None:
ebuild_path_cl = pkg_info.ebuild_path
if ebuild_path_cl is None:
# binary package
ebuild_path_cl = self.portdb.findname(pkg.cpv, myrepo=pkg.repo)
if ebuild_path_cl is not None:
self.changelogs.extend(_calc_changelog(
- ebuild_path_cl, inst_matches[0], pkg.cpv))
+ ebuild_path_cl, pkg_info.previous_pkg, pkg.cpv))
return
@@ -733,12 +725,10 @@ class Display(object):
@param pkg: _emerge.Package.Package instance
@rtype string
"""
- ver_str = list(catpkgsplit(pkg.cpv)[2:])
- if ver_str[1] == "r0":
- ver_str[1] = ""
- else:
- ver_str[1] = "-" + ver_str[1]
- return ver_str[0]+ver_str[1]
+ ver_str = pkg.cpv.version
+ if ver_str.endswith("-r0"):
+ ver_str = ver_str[:-3]
+ return ver_str
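A minimal standalone sketch of the new revision handling (the version strings are hypothetical):

    def strip_default_revision(version):
        # pkg.cpv.version carries an explicit "-r0" for the default
        # revision; drop it for display.
        if version.endswith("-r0"):
            version = version[:-3]
        return version

    print(strip_default_revision("1.2.3-r0"))  # 1.2.3
    print(strip_default_revision("1.2.3-r1"))  # 1.2.3-r1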
def _get_installed_best(self, pkg, pkg_info):
@@ -757,9 +747,10 @@ class Display(object):
myinslotlist = None
installed_versions = self.vardb.match_pkgs(pkg.cp)
if self.vardb.cpv_exists(pkg.cpv):
- addl = " "+yellow("R")+pkg_info.fetch_symbol+" "
- installed_version = self.vardb.match_pkgs(pkg.cpv)[0]
- if not self.quiet_repo_display and installed_version.repo != pkg.repo:
+ pkg_info.attr_display.replace = True
+ installed_version = pkg_info.previous_pkg
+ if installed_version.slot != pkg.slot or installed_version.sub_slot != pkg.sub_slot or \
+ not self.quiet_repo_display and installed_version.repo != pkg.repo:
myoldbest = [installed_version]
if pkg_info.ordered:
if pkg_info.merge:
@@ -775,17 +766,31 @@ class Display(object):
myinslotlist = None
if myinslotlist:
myoldbest = myinslotlist[:]
- addl = self._insert_slot(pkg, pkg_info, myinslotlist)
+ if not cpvequal(pkg.cpv,
+ best([pkg.cpv] + [x.cpv for x in myinslotlist])):
+ # Downgrade in slot
+ pkg_info.attr_display.new_version = True
+ pkg_info.attr_display.downgrade = True
+ if pkg_info.ordered:
+ self.counters.downgrades += 1
+ else:
+ # Update in slot
+ pkg_info.attr_display.new_version = True
+ if pkg_info.ordered:
+ self.counters.upgrades += 1
else:
myoldbest = installed_versions
- addl = self._new_slot(pkg, pkg_info)
+ pkg_info.attr_display.new = True
+ pkg_info.attr_display.new_slot = True
+ if pkg_info.ordered:
+ self.counters.newslot += 1
if self.conf.changelog:
self.do_changelog(pkg, pkg_info)
else:
- addl = " " + green("N") + " " + pkg_info.fetch_symbol + " "
+ pkg_info.attr_display.new = True
if pkg_info.ordered:
self.counters.new += 1
- return addl, myoldbest, myinslotlist
+ return myoldbest, myinslotlist
def __call__(self, depgraph, mylist, favorites=None, verbosity=None):
@@ -806,7 +811,7 @@ class Display(object):
# files to fetch list - avoids counting a same file twice
# in size display (verbose mode)
self.myfetchlist = set()
-
+
self.quiet_repo_display = "--quiet-repo-display" in depgraph._frozen_config.myopts
if self.quiet_repo_display:
# Use this set to detect when all the "repoadd" strings are "[0]"
@@ -824,47 +829,52 @@ class Display(object):
self.indent = " " * depth
if isinstance(pkg, Blocker):
- if self._blockers(pkg, fetch_symbol=" "):
- continue
+ self._blockers(pkg)
else:
pkg_info = self.set_pkg_info(pkg, ordered)
- addl, pkg_info.oldbest_list, myinslotlist = \
+ pkg_info.oldbest_list, myinslotlist = \
self._get_installed_best(pkg, pkg_info)
+ if ordered and pkg_info.merge and \
+ not pkg_info.attr_display.new:
+ for arg, atom in depgraph._iter_atoms_for_pkg(pkg):
+ if arg.force_reinstall:
+ pkg_info.attr_display.force_reinstall = True
+ break
+
self.verboseadd = ""
if self.quiet_repo_display:
self.repoadd = None
- self._display_use(pkg, pkg_info.oldbest_list, myinslotlist)
- self.recheck_hidden(pkg)
+ self._display_use(pkg, pkg_info)
if self.conf.verbosity == 3:
if self.quiet_repo_display:
self.verbose_size(pkg, repoadd_set, pkg_info)
else:
self.verbose_size(pkg, None, pkg_info)
- pkg_info.cp = pkg.cp
- pkg_info.ver = self.get_ver_str(pkg)
-
self.oldlp = self.conf.columnwidth - 30
self.newlp = self.oldlp - 30
- pkg_info.oldbest = self.convert_myoldbest(pkg, pkg_info.oldbest_list)
+ pkg_info.oldbest = self.convert_myoldbest(pkg, pkg_info)
pkg_info.system, pkg_info.world = \
self.check_system_world(pkg)
- addl = self.set_interactive(pkg, pkg_info.ordered, addl)
+ if 'interactive' in pkg.properties and \
+ pkg.operation == 'merge':
+ pkg_info.attr_display.interactive = True
+ if ordered:
+ self.counters.interactive += 1
if self.include_mask_str():
- addl += self.gen_mask_str(pkg)
+ pkg_info.attr_display.mask = self.gen_mask_str(pkg)
if pkg.root_config.settings["ROOT"] != "/":
if pkg_info.oldbest:
pkg_info.oldbest += " "
if self.conf.columns:
- myprint = self._set_non_root_columns(
- addl, pkg_info, pkg)
+ myprint = self._set_non_root_columns(pkg, pkg_info)
else:
pkg_str = pkg.cpv
- if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
- any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
- pkg_str += _repo_separator + pkg.repo
+ if self.conf.verbosity == 3:
+ pkg_str = self._append_slot(pkg_str, pkg, pkg_info)
+ pkg_str = self._append_repository(pkg_str, pkg, pkg_info)
if not pkg_info.merge:
addl = self.empty_space_in_brackets()
myprint = "[%s%s] " % (
@@ -873,17 +883,16 @@ class Display(object):
)
else:
myprint = "[%s %s] " % (
- self.pkgprint(pkg.type_name, pkg_info), addl)
+ self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display)
myprint += self.indent + \
self.pkgprint(pkg_str, pkg_info) + " " + \
pkg_info.oldbest + darkgreen("to " + pkg.root)
else:
if self.conf.columns:
- myprint = self._set_root_columns(
- addl, pkg_info, pkg)
+ myprint = self._set_root_columns(pkg, pkg_info)
else:
- myprint = self._set_no_columns(
- pkg, pkg_info, addl)
+ myprint = self._set_no_columns(pkg, pkg_info)
if self.conf.columns and pkg.operation == "uninstall":
continue
@@ -908,3 +917,105 @@ class Display(object):
self.print_changelog()
return os.EX_OK
+
+
+def format_unmatched_atom(pkg, atom, pkg_use_enabled):
+ """
+	Returns two strings. The first string contains 'atom',
+	with the parts that 'pkg' does not match colorized. The
+	second string has the same number of characters as the
+	first one, but consists only of white space and ^; each ^
+	marks the position of a colorized part of the first string.
+ """
+ # Things to check:
+ # 1. Version
+ # 2. cp
+ # 3. slot/sub_slot
+ # 4. repository
+ # 5. USE
+
+ highlight = set()
+
+ def perform_coloring():
+ atom_str = ""
+ marker_str = ""
+ for ii, x in enumerate(atom):
+ if ii in highlight:
+ atom_str += colorize("BAD", x)
+ marker_str += "^"
+ else:
+ atom_str += x
+ marker_str += " "
+ return atom_str, marker_str
+
+ if atom.cp != pkg.cp:
+ # Highlight the cp part only.
+ ii = atom.find(atom.cp)
+ highlight.update(range(ii, ii + len(atom.cp)))
+ return perform_coloring()
+
+ version_atom = atom.without_repo.without_slot.without_use
+ version_atom_set = InternalPackageSet(initial_atoms=(version_atom,))
+ highlight_version = not bool(version_atom_set.findAtomForPackage(pkg,
+ modified_use=pkg_use_enabled(pkg)))
+
+ highlight_slot = False
+ if (atom.slot and atom.slot != pkg.slot) or \
+ (atom.sub_slot and atom.sub_slot != pkg.sub_slot):
+ highlight_slot = True
+
+ if highlight_version:
+ op = atom.operator
+ ver = None
+ if atom.cp != atom.cpv:
+ ver = cpv_getversion(atom.cpv)
+
+ if op == "=*":
+ op = "="
+ ver += "*"
+
+ if op is not None:
+ highlight.update(range(len(op)))
+
+ if ver is not None:
+ start = atom.rfind(ver)
+ end = start + len(ver)
+ highlight.update(range(start, end))
+
+ if highlight_slot:
+ slot_str = ":" + atom.slot
+ if atom.sub_slot:
+ slot_str += "/" + atom.sub_slot
+ if atom.slot_operator:
+ slot_str += atom.slot_operator
+ start = atom.find(slot_str)
+ end = start + len(slot_str)
+ highlight.update(range(start, end))
+
+ highlight_use = set()
+ if atom.use:
+ use_atom = "%s[%s]" % (atom.cp, str(atom.use))
+ use_atom_set = InternalPackageSet(initial_atoms=(use_atom,))
+ if not use_atom_set.findAtomForPackage(pkg, \
+ modified_use=pkg_use_enabled(pkg)):
+ missing_iuse = pkg.iuse.get_missing_iuse(
+ atom.unevaluated_atom.use.required)
+ if missing_iuse:
+ highlight_use = set(missing_iuse)
+ else:
+ #Use conditionals not met.
+ violated_atom = atom.violated_conditionals(
+ pkg_use_enabled(pkg), pkg.iuse.is_valid_flag)
+ if violated_atom.use is not None:
+ highlight_use = set(violated_atom.use.enabled.union(
+ violated_atom.use.disabled))
+
+ if highlight_use:
+ ii = atom.find("[") + 1
+ for token in atom.use.tokens:
+ if token.lstrip("-!").rstrip("=?") in highlight_use:
+ highlight.update(range(ii, ii + len(token)))
+ ii += len(token) + 1
+
+ return perform_coloring()
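A sketch of the two-string result for a hypothetical atom whose operator and version fail to match; the real first string carries color codes rather than plain text:

    atom_str = ">=dev-libs/foo-2.0"  # ">=" and "2.0" would be colorized
    highlight = set(range(2)) | set(range(15, 18))  # operator, version spans
    marker_str = "".join("^" if i in highlight else " "
        for i in range(len(atom_str)))
    print(atom_str)
    print(marker_str)  # ^^             ^^^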
diff --git a/pym/_emerge/resolver/output_helpers.py b/pym/_emerge/resolver/output_helpers.py
index e751dd8e4..58b26945a 100644
--- a/pym/_emerge/resolver/output_helpers.py
+++ b/pym/_emerge/resolver/output_helpers.py
@@ -1,9 +1,12 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""Contains private support functions for the Display class
in output.py
"""
+
+from __future__ import unicode_literals
+
__all__ = (
)
@@ -15,9 +18,10 @@ from portage import os
from portage import _encodings, _unicode_encode
from portage._sets.base import InternalPackageSet
from portage.output import (blue, bold, colorize, create_color_func,
- green, red, teal, yellow)
+ green, red, teal, turquoise, yellow)
bad = create_color_func("BAD")
from portage.util import shlex_split, writemsg
+from portage.util.SlotObject import SlotObject
from portage.versions import catpkgsplit
from _emerge.Blocker import Blocker
@@ -223,7 +227,7 @@ class _DisplayConfig(object):
self.reinstall_nodes = dynamic_config._reinstall_nodes
self.digraph = dynamic_config.digraph
self.blocker_uninstalls = dynamic_config._blocker_uninstalls
- self.slot_pkg_map = dynamic_config._slot_pkg_map
+ self.package_tracker = dynamic_config._package_tracker
self.set_nodes = dynamic_config._set_nodes
self.pkg_use_enabled = depgraph._pkg_use_enabled
@@ -245,10 +249,9 @@ def _format_size(mysize):
mystr=mystr[:mycount]+","+mystr[mycount:]
return mystr+" kB"
-
def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
old_iuse, old_use,
- is_new, reinst_flags):
+ is_new, feature_flags, reinst_flags):
if not conf.print_use_string:
return ""
@@ -266,6 +269,7 @@ def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
any_iuse = cur_iuse.union(old_iuse)
any_iuse = list(any_iuse)
any_iuse.sort()
+
for flag in any_iuse:
flag_str = None
isEnabled = False
@@ -299,7 +303,9 @@ def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
elif flag in old_use:
flag_str = green("-" + flag) + "*"
if flag_str:
- if flag in iuse_forced:
+ if flag in feature_flags:
+ flag_str = "{" + flag_str + "}"
+ elif flag in iuse_forced:
flag_str = "(" + flag_str + ")"
if isEnabled:
enabled.append(flag_str)
@@ -364,8 +370,9 @@ def _tree_display(conf, mylist):
# If the uninstall task did not need to be executed because
# of an upgrade, display Blocker -> Upgrade edges since the
# corresponding Blocker -> Uninstall edges will not be shown.
- upgrade_node = \
- conf.slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
+ upgrade_node = next(conf.package_tracker.match(
+ uninstall.root, uninstall.slot_atom), None)
+
if upgrade_node is not None and \
uninstall not in executed_uninstalls:
for blocker in uninstall_parents:
@@ -611,9 +618,10 @@ class PkgInfo(object):
information about the pkg being printed.
"""
- __slots__ = ("built", "cp", "ebuild_path", "fetch_symbol", "merge",
- "oldbest", "oldbest_list", "operation", "ordered",
- "repo_name", "repo_path_real", "system", "use", "ver", "world")
+ __slots__ = ("attr_display", "built", "cp",
+ "ebuild_path", "fetch_symbol", "merge",
+ "oldbest", "oldbest_list", "operation", "ordered", "previous_pkg",
+ "repo_name", "repo_path_real", "slot", "sub_slot", "system", "use", "ver", "world")
def __init__(self):
@@ -626,9 +634,74 @@ class PkgInfo(object):
self.oldbest_list = []
self.operation = ''
self.ordered = False
+ self.previous_pkg = None
self.repo_path_real = ''
self.repo_name = ''
+ self.slot = ''
+ self.sub_slot = ''
self.system = False
self.use = ''
self.ver = ''
self.world = False
+ self.attr_display = PkgAttrDisplay()
+
+class PkgAttrDisplay(SlotObject):
+
+ __slots__ = ("downgrade", "fetch_restrict", "fetch_restrict_satisfied",
+ "force_reinstall",
+ "interactive", "mask", "new", "new_slot", "new_version", "replace")
+
+ def __str__(self):
+ output = []
+
+ if self.interactive:
+ output.append(colorize("WARN", "I"))
+ else:
+ output.append(" ")
+
+ if self.new or self.force_reinstall:
+ if self.force_reinstall:
+ output.append(red("r"))
+ else:
+ output.append(green("N"))
+ else:
+ output.append(" ")
+
+ if self.new_slot or self.replace:
+ if self.replace:
+ output.append(yellow("R"))
+ else:
+ output.append(green("S"))
+ else:
+ output.append(" ")
+
+ if self.fetch_restrict or self.fetch_restrict_satisfied:
+ if self.fetch_restrict_satisfied:
+ output.append(green("f"))
+ else:
+ output.append(red("F"))
+ else:
+ output.append(" ")
+
+ if self.new_version:
+ output.append(turquoise("U"))
+ else:
+ output.append(" ")
+
+ if self.downgrade:
+ output.append(blue("D"))
+ else:
+ output.append(" ")
+
+ if self.mask is not None:
+ output.append(self.mask)
+
+ return "".join(output)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
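For illustration, a plain-text sketch of how the fixed-width attribute string is composed (colors omitted, mask handling dropped; the flag values are hypothetical):

    def attr_string(interactive=False, new=False, force_reinstall=False,
            new_slot=False, replace=False, fetch_restrict=False,
            fetch_satisfied=False, new_version=False, downgrade=False):
        out = []
        out.append("I" if interactive else " ")
        out.append(("r" if force_reinstall else "N")
            if (new or force_reinstall) else " ")
        out.append(("R" if replace else "S")
            if (new_slot or replace) else " ")
        out.append(("f" if fetch_satisfied else "F")
            if (fetch_restrict or fetch_satisfied) else " ")
        out.append("U" if new_version else " ")
        out.append("D" if downgrade else " ")
        return "".join(out)

    print(attr_string(replace=True))                      # "  R   "
    print(attr_string(new_version=True, downgrade=True))  # "    UD"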
diff --git a/pym/_emerge/resolver/package_tracker.py b/pym/_emerge/resolver/package_tracker.py
new file mode 100644
index 000000000..5982750a0
--- /dev/null
+++ b/pym/_emerge/resolver/package_tracker.py
@@ -0,0 +1,301 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import collections
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dep:Atom,match_from_list',
+ 'portage.util:cmp_sort_key',
+ 'portage.versions:vercmp',
+)
+
+_PackageConflict = collections.namedtuple("_PackageConflict", ["root", "pkgs", "atom", "description"])
+
+class PackageConflict(_PackageConflict):
+ """
+ Class to track the reason for a conflict and the conflicting packages.
+ """
+ def __iter__(self):
+ return iter(self.pkgs)
+
+ def __contains__(self, pkg):
+ return pkg in self.pkgs
+
+ def __len__(self):
+ return len(self.pkgs)
+
+
+class PackageTracker(object):
+ """
+ This class tracks packages which are currently
+ installed and packages which have been pulled into
+ the dependency graph.
+
+ It automatically tracks conflicts between packages.
+
+ Possible conflicts:
+ 1) Packages that share the same SLOT.
+ 2) Packages with the same cpv.
+ Not yet implemented:
+ 3) Packages that block each other.
+ """
+
+ def __init__(self):
+ # Mapping from package keys to set of packages.
+ self._cp_pkg_map = collections.defaultdict(list)
+ self._cp_vdb_pkg_map = collections.defaultdict(list)
+ # List of package keys that may contain conflicts.
+		# The insertion order must be preserved.
+ self._multi_pkgs = []
+
+ # Cache for result of conflicts().
+ self._conflicts_cache = None
+
+		# Records, for each pulled package, which installed
+		# packages it replaces.
+		self._replacing = collections.defaultdict(list)
+		# Records, for each installed package, which pulled
+		# packages replace it.
+ self._replaced_by = collections.defaultdict(list)
+
+ self._match_cache = collections.defaultdict(dict)
+
+ def add_pkg(self, pkg):
+ """
+ Add a new package to the tracker. Records conflicts as necessary.
+ """
+ cp_key = pkg.root, pkg.cp
+
+ if any(other is pkg for other in self._cp_pkg_map[cp_key]):
+ return
+
+ self._cp_pkg_map[cp_key].append(pkg)
+
+ if len(self._cp_pkg_map[cp_key]) > 1:
+ self._conflicts_cache = None
+ if len(self._cp_pkg_map[cp_key]) == 2:
+ self._multi_pkgs.append(cp_key)
+
+ self._replacing[pkg] = []
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed.slot_atom == pkg.slot_atom or \
+ installed.cpv == pkg.cpv:
+ self._replacing[pkg].append(installed)
+ self._replaced_by[installed].append(pkg)
+
+ self._match_cache.pop(cp_key, None)
+
+ def add_installed_pkg(self, installed):
+ """
+		Add an installed package during vdb load. These packages
+		are returned by match() only when 'installed' is True and
+		they have not been replaced by a package added via add_pkg().
+ """
+ cp_key = installed.root, installed.cp
+ if any(other is installed for other in self._cp_vdb_pkg_map[cp_key]):
+ return
+
+ self._cp_vdb_pkg_map[cp_key].append(installed)
+
+ for pkg in self._cp_pkg_map.get(cp_key, []):
+ if installed.slot_atom == pkg.slot_atom or \
+ installed.cpv == pkg.cpv:
+ self._replacing[pkg].append(installed)
+ self._replaced_by[installed].append(pkg)
+
+ self._match_cache.pop(cp_key, None)
+
+ def remove_pkg(self, pkg):
+ """
+ Removes the package from the tracker.
+ Raises KeyError if it isn't present.
+ """
+ cp_key = pkg.root, pkg.cp
+ try:
+ self._cp_pkg_map.get(cp_key, []).remove(pkg)
+ except ValueError:
+ raise KeyError(pkg)
+
+ if self._cp_pkg_map[cp_key]:
+ self._conflicts_cache = None
+
+ if not self._cp_pkg_map[cp_key]:
+ del self._cp_pkg_map[cp_key]
+ elif len(self._cp_pkg_map[cp_key]) == 1:
+ self._multi_pkgs = [other_cp_key for other_cp_key in self._multi_pkgs \
+ if other_cp_key != cp_key]
+
+ for installed in self._replacing[pkg]:
+ self._replaced_by[installed].remove(pkg)
+ if not self._replaced_by[installed]:
+ del self._replaced_by[installed]
+ del self._replacing[pkg]
+
+ self._match_cache.pop(cp_key, None)
+
+ def discard_pkg(self, pkg):
+ """
+ Removes the package from the tracker.
+		Does not raise KeyError if it is not present.
+ """
+ try:
+ self.remove_pkg(pkg)
+ except KeyError:
+ pass
+
+ def match(self, root, atom, installed=True):
+ """
+ Iterates over the packages matching 'atom'.
+ If 'installed' is True, installed non-replaced
+ packages may also be returned.
+ """
+ cp_key = root, atom.cp
+ cache_key = root, atom, installed
+ try:
+ return iter(self._match_cache.get(cp_key, {})[cache_key])
+ except KeyError:
+ pass
+
+ candidates = self._cp_pkg_map.get(cp_key, [])[:]
+
+ if installed:
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed not in self._replaced_by:
+ candidates.append(installed)
+
+ ret = match_from_list(atom, candidates)
+ ret.sort(key=cmp_sort_key(lambda x, y: vercmp(x.version, y.version)))
+ self._match_cache[cp_key][cache_key] = ret
+
+ return iter(ret)
+
+ def conflicts(self):
+ """
+		Iterates over the currently existing conflicts.
+ """
+ if self._conflicts_cache is None:
+ self._conflicts_cache = []
+
+ for cp_key in self._multi_pkgs:
+
+ # Categorize packages according to cpv and slot.
+ slot_map = collections.defaultdict(list)
+ cpv_map = collections.defaultdict(list)
+ for pkg in self._cp_pkg_map[cp_key]:
+ slot_key = pkg.root, pkg.slot_atom
+ cpv_key = pkg.root, pkg.cpv
+ slot_map[slot_key].append(pkg)
+ cpv_map[cpv_key].append(pkg)
+
+ # Slot conflicts.
+ for slot_key in slot_map:
+ slot_pkgs = slot_map[slot_key]
+ if len(slot_pkgs) > 1:
+ self._conflicts_cache.append(PackageConflict(
+ description = "slot conflict",
+ root = slot_key[0],
+ atom = slot_key[1],
+ pkgs = tuple(slot_pkgs),
+ ))
+
+ # CPV conflicts.
+ for cpv_key in cpv_map:
+ cpv_pkgs = cpv_map[cpv_key]
+ if len(cpv_pkgs) > 1:
+ # Make sure this cpv conflict is not a slot conflict at the same time.
+ # Ignore it if it is.
+ slots = set(pkg.slot for pkg in cpv_pkgs)
+ if len(slots) > 1:
+ self._conflicts_cache.append(PackageConflict(
+ description = "cpv conflict",
+ root = cpv_key[0],
+ atom = cpv_key[1],
+ pkgs = tuple(cpv_pkgs),
+ ))
+
+ return iter(self._conflicts_cache)
+
+ def slot_conflicts(self):
+ """
+ Iterates over present slot conflicts.
+ This is only intended for consumers that haven't been
+ updated to deal with other kinds of conflicts.
+		This function should be removed once all consumers are updated.
+ """
+ return (conflict for conflict in self.conflicts() \
+ if conflict.description == "slot conflict")
+
+ def all_pkgs(self, root):
+ """
+ Iterates over all packages for the given root
+ present in the tracker, including the installed
+ packages.
+ """
+ for cp_key in self._cp_pkg_map:
+ if cp_key[0] == root:
+ for pkg in self._cp_pkg_map[cp_key]:
+ yield pkg
+
+ for cp_key in self._cp_vdb_pkg_map:
+ if cp_key[0] == root:
+ for installed in self._cp_vdb_pkg_map[cp_key]:
+ if installed not in self._replaced_by:
+ yield installed
+
+ def contains(self, pkg, installed=True):
+ """
+ Checks if the package is in the tracker.
+ If 'installed' is True, returns True for
+ non-replaced installed packages.
+ """
+ cp_key = pkg.root, pkg.cp
+ for other in self._cp_pkg_map.get(cp_key, []):
+ if other is pkg:
+ return True
+
+ if installed:
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed is pkg and \
+ installed not in self._replaced_by:
+ return True
+
+ return False
+
+ def __contains__(self, pkg):
+ """
+ Checks if the package is in the tracker.
+ Returns True for non-replaced installed packages.
+ """
+ return self.contains(pkg, installed=True)
+
+
+class PackageTrackerDbapiWrapper(object):
+ """
+	A wrapper class that provides parts of the legacy
+ dbapi interface. Remove it once all consumers have
+ died.
+ """
+ def __init__(self, root, package_tracker):
+ self._root = root
+ self._package_tracker = package_tracker
+
+ def cpv_inject(self, pkg):
+ self._package_tracker.add_pkg(pkg)
+
+ def match_pkgs(self, atom):
+ if not isinstance(atom, Atom):
+ atom = Atom(atom)
+ ret = sorted(self._package_tracker.match(self._root, atom),
+ key=cmp_sort_key(lambda x, y: vercmp(x.version, y.version)))
+ return ret
+
+ def __iter__(self):
+ return self._package_tracker.all_pkgs(self._root)
+
+ def match(self, atom, use_cache=None):
+ return self.match_pkgs(atom)
+
+ def cp_list(self, cp):
+ return self.match_pkgs(cp)
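An illustrative API flow; pkg and installed_pkg stand in for _emerge.Package.Package instances, so this is a sketch rather than a standalone script:

    tracker = PackageTracker()
    tracker.add_installed_pkg(installed_pkg)  # loaded from the vdb
    tracker.add_pkg(pkg)                      # pulled into the graph

    # match() yields pulled packages plus non-replaced installed ones.
    matches = list(tracker.match(pkg.root, pkg.slot_atom))

    # Slot and cpv conflicts are detected automatically.
    for conflict in tracker.conflicts():
        print(conflict.description, conflict.root, conflict.atom)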
diff --git a/pym/_emerge/resolver/slot_collision.py b/pym/_emerge/resolver/slot_collision.py
index 783a6483d..baeab080a 100644
--- a/pym/_emerge/resolver/slot_collision.py
+++ b/pym/_emerge/resolver/slot_collision.py
@@ -1,10 +1,11 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
import sys
+from portage import _encodings, _unicode_encode
from _emerge.AtomArg import AtomArg
from _emerge.Package import Package
from _emerge.PackageArg import PackageArg
@@ -88,10 +89,11 @@ class slot_conflict_handler(object):
self.debug = "--debug" in self.myopts
if self.debug:
writemsg("Starting slot conflict handler\n", noiselevel=-1)
- #slot_collision_info is a dict mapping (slot atom, root) to set
- #of packages. The packages in the set all belong to the same
- #slot.
- self.slot_collision_info = depgraph._dynamic_config._slot_collision_info
+
+ # List of tuples, where each tuple represents a slot conflict.
+ self.all_conflicts = []
+ for conflict in depgraph._dynamic_config._package_tracker.slot_conflicts():
+ self.all_conflicts.append((conflict.root, conflict.atom, conflict.pkgs))
#A dict mapping packages to pairs of parent package
#and parent atom
@@ -108,8 +110,7 @@ class slot_conflict_handler(object):
all_conflict_atoms_by_slotatom = []
#fill conflict_pkgs, all_conflict_atoms_by_slotatom
- for (atom, root), pkgs \
- in self.slot_collision_info.items():
+ for root, atom, pkgs in self.all_conflicts:
conflict_pkgs.append(list(pkgs))
all_conflict_atoms_by_slotatom.append(set())
@@ -150,7 +151,7 @@ class slot_conflict_handler(object):
if self.debug:
writemsg("\nNew configuration:\n", noiselevel=-1)
for pkg in config:
- writemsg(" " + str(pkg) + "\n", noiselevel=-1)
+ writemsg(" %s\n" % (pkg,), noiselevel=-1)
writemsg("\n", noiselevel=-1)
new_solutions = self._check_configuration(config, all_conflict_atoms_by_slotatom, conflict_nodes)
@@ -225,10 +226,14 @@ class slot_conflict_handler(object):
new_change = {}
for pkg in solution:
for flag, state in solution[pkg].items():
+ real_flag = pkg.iuse.get_real_flag(flag)
+ if real_flag is None:
+ # Triggered by use-dep defaults.
+ continue
if state == "enabled" and flag not in _pkg_use_enabled(pkg):
- new_change.setdefault(pkg, {})[flag] = True
+ new_change.setdefault(pkg, {})[real_flag] = True
elif state == "disabled" and flag in _pkg_use_enabled(pkg):
- new_change.setdefault(pkg, {})[flag] = False
+ new_change.setdefault(pkg, {})[real_flag] = False
return new_change
def _prepare_conflict_msg_and_check_for_specificity(self):
@@ -236,6 +241,7 @@ class slot_conflict_handler(object):
Print all slot conflicts in a human readable way.
"""
_pkg_use_enabled = self.depgraph._pkg_use_enabled
+ verboseconflicts = "--verbose-conflicts" in self.myopts
msg = self.conflict_msg
indent = " "
msg.append("\n!!! Multiple package instances within a single " + \
@@ -243,16 +249,15 @@ class slot_conflict_handler(object):
msg.append("!!! into the dependency graph, resulting" + \
" in a slot conflict:\n\n")
- for (slot_atom, root), pkgs \
- in self.slot_collision_info.items():
- msg.append(str(slot_atom))
+ for root, slot_atom, pkgs in self.all_conflicts:
+ msg.append("%s" % (slot_atom,))
if root != self.depgraph._frozen_config._running_root.root:
msg.append(" for %s" % (root,))
msg.append("\n\n")
for pkg in pkgs:
msg.append(indent)
- msg.append(str(pkg))
+ msg.append("%s" % (pkg,))
parent_atoms = self.all_parents.get(pkg)
if parent_atoms:
#Create a list of collision reasons and map them to sets
@@ -268,12 +273,14 @@ class slot_conflict_handler(object):
for ppkg, atom in parent_atoms:
atom_set = InternalPackageSet(initial_atoms=(atom,))
atom_without_use_set = InternalPackageSet(initial_atoms=(atom.without_use,))
+ atom_without_use_and_slot_set = InternalPackageSet(initial_atoms=(
+ atom.without_use.without_slot,))
for other_pkg in pkgs:
if other_pkg == pkg:
continue
- if not atom_without_use_set.findAtomForPackage(other_pkg, \
+ if not atom_without_use_and_slot_set.findAtomForPackage(other_pkg, \
modified_use=_pkg_use_enabled(other_pkg)):
if atom.operator is not None:
# The version range does not match.
@@ -290,9 +297,11 @@ class slot_conflict_handler(object):
atoms.add((ppkg, atom, other_pkg))
num_all_specific_atoms += 1
collision_reasons[key] = atoms
- else:
- # The slot_abi does not match.
- key = ("sub-slot", atom.slot_abi)
+
+ elif not atom_without_use_set.findAtomForPackage(other_pkg, \
+ modified_use=_pkg_use_enabled(other_pkg)):
+ # The slot and/or sub_slot does not match.
+ key = ("slot", (atom.slot, atom.sub_slot, atom.slot_operator))
atoms = collision_reasons.get(key, set())
atoms.add((ppkg, atom, other_pkg))
num_all_specific_atoms += 1
@@ -312,11 +321,36 @@ class slot_conflict_handler(object):
#Use conditionals not met.
violated_atom = atom.violated_conditionals(_pkg_use_enabled(other_pkg), \
other_pkg.iuse.is_valid_flag)
+ if violated_atom.use is None:
+ # Something like bug #453400 caused the
+ # above findAtomForPackage call to
+ # return None unexpectedly.
+ msg = ("\n\n!!! BUG: Detected "
+ "USE dep match inconsistency:\n"
+ "\tppkg: %s\n"
+ "\tviolated_atom: %s\n"
+ "\tatom: %s unevaluated: %s\n"
+ "\tother_pkg: %s IUSE: %s USE: %s\n" %
+ (ppkg,
+ violated_atom,
+ atom,
+ atom.unevaluated_atom,
+ other_pkg,
+ sorted(other_pkg.iuse.all),
+ sorted(_pkg_use_enabled(other_pkg))))
+ writemsg(msg, noiselevel=-2)
+ raise AssertionError(
+ 'BUG: USE dep match inconsistency')
for flag in violated_atom.use.enabled.union(violated_atom.use.disabled):
atoms = collision_reasons.get(("use", flag), set())
atoms.add((ppkg, atom, other_pkg))
collision_reasons[("use", flag)] = atoms
num_all_specific_atoms += 1
+ elif isinstance(ppkg, AtomArg) and other_pkg.installed:
+ parent_atoms = collision_reasons.get(("AtomArg", None), set())
+ parent_atoms.add((ppkg, atom))
+ collision_reasons[("AtomArg", None)] = parent_atoms
+ num_all_specific_atoms += 1
msg.append(" pulled in by\n")
@@ -342,10 +376,16 @@ class slot_conflict_handler(object):
best_matches[atom.cp] = (ppkg, atom)
else:
best_matches[atom.cp] = (ppkg, atom)
- selected_for_display.update(best_matches.values())
- elif type == "sub-slot":
+ if verboseconflicts:
+ selected_for_display.add((ppkg, atom))
+ if not verboseconflicts:
+ selected_for_display.update(
+ best_matches.values())
+ elif type == "slot":
for ppkg, atom, other_pkg in parents:
selected_for_display.add((ppkg, atom))
+ if not verboseconflicts:
+ break
elif type == "use":
#Prefer atoms with unconditional use deps over, because it's
#not possible to change them on the parent, which means there
@@ -387,21 +427,50 @@ class slot_conflict_handler(object):
# If the list is long, people can simply
# use a pager.
selected_for_display.add((ppkg, atom))
+ elif type == "AtomArg":
+ for ppkg, atom in parents:
+ selected_for_display.add((ppkg, atom))
- def highlight_violations(atom, version, use=[]):
+ def highlight_violations(atom, version, use, slot_violated):
"""Colorize parts of an atom"""
- atom_str = str(atom)
+ atom_str = "%s" % (atom,)
+ colored_idx = set()
if version:
op = atom.operator
ver = None
if atom.cp != atom.cpv:
ver = cpv_getversion(atom.cpv)
slot = atom.slot
+ sub_slot = atom.sub_slot
+ slot_operator = atom.slot_operator
if op == "=*":
op = "="
ver += "*"
+ slot_str = ""
+ if slot:
+ slot_str = ":" + slot
+ if sub_slot:
+ slot_str += "/" + sub_slot
+ if slot_operator:
+ slot_str += slot_operator
+
+ # Compute color_idx before adding the color codes
+ # as these change the indices of the letters.
+ if op is not None:
+ colored_idx.update(range(len(op)))
+
+ if ver is not None:
+ start = atom_str.rfind(ver)
+ end = start + len(ver)
+ colored_idx.update(range(start, end))
+
+ if slot_str:
+ ii = atom_str.find(slot_str)
+ colored_idx.update(range(ii, ii + len(slot_str)))
+
if op is not None:
atom_str = atom_str.replace(op, colorize("BAD", op), 1)
@@ -411,25 +480,48 @@ class slot_conflict_handler(object):
atom_str = atom_str[:start] + \
colorize("BAD", ver) + \
atom_str[end:]
+
+ if slot_str:
+ atom_str = atom_str.replace(slot_str, colorize("BAD", slot_str), 1)
+
+ elif slot_violated:
+ slot = atom.slot
+ sub_slot = atom.sub_slot
+ slot_operator = atom.slot_operator
+
+ slot_str = ""
if slot:
- atom_str = atom_str.replace(":" + slot, colorize("BAD", ":" + slot))
+ slot_str = ":" + slot
+ if sub_slot:
+ slot_str += "/" + sub_slot
+ if slot_operator:
+ slot_str += slot_operator
+
+ if slot_str:
+ ii = atom_str.find(slot_str)
+ colored_idx.update(range(ii, ii + len(slot_str)))
+ atom_str = atom_str.replace(slot_str, colorize("BAD", slot_str), 1)
if use and atom.use.tokens:
use_part_start = atom_str.find("[")
use_part_end = atom_str.find("]")
new_tokens = []
+ # Compute start index in non-colored atom.
+ ii = str(atom).find("[") + 1
for token in atom.use.tokens:
if token.lstrip("-!").rstrip("=?") in use:
new_tokens.append(colorize("BAD", token))
+ colored_idx.update(range(ii, ii + len(token)))
else:
new_tokens.append(token)
+ ii += 1 + len(token)
atom_str = atom_str[:use_part_start] \
+ "[%s]" % (",".join(new_tokens),) + \
atom_str[use_part_end+1:]
- return atom_str
+ return atom_str, colored_idx
# Show unconditional use deps first, since those
# are more problematic than the conditional kind.
@@ -440,37 +532,49 @@ class slot_conflict_handler(object):
ordered_list.append(parent_atom)
for parent_atom in ordered_list:
parent, atom = parent_atom
- msg.append(2*indent)
- if isinstance(parent,
- (PackageArg, AtomArg)):
- # For PackageArg and AtomArg types, it's
+ if isinstance(parent, PackageArg):
+ # For PackageArg it's
# redundant to display the atom attribute.
- msg.append(str(parent))
+ msg.append("%s\n" % (parent,))
+ elif isinstance(parent, AtomArg):
+ msg.append(2*indent)
+ msg.append("%s (Argument)\n" % (atom,))
else:
# Display the specific atom from SetArg or
# Package types.
version_violated = False
- sub_slot_violated = False
+ slot_violated = False
use = []
for (type, sub_type), parents in collision_reasons.items():
for x in parents:
if parent == x[0] and atom == x[1]:
if type == "version":
version_violated = True
- elif type == "sub-slot":
- sub_slot_violated = True
+ elif type == "slot":
+ slot_violated = True
elif type == "use":
use.append(sub_type)
break
- atom_str = highlight_violations(atom.unevaluated_atom, version_violated, use)
+ atom_str, colored_idx = highlight_violations(atom.unevaluated_atom,
+ version_violated, use, slot_violated)
- if version_violated or sub_slot_violated:
+ if version_violated or slot_violated:
self.is_a_version_conflict = True
- msg.append("%s required by %s" % (atom_str, parent))
- msg.append("\n")
-
+ cur_line = "%s required by %s\n" % (atom_str, parent)
+ marker_line = ""
+ for ii in range(len(cur_line)):
+ if ii in colored_idx:
+ marker_line += "^"
+ else:
+ marker_line += " "
+ marker_line += "\n"
+ msg.append(2*indent)
+ msg.append(cur_line)
+ msg.append(2*indent)
+ msg.append(marker_line)
+
if not selected_for_display:
msg.append(2*indent)
msg.append("(no parents that aren't satisfied by other packages in this slot)\n")
@@ -490,7 +594,6 @@ class slot_conflict_handler(object):
def get_explanation(self):
msg = ""
- _pkg_use_enabled = self.depgraph._pkg_use_enabled
if self.is_a_version_conflict:
return None
@@ -506,13 +609,13 @@ class slot_conflict_handler(object):
return None
if len(solutions)==1:
- if len(self.slot_collision_info)==1:
+ if len(self.all_conflicts) == 1:
msg += "It might be possible to solve this slot collision\n"
else:
msg += "It might be possible to solve these slot collisions\n"
msg += "by applying all of the following changes:\n"
else:
- if len(self.slot_collision_info)==1:
+ if len(self.all_conflicts) == 1:
msg += "It might be possible to solve this slot collision\n"
else:
msg += "It might be possible to solve these slot collisions\n"
@@ -553,8 +656,7 @@ class slot_conflict_handler(object):
if not pkg.installed:
continue
- for (atom, root), pkgs \
- in self.slot_collision_info.items():
+ for root, atom, pkgs in self.all_conflicts:
if pkg not in pkgs:
continue
for other_pkg in pkgs:
@@ -563,7 +665,9 @@ class slot_conflict_handler(object):
if pkg.iuse.all.symmetric_difference(other_pkg.iuse.all) \
or _pkg_use_enabled(pkg).symmetric_difference(_pkg_use_enabled(other_pkg)):
if self.debug:
- writemsg(str(pkg) + " has pending USE changes. Rejecting configuration.\n", noiselevel=-1)
+ writemsg(("%s has pending USE changes. "
+ "Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
return False
#A list of dicts. Keeps one dict per slot conflict. [ { flag1: "enabled" }, { flag2: "disabled" } ]
@@ -586,16 +690,18 @@ class slot_conflict_handler(object):
if not i.findAtomForPackage(pkg, modified_use=_pkg_use_enabled(pkg)):
#Version range does not match.
if self.debug:
- writemsg(str(pkg) + " does not satify all version requirements." + \
- " Rejecting configuration.\n", noiselevel=-1)
+ writemsg(("%s does not satify all version "
+ "requirements. Rejecting configuration.\n") %
+ (pkg,), noiselevel=-1)
return False
if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required):
#Missing IUSE.
#FIXME: This needs to support use dep defaults.
if self.debug:
- writemsg(str(pkg) + " misses needed flags from IUSE." + \
- " Rejecting configuration.\n", noiselevel=-1)
+ writemsg(("%s misses needed flags from IUSE."
+ " Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
return False
if not isinstance(ppkg, Package) or ppkg.installed:
@@ -620,8 +726,9 @@ class slot_conflict_handler(object):
#We can't change USE of an installed package (only of an ebuild, but that is already
#part of the conflict, isn't it?
if self.debug:
- writemsg(str(pkg) + ": installed package would need USE changes." + \
- " Rejecting configuration.\n", noiselevel=-1)
+ writemsg(("%s: installed package would need USE"
+ " changes. Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
return False
#Compute the required USE changes. A flag can be forced to "enabled" or "disabled",
@@ -675,7 +782,7 @@ class slot_conflict_handler(object):
if self.debug:
writemsg("All involved flags:\n", noiselevel=-1)
for id, involved_flags in enumerate(all_involved_flags):
- writemsg(" " + str(config[id]) + "\n", noiselevel=-1)
+ writemsg(" %s\n" % (config[id],), noiselevel=-1)
for flag, state in involved_flags.items():
writemsg(" " + flag + ": " + state + "\n", noiselevel=-1)
@@ -758,7 +865,7 @@ class slot_conflict_handler(object):
inner_first = False
else:
msg += ", "
- msg += flag + ": " + str(state)
+ msg += flag + ": %s" % (state,)
msg += "}"
msg += "]\n"
writemsg(msg, noiselevel=-1)
@@ -862,8 +969,9 @@ class slot_conflict_handler(object):
#We managed to create a new problem with our changes.
is_valid_solution = False
if self.debug:
- writemsg("new conflict introduced: " + str(pkg) + \
- " does not match " + new_atom + " from " + str(ppkg) + "\n", noiselevel=-1)
+ writemsg(("new conflict introduced: %s"
+ " does not match %s from %s\n") %
+ (pkg, new_atom, ppkg), noiselevel=-1)
break
if not is_valid_solution:
@@ -871,7 +979,7 @@ class slot_conflict_handler(object):
#Make sure the changes don't violate REQUIRED_USE
for pkg in required_changes:
- required_use = pkg.metadata.get("REQUIRED_USE")
+ required_use = pkg._metadata.get("REQUIRED_USE")
if not required_use:
continue
@@ -950,8 +1058,16 @@ class _solution_candidate_generator(object):
else:
return self.value == other.value
def __str__(self):
- return str(self.value)
-
+ return "%s" % (self.value,)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'], errors='backslashreplace')
+
def __init__(self, all_involved_flags):
#A copy of all_involved_flags with all "cond" values
#replaced by a _value_helper object.
diff --git a/pym/_emerge/search.py b/pym/_emerge/search.py
index 5abc8a00c..bd74fb7b1 100644
--- a/pym/_emerge/search.py
+++ b/pym/_emerge/search.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -69,7 +69,7 @@ class search(object):
return db.aux_get(*args, **kwargs)
except KeyError:
pass
- raise
+ raise KeyError(args[0])
def _findname(self, *args, **kwargs):
for db in self._dbs:
diff --git a/pym/_emerge/stdout_spinner.py b/pym/_emerge/stdout_spinner.py
index 5ad31f001..670686adf 100644
--- a/pym/_emerge/stdout_spinner.py
+++ b/pym/_emerge/stdout_spinner.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import platform
@@ -53,17 +53,18 @@ class stdout_spinner(object):
def update_basic(self):
self.spinpos = (self.spinpos + 1) % 500
if self._return_early():
- return
+ return True
if (self.spinpos % 100) == 0:
if self.spinpos == 0:
sys.stdout.write(". ")
else:
sys.stdout.write(".")
sys.stdout.flush()
+ return True
def update_scroll(self):
if self._return_early():
- return
+ return True
if(self.spinpos >= len(self.scroll_sequence)):
sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
@@ -71,13 +72,15 @@ class stdout_spinner(object):
sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
sys.stdout.flush()
self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
+ return True
def update_twirl(self):
self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
if self._return_early():
- return
+ return True
sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
sys.stdout.flush()
+ return True
def update_quiet(self):
- return
+ return True
diff --git a/pym/_emerge/unmerge.py b/pym/_emerge/unmerge.py
index b46b89cb8..b04f8f376 100644
--- a/pym/_emerge/unmerge.py
+++ b/pym/_emerge/unmerge.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -457,9 +457,6 @@ def _unmerge_display(root_config, myopts, unmerge_action,
writemsg_level(colorize("WARN","!!! Unmerging it may " + \
"be damaging to your system.\n\n"),
level=logging.WARNING, noiselevel=-1)
- if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
- countdown(int(settings["EMERGE_WARNING_DELAY"]),
- colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
if not quiet:
writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
else:
diff --git a/pym/portage/__init__.py b/pym/portage/__init__.py
index 46bdc961c..fdbc4a8c2 100644
--- a/pym/portage/__init__.py
+++ b/pym/portage/__init__.py
@@ -1,8 +1,9 @@
-# portage.py -- core Portage functionality
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-VERSION="HEAD"
+from __future__ import unicode_literals
+
+VERSION = "HEAD"
# ===========================================================================
# START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
@@ -16,14 +17,6 @@ try:
errno.ESTALE = -1
import re
import types
-
- # Try the commands module first, since this allows us to eliminate
- # the subprocess module from the baseline imports under python2.
- try:
- from commands import getstatusoutput as subprocess_getstatusoutput
- except ImportError:
- from subprocess import getstatusoutput as subprocess_getstatusoutput
-
import platform
# Temporarily delete these imports, to ensure that only the
@@ -41,7 +34,7 @@ except ImportError as e:
sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
- sys.stderr.write(" "+str(e)+"\n\n");
+ sys.stderr.write(" "+str(e)+"\n\n")
raise
try:
@@ -70,6 +63,7 @@ try:
'match_from_list,match_to_list',
'portage.dep.dep_check:dep_check,dep_eval,dep_wordreduce,dep_zapdeps',
'portage.eclass_cache',
+ 'portage.elog',
'portage.exception',
'portage.getbinpkg',
'portage.locks',
@@ -114,6 +108,7 @@ try:
'cpv_getkey@getCPFromCPV,endversion_keys,' + \
'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
'portage.xpak',
+ 'subprocess',
'time',
)
@@ -145,6 +140,7 @@ except ImportError as e:
raise
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
long = int
@@ -178,6 +174,15 @@ _encodings = {
}
if sys.hexversion >= 0x3000000:
+
+ def _decode_argv(argv):
+ # With Python 3, the surrogateescape encoding error handler makes it
+ # possible to access the original argv bytes, which can be useful
+		# if their actual encoding does not match the filesystem encoding.
+ fs_encoding = sys.getfilesystemencoding()
+ return [_unicode_decode(x.encode(fs_encoding, 'surrogateescape'))
+ for x in argv]
+
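The round-trip described in the comment, as a minimal standalone sketch (Python 3 only):

    import sys

    # surrogateescape lets undecodable argv bytes survive str decoding;
    # encoding with the same handler recovers the original bytes exactly.
    fs_encoding = sys.getfilesystemencoding()
    raw_args = [x.encode(fs_encoding, 'surrogateescape') for x in sys.argv]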
def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
if isinstance(s, str):
s = s.encode(encoding, errors)
@@ -187,7 +192,13 @@ if sys.hexversion >= 0x3000000:
if isinstance(s, bytes):
s = str(s, encoding=encoding, errors=errors)
return s
+
+ _native_string = _unicode_decode
else:
+
+ def _decode_argv(argv):
+ return [_unicode_decode(x) for x in argv]
+
def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
if isinstance(s, unicode):
s = s.encode(encoding, errors)
@@ -198,6 +209,17 @@ else:
s = unicode(s, encoding=encoding, errors=errors)
return s
+ _native_string = _unicode_encode
+
+if sys.hexversion >= 0x20605f0:
+ def _native_kwargs(kwargs):
+ return kwargs
+else:
+ # Avoid "TypeError: keywords must be strings" issue triggered
+ # by unicode_literals: http://bugs.python.org/issue4978
+ def _native_kwargs(kwargs):
+ return dict((_native_string(k), v) for k, v in kwargs.iteritems())
+
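A hypothetical call site for the workaround; configure() and its keys are illustrative only:

    def configure(**kwargs):  # any consumer of keyword arguments
        return kwargs

    opts = {'timeout': 30}  # keys are unicode under unicode_literals
    configure(**_native_kwargs(opts))  # safe on Python < 2.6.5 too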
class _unicode_func_wrapper(object):
"""
Wraps a function, converts arguments from unicode to bytes,
@@ -215,7 +237,7 @@ class _unicode_func_wrapper(object):
self._func = func
self._encoding = encoding
- def __call__(self, *args, **kwargs):
+ def _process_args(self, args, kwargs):
encoding = self._encoding
wrapped_args = [_unicode_encode(x, encoding=encoding, errors='strict')
@@ -227,6 +249,13 @@ class _unicode_func_wrapper(object):
else:
wrapped_kwargs = {}
+ return (wrapped_args, wrapped_kwargs)
+
+ def __call__(self, *args, **kwargs):
+
+ encoding = self._encoding
+ wrapped_args, wrapped_kwargs = self._process_args(args, kwargs)
+
rval = self._func(*wrapped_args, **wrapped_kwargs)
# Don't use isinstance() since we don't want to convert subclasses
@@ -294,12 +323,17 @@ class _unicode_module_wrapper(object):
import os as _os
_os_overrides = {
id(_os.fdopen) : _os.fdopen,
- id(_os.mkfifo) : _os.mkfifo,
id(_os.popen) : _os.popen,
id(_os.read) : _os.read,
id(_os.system) : _os.system,
}
+
+try:
+ _os_overrides[id(_os.mkfifo)] = _os.mkfifo
+except AttributeError:
+ pass # Jython
+
if hasattr(_os, 'statvfs'):
_os_overrides[id(_os.statvfs)] = _os.statvfs
@@ -334,6 +368,25 @@ except (ImportError, OSError) as e:
_python_interpreter = os.path.realpath(sys.executable)
_bin_path = PORTAGE_BIN_PATH
_pym_path = PORTAGE_PYM_PATH
+_not_installed = os.path.isfile(os.path.join(PORTAGE_BASE_PATH, ".portage_not_installed"))
+
+# API consumers included in portage should set this to True.
+_internal_caller = False
+
+_sync_mode = False
+
+def _get_stdin():
+ """
+ Buggy code in python's multiprocessing/process.py closes sys.stdin
+ and reassigns it to open(os.devnull), but fails to update the
+ corresponding __stdin__ reference. So, detect that case and handle
+ it appropriately.
+ """
+ if not sys.__stdin__.closed:
+ return sys.__stdin__
+ return sys.stdin
+
+_shell_quote_re = re.compile(r"[\s><=*\\\"'$`]")
def _shell_quote(s):
"""
@@ -341,6 +394,8 @@ def _shell_quote(s):
escape any backslashes, double-quotes, dollar signs, or
backquotes in the string.
"""
+ if _shell_quote_re.search(s) is None:
+ return s
for letter in "\\\"$`":
if letter in s:
s = s.replace(letter, "\\" + letter)
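A usage sketch; the early return is visible in this hunk, while the final double-quote wrapping is assumed from the escape set rather than shown:

    _shell_quote('/usr/bin/python')    # no metacharacters: returned as-is
    _shell_quote('/path with spaces')  # assumed -> "/path with spaces"
    _shell_quote('a"b$c')              # assumed -> "a\"b\$c"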
@@ -354,8 +409,27 @@ if platform.system() in ('FreeBSD',):
@classmethod
def chflags(cls, path, flags, opts=""):
- cmd = 'chflags %s %o %s' % (opts, flags, _shell_quote(path))
- status, output = subprocess_getstatusoutput(cmd)
+ cmd = ['chflags']
+ if opts:
+ cmd.append(opts)
+ cmd.append('%o' % (flags,))
+ cmd.append(path)
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = process.find_binary(cmd[0])
+ if fullname is None:
+ raise exception.CommandNotFound(cmd[0])
+ cmd[0] = fullname
+
+ encoding = _encodings['fs']
+ cmd = [_unicode_encode(x, encoding=encoding, errors='strict')
+ for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = proc.communicate()[0]
+ status = proc.wait()
if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
return
# Try to generate an ENOENT error if appropriate.
@@ -368,6 +442,7 @@ if platform.system() in ('FreeBSD',):
raise portage.exception.CommandNotFound('chflags')
# Now we're not sure exactly why it failed or what
# the real errno was, so just report EPERM.
+ output = _unicode_decode(output, encoding=encoding)
e = OSError(errno.EPERM, output)
e.errno = errno.EPERM
e.filename = path
@@ -396,20 +471,29 @@ def getcwd():
getcwd()
def abssymlink(symlink, target=None):
- "This reads symlinks, resolving the relative symlinks, and returning the absolute."
+ """
+ This reads symlinks, resolving the relative symlinks,
+ and returning the absolute.
+ @param symlink: path of symlink (must be absolute)
+ @param target: the target of the symlink (as returned
+ by readlink)
+ @rtype: str
+ @return: the absolute path of the symlink target
+ """
if target is not None:
mylink = target
else:
mylink = os.readlink(symlink)
if mylink[0] != '/':
- mydir=os.path.dirname(symlink)
- mylink=mydir+"/"+mylink
+ mydir = os.path.dirname(symlink)
+ mylink = mydir + "/" + mylink
return os.path.normpath(mylink)
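A worked example of the relative-link resolution, with hypothetical paths:

    # /usr/lib/foo -> ../share/foo resolves relative to the symlink's
    # directory, then normpath collapses the "..":
    abssymlink('/usr/lib/foo', target='../share/foo')
    # -> '/usr/share/foo'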
_doebuild_manifest_exempt_depend = 0
-_testing_eapis = frozenset(["4-python", "4-slot-abi"])
-_deprecated_eapis = frozenset(["4_pre1", "3_pre2", "3_pre1"])
+_testing_eapis = frozenset(["4-python", "4-slot-abi", "5-progress", "5-hdepend"])
+_deprecated_eapis = frozenset(["4_pre1", "3_pre2", "3_pre1", "5_pre1", "5_pre2"])
+_supported_eapis = frozenset([str(x) for x in range(portage.const.EAPI)] + list(_testing_eapis) + list(_deprecated_eapis))
def _eapi_is_deprecated(eapi):
return eapi in _deprecated_eapis
@@ -466,13 +550,13 @@ auxdbkeys = (
'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
'PDEPEND', 'PROVIDE', 'EAPI',
- 'PROPERTIES', 'DEFINED_PHASES', 'UNUSED_05', 'UNUSED_04',
+ 'PROPERTIES', 'DEFINED_PHASES', 'HDEPEND', 'UNUSED_04',
'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
)
-auxdbkeylen=len(auxdbkeys)
+auxdbkeylen = len(auxdbkeys)
def portageexit():
- close_portdbapi_caches()
+ pass
class _trees_dict(dict):
__slots__ = ('_running_eroot', '_target_eroot',)
@@ -483,13 +567,6 @@ class _trees_dict(dict):
def create_trees(config_root=None, target_root=None, trees=None, env=None,
eprefix=None):
- if trees is not None:
- # clean up any existing portdbapi instances
- for myroot in trees:
- portdb = trees[myroot]["porttree"].dbapi
- portdb.close_caches()
- portdbapi.portdbapi_instances.remove(portdb)
- del trees[myroot]["porttree"], myroot, portdb
if trees is None:
trees = _trees_dict()
@@ -507,7 +584,7 @@ def create_trees(config_root=None, target_root=None, trees=None, env=None,
trees._target_eroot = settings['EROOT']
myroots = [(settings['EROOT'], settings)]
- if settings["ROOT"] == "/":
+ if settings["ROOT"] == "/" and settings["EPREFIX"] == const.EPREFIX:
trees._running_eroot = trees._target_eroot
else:
@@ -515,15 +592,15 @@ def create_trees(config_root=None, target_root=None, trees=None, env=None,
# environment to apply to the config that's associated
# with ROOT != "/", so pass a nearly empty dict for the env parameter.
clean_env = {}
- for k in ('PATH', 'PORTAGE_GRPNAME', 'PORTAGE_USERNAME',
- 'SSH_AGENT_PID', 'SSH_AUTH_SOCK', 'TERM',
+ for k in ('PATH', 'PORTAGE_GRPNAME', 'PORTAGE_REPOSITORIES', 'PORTAGE_USERNAME',
+ 'PYTHONPATH', 'SSH_AGENT_PID', 'SSH_AUTH_SOCK', 'TERM',
'ftp_proxy', 'http_proxy', 'no_proxy',
'__PORTAGE_TEST_HARDLINK_LOCKS'):
v = settings.get(k)
if v is not None:
clean_env[k] = v
settings = config(config_root=None, target_root="/",
- env=clean_env, eprefix=eprefix)
+ env=clean_env, eprefix=None)
settings.lock()
trees._running_eroot = settings['EROOT']
myroots.append((settings['EROOT'], settings))
@@ -547,11 +624,17 @@ if VERSION == 'HEAD':
if VERSION is not self:
return VERSION
if os.path.isdir(os.path.join(PORTAGE_BASE_PATH, '.git')):
- status, output = subprocess_getstatusoutput((
- "cd %s ; git describe --tags || exit $? ; " + \
+ encoding = _encodings['fs']
+ cmd = [BASH_BINARY, "-c", ("cd %s ; git describe --tags || exit $? ; " + \
"if [ -n \"`git diff-index --name-only --diff-filter=M HEAD`\" ] ; " + \
"then echo modified ; git rev-list --format=%%ct -n 1 HEAD ; fi ; " + \
- "exit 0") % _shell_quote(PORTAGE_BASE_PATH))
+ "exit 0") % _shell_quote(PORTAGE_BASE_PATH)]
+ cmd = [_unicode_encode(x, encoding=encoding, errors='strict')
+ for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = _unicode_decode(proc.communicate()[0], encoding=encoding)
+ status = proc.wait()
if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
output_lines = output.splitlines()
if output_lines:
@@ -561,7 +644,7 @@ if VERSION == 'HEAD':
patchlevel = False
if len(version_split) > 1:
patchlevel = True
- VERSION = "%s_p%s" %(VERSION, version_split[1])
+ VERSION = "%s_p%s" % (VERSION, version_split[1])
if len(output_lines) > 1 and output_lines[1] == 'modified':
head_timestamp = None
if len(output_lines) > 3:
@@ -580,34 +663,17 @@ if VERSION == 'HEAD':
return VERSION
VERSION = _LazyVersion()
-if "_legacy_globals_constructed" in globals():
- # The module has been reloaded, so perform any relevant cleanup
- # and prevent memory leaks.
- if "db" in _legacy_globals_constructed:
- try:
- db
- except NameError:
- pass
- else:
- if isinstance(db, dict) and db:
- for _x in db.values():
- try:
- if "porttree" in _x.lazy_items:
- continue
- except (AttributeError, TypeError):
- continue
- try:
- _x = _x["porttree"].dbapi
- except (AttributeError, KeyError):
- continue
- if not isinstance(_x, portdbapi):
- continue
- _x.close_caches()
- try:
- portdbapi.portdbapi_instances.remove(_x)
- except ValueError:
- pass
- del _x
+_legacy_global_var_names = ("archlist", "db", "features",
+ "groups", "mtimedb", "mtimedbfile", "pkglines",
+ "portdb", "profiledir", "root", "selinux_enabled",
+ "settings", "thirdpartymirrors")
+
+def _reset_legacy_globals():
+
+ global _legacy_globals_constructed
+ _legacy_globals_constructed = set()
+ for k in _legacy_global_var_names:
+ globals()[k] = _LegacyGlobalProxy(k)
class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
@@ -622,16 +688,7 @@ class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
from portage._legacy_globals import _get_legacy_global
return _get_legacy_global(name)
-_legacy_global_var_names = ("archlist", "db", "features",
- "groups", "mtimedb", "mtimedbfile", "pkglines",
- "portdb", "profiledir", "root", "selinux_enabled",
- "settings", "thirdpartymirrors")
-
-for k in _legacy_global_var_names:
- globals()[k] = _LegacyGlobalProxy(k)
-del k
-
-_legacy_globals_constructed = set()
+_reset_legacy_globals()
def _disable_legacy_globals():
"""
diff --git a/pym/portage/_emirrordist/Config.py b/pym/portage/_emirrordist/Config.py
new file mode 100644
index 000000000..db4bfebd4
--- /dev/null
+++ b/pym/portage/_emirrordist/Config.py
@@ -0,0 +1,132 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+import io
+import logging
+import shelve
+import sys
+import time
+
+import portage
+from portage import os
+from portage.util import grabdict, grablines
+from portage.util._ShelveUnicodeWrapper import ShelveUnicodeWrapper
+
+class Config(object):
+ def __init__(self, options, portdb, event_loop):
+ self.options = options
+ self.portdb = portdb
+ self.event_loop = event_loop
+ self.added_byte_count = 0
+ self.added_file_count = 0
+ self.scheduled_deletion_count = 0
+ self.delete_count = 0
+ self.file_owners = {}
+ self.file_failures = {}
+ self.start_time = time.time()
+ self._open_files = []
+
+ self.log_success = self._open_log('success', options.success_log, 'a')
+ self.log_failure = self._open_log('failure', options.failure_log, 'a')
+
+ self.distfiles = None
+ if options.distfiles is not None:
+ self.distfiles = options.distfiles
+
+ self.mirrors = copy.copy(portdb.settings.thirdpartymirrors())
+
+ if options.mirror_overrides is not None:
+ self.mirrors.update(grabdict(options.mirror_overrides))
+
+ if options.mirror_skip is not None:
+ for x in options.mirror_skip.split(","):
+ self.mirrors[x] = []
+
+ self.whitelist = None
+ if options.whitelist_from is not None:
+ self.whitelist = set()
+ for filename in options.whitelist_from:
+ for line in grablines(filename):
+ line = line.strip()
+ if line and not line.startswith("#"):
+ self.whitelist.add(line)
+
+ self.restrict_mirror_exemptions = None
+ if options.restrict_mirror_exemptions is not None:
+ self.restrict_mirror_exemptions = frozenset(
+ options.restrict_mirror_exemptions.split(","))
+
+ self.recycle_db = None
+ if options.recycle_db is not None:
+ self.recycle_db = self._open_shelve(
+ options.recycle_db, 'recycle')
+
+ self.distfiles_db = None
+ if options.distfiles_db is not None:
+ self.distfiles_db = self._open_shelve(
+ options.distfiles_db, 'distfiles')
+
+ self.deletion_db = None
+ if options.deletion_db is not None:
+ self.deletion_db = self._open_shelve(
+ options.deletion_db, 'deletion')
+
+ def _open_log(self, log_desc, log_path, mode):
+
+ if log_path is None or self.options.dry_run:
+ log_func = logging.info
+ line_format = "%s: %%s" % log_desc
+ add_newline = False
+ if log_path is not None:
+ logging.warn(("dry-run: %s log "
+ "redirected to logging.info") % log_desc)
+ else:
+ self._open_files.append(io.open(log_path, mode=mode,
+ encoding='utf_8'))
+ line_format = "%s\n"
+ log_func = self._open_files[-1].write
+
+ return self._LogFormatter(line_format, log_func)
+
+ class _LogFormatter(object):
+
+ __slots__ = ('_line_format', '_log_func')
+
+ def __init__(self, line_format, log_func):
+ self._line_format = line_format
+ self._log_func = log_func
+
+ def __call__(self, msg):
+ self._log_func(self._line_format % (msg,))
+
+ def _open_shelve(self, db_file, db_desc):
+ if self.options.dry_run:
+ open_flag = "r"
+ else:
+ open_flag = "c"
+
+ if self.options.dry_run and not os.path.exists(db_file):
+ db = {}
+ else:
+ db = shelve.open(db_file, flag=open_flag)
+ if sys.hexversion < 0x3000000:
+ db = ShelveUnicodeWrapper(db)
+
+ if self.options.dry_run:
+ logging.warn("dry-run: %s db opened in readonly mode" % db_desc)
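+			# Keep dry-run strictly read-only: copy the shelve into a
+			# plain dict so later writes never touch the db file.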
+ if not isinstance(db, dict):
+ volatile_db = dict((k, db[k]) for k in db)
+ db.close()
+ db = volatile_db
+ else:
+ self._open_files.append(db)
+
+ return db
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ while self._open_files:
+ self._open_files.pop().close()
diff --git a/pym/portage/_emirrordist/DeletionIterator.py b/pym/portage/_emirrordist/DeletionIterator.py
new file mode 100644
index 000000000..dff52c042
--- /dev/null
+++ b/pym/portage/_emirrordist/DeletionIterator.py
@@ -0,0 +1,83 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import stat
+
+from portage import os
+from .DeletionTask import DeletionTask
+
+class DeletionIterator(object):
+
+ def __init__(self, config):
+ self._config = config
+
+ def __iter__(self):
+ distdir = self._config.options.distfiles
+ file_owners = self._config.file_owners
+ whitelist = self._config.whitelist
+ distfiles_local = self._config.options.distfiles_local
+ deletion_db = self._config.deletion_db
+ deletion_delay = self._config.options.deletion_delay
+ start_time = self._config.start_time
+ distfiles_set = set(os.listdir(self._config.options.distfiles))
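+		# A file is kept if an ebuild owns it, the whitelist names it,
+		# or distfiles-local has a copy; anything else becomes a
+		# deletion candidate, subject to the configured delay.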
+ for filename in distfiles_set:
+ try:
+ st = os.stat(os.path.join(distdir, filename))
+ except OSError as e:
+ logging.error("stat failed on '%s' in distfiles: %s\n" %
+ (filename, e))
+ continue
+ if not stat.S_ISREG(st.st_mode):
+ continue
+ elif filename in file_owners:
+ if deletion_db is not None:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ elif whitelist is not None and filename in whitelist:
+ if deletion_db is not None:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ elif distfiles_local is not None and \
+ os.path.exists(os.path.join(distfiles_local, filename)):
+ if deletion_db is not None:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ else:
+ self._config.scheduled_deletion_count += 1
+
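+				# With no deletion db or delay, delete right away;
+				# otherwise record when the file was first seen and
+				# only delete once the delay has elapsed.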
+ if deletion_db is None or deletion_delay is None:
+
+ yield DeletionTask(background=True,
+ distfile=filename,
+ config=self._config)
+
+ else:
+ deletion_entry = deletion_db.get(filename)
+
+ if deletion_entry is None:
+ logging.debug("add '%s' to deletion db" % filename)
+ deletion_db[filename] = start_time
+
+ elif deletion_entry + deletion_delay <= start_time:
+
+ yield DeletionTask(background=True,
+ distfile=filename,
+ config=self._config)
+
+ if deletion_db is not None:
+ for filename in list(deletion_db):
+ if filename not in distfiles_set:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ else:
+ logging.debug("drop '%s' from deletion db" %
+ filename)
diff --git a/pym/portage/_emirrordist/DeletionTask.py b/pym/portage/_emirrordist/DeletionTask.py
new file mode 100644
index 000000000..7d10957fa
--- /dev/null
+++ b/pym/portage/_emirrordist/DeletionTask.py
@@ -0,0 +1,129 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+
+from portage import os
+from portage.util._async.FileCopier import FileCopier
+from _emerge.CompositeTask import CompositeTask
+
+class DeletionTask(CompositeTask):
+
+ __slots__ = ('distfile', 'config')
+
+ def _start(self):
+
+ distfile_path = os.path.join(
+ self.config.options.distfiles, self.distfile)
+
+ if self.config.options.recycle_dir is not None:
+ recycle_path = os.path.join(
+ self.config.options.recycle_dir, self.distfile)
+ if self.config.options.dry_run:
+ logging.info(("dry-run: move '%s' from "
+ "distfiles to recycle") % self.distfile)
+ else:
+ logging.debug(("move '%s' from "
+ "distfiles to recycle") % self.distfile)
+ try:
+ os.rename(distfile_path, recycle_path)
+ except OSError as e:
+ if e.errno != errno.EXDEV:
+ logging.error(("rename %s from distfiles to "
+ "recycle failed: %s") % (self.distfile, e))
+ else:
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ self._start_task(
+ FileCopier(src_path=distfile_path,
+ dest_path=recycle_path,
+ background=False),
+ self._recycle_copier_exit)
+ return
+
+ success = True
+
+ if self.config.options.dry_run:
+ logging.info(("dry-run: delete '%s' from "
+ "distfiles") % self.distfile)
+ else:
+ logging.debug(("delete '%s' from "
+ "distfiles") % self.distfile)
+ try:
+ os.unlink(distfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error("%s unlink failed in distfiles: %s" %
+ (self.distfile, e))
+ success = False
+
+ if success:
+ self._success()
+ self.returncode = os.EX_OK
+ else:
+ self.returncode = 1
+
+ self._async_wait()
+
+ def _recycle_copier_exit(self, copier):
+
+ self._assert_current(copier)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ success = True
+ if copier.returncode == os.EX_OK:
+
+ try:
+ os.unlink(copier.src_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error("%s unlink failed in distfiles: %s" %
+ (self.distfile, e))
+ success = False
+
+ else:
+			# The copier failed; no exception object is in scope here,
+			# so report the copier's exit status instead.
+			logging.error(("%s copy from distfiles "
+				"to recycle failed with status %s") %
+				(self.distfile, copier.returncode))
+ success = False
+
+ if success:
+ self._success()
+ self.returncode = os.EX_OK
+ else:
+ self.returncode = 1
+
+ self._current_task = None
+ self.wait()
+
+ def _success(self):
+
+ cpv = "unknown"
+ if self.config.distfiles_db is not None:
+ cpv = self.config.distfiles_db.get(self.distfile, cpv)
+
+ self.config.delete_count += 1
+ self.config.log_success("%s\t%s\tremoved" % (cpv, self.distfile))
+
+ if self.config.distfiles_db is not None:
+ try:
+ del self.config.distfiles_db[self.distfile]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop '%s' from "
+ "distfiles db") % self.distfile)
+
+ if self.config.deletion_db is not None:
+ try:
+ del self.config.deletion_db[self.distfile]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop '%s' from "
+ "deletion db") % self.distfile)
diff --git a/pym/portage/_emirrordist/FetchIterator.py b/pym/portage/_emirrordist/FetchIterator.py
new file mode 100644
index 000000000..16a0b04c9
--- /dev/null
+++ b/pym/portage/_emirrordist/FetchIterator.py
@@ -0,0 +1,147 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.checksum import (_apply_hash_filter,
+ _filter_unaccelarated_hashes, _hash_filter)
+from portage.dep import use_reduce
+from portage.exception import PortageException
+from .FetchTask import FetchTask
+
+class FetchIterator(object):
+
+ def __init__(self, config):
+ self._config = config
+ self._log_failure = config.log_failure
+
+ def _iter_every_cp(self):
+		# List categories individually so the first results are yielded
+		# sooner and a signal interrupt incurs less latency.
+ cp_all = self._config.portdb.cp_all
+ for category in sorted(self._config.portdb.categories):
+ for cp in cp_all(categories=(category,)):
+ yield cp
+
+ def __iter__(self):
+
+ portdb = self._config.portdb
+ get_repo_for_location = portdb.repositories.get_repo_for_location
+ file_owners = self._config.file_owners
+ file_failures = self._config.file_failures
+ restrict_mirror_exemptions = self._config.restrict_mirror_exemptions
+
+ hash_filter = _hash_filter(
+ portdb.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
+
+ for cp in self._iter_every_cp():
+
+ for tree in portdb.porttrees:
+
+ # Reset state so the Manifest is pulled once
+ # for this cp / tree combination.
+ digests = None
+ repo_config = get_repo_for_location(tree)
+
+ for cpv in portdb.cp_list(cp, mytree=tree):
+
+ try:
+ restrict, = portdb.aux_get(cpv, ("RESTRICT",),
+ mytree=tree)
+ except (KeyError, PortageException) as e:
+ self._log_failure("%s\t\taux_get exception %s" %
+ (cpv, e))
+ continue
+
+ # Here we use matchnone=True to ignore conditional parts
+ # of RESTRICT since they don't apply unconditionally.
+ # Assume such conditionals only apply on the client side.
+ try:
+ restrict = frozenset(use_reduce(restrict,
+ flat=True, matchnone=True))
+ except PortageException as e:
+ self._log_failure("%s\t\tuse_reduce exception %s" %
+ (cpv, e))
+ continue
+
+ if "fetch" in restrict:
+ continue
+
+ try:
+ uri_map = portdb.getFetchMap(cpv)
+ except PortageException as e:
+ self._log_failure("%s\t\tgetFetchMap exception %s" %
+ (cpv, e))
+ continue
+
+ if not uri_map:
+ continue
+
+ if "mirror" in restrict:
+ skip = False
+ if restrict_mirror_exemptions is not None:
+ new_uri_map = {}
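+							# Keep only files with a mirror:// URI whose mirror
+							# name is in the exemption set; skip the rest.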
+ for filename, uri_tuple in uri_map.items():
+ for uri in uri_tuple:
+ if uri[:9] == "mirror://":
+ i = uri.find("/", 9)
+ if i != -1 and uri[9:i].strip("/") in \
+ restrict_mirror_exemptions:
+ new_uri_map[filename] = uri_tuple
+ break
+ if new_uri_map:
+ uri_map = new_uri_map
+ else:
+ skip = True
+ else:
+ skip = True
+
+ if skip:
+ continue
+
+ # Parse Manifest for this cp if we haven't yet.
+ if digests is None:
+ try:
+ digests = repo_config.load_manifest(
+ os.path.join(repo_config.location, cp)
+ ).getTypeDigests("DIST")
+ except (EnvironmentError, PortageException) as e:
+ for filename in uri_map:
+ self._log_failure(
+ "%s\t%s\tManifest exception %s" %
+ (cpv, filename, e))
+ file_failures[filename] = cpv
+ continue
+
+ if not digests:
+ for filename in uri_map:
+ self._log_failure("%s\t%s\tdigest entry missing" %
+ (cpv, filename))
+ file_failures[filename] = cpv
+ continue
+
+ for filename, uri_tuple in uri_map.items():
+ file_digests = digests.get(filename)
+ if file_digests is None:
+ self._log_failure("%s\t%s\tdigest entry missing" %
+ (cpv, filename))
+ file_failures[filename] = cpv
+ continue
+ if filename in file_owners:
+ continue
+ file_owners[filename] = cpv
+
+ file_digests = \
+ _filter_unaccelarated_hashes(file_digests)
+ if hash_filter is not None:
+ file_digests = _apply_hash_filter(
+ file_digests, hash_filter)
+
+ yield FetchTask(cpv=cpv,
+ background=True,
+ digests=file_digests,
+ distfile=filename,
+ restrict=restrict,
+ uri_tuple=uri_tuple,
+ config=self._config)
diff --git a/pym/portage/_emirrordist/FetchTask.py b/pym/portage/_emirrordist/FetchTask.py
new file mode 100644
index 000000000..66c41c1a2
--- /dev/null
+++ b/pym/portage/_emirrordist/FetchTask.py
@@ -0,0 +1,629 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import collections
+import errno
+import logging
+import random
+import stat
+import subprocess
+import sys
+
+import portage
+from portage import _encodings, _unicode_encode
+from portage import os
+from portage.util._async.FileCopier import FileCopier
+from portage.util._async.FileDigester import FileDigester
+from portage.util._async.PipeLogger import PipeLogger
+from portage.util._async.PopenProcess import PopenProcess
+from _emerge.CompositeTask import CompositeTask
+
+default_hash_name = portage.const.MANIFEST2_REQUIRED_HASH
+
+# Use --no-check-certificate since Manifest digests should provide
+# enough security, and certificates may be self-signed or invalid.
+default_fetchcommand = "wget -c -v -t 1 --passive-ftp --no-check-certificate --timeout=60 -O \"${DISTDIR}/${FILE}\" \"${URI}\""
+
+class FetchTask(CompositeTask):
+
+ __slots__ = ('distfile', 'digests', 'config', 'cpv',
+ 'restrict', 'uri_tuple', '_current_mirror',
+ '_current_stat', '_fetch_tmp_dir_info', '_fetch_tmp_file',
+ '_fs_mirror_stack', '_mirror_stack',
+ '_previously_added',
+ '_primaryuri_stack', '_log_path', '_tried_uris')
+
+ def _start(self):
+
+ if self.config.options.fetch_log_dir is not None and \
+ not self.config.options.dry_run:
+ self._log_path = os.path.join(
+ self.config.options.fetch_log_dir,
+ self.distfile + '.log')
+
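+		# Track whether this distfile was already recorded, so stats and
+		# the success log only count files added by this run.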
+ self._previously_added = True
+ if self.config.distfiles_db is not None and \
+ self.distfile not in self.config.distfiles_db:
+ self._previously_added = False
+ self.config.distfiles_db[self.distfile] = self.cpv
+
+ if not self._have_needed_digests():
+ msg = "incomplete digests: %s" % " ".join(self.digests)
+ self.scheduler.output(msg, background=self.background,
+ log_path=self._log_path)
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ distfile_path = os.path.join(
+ self.config.options.distfiles, self.distfile)
+
+ st = None
+ size_ok = False
+ try:
+ st = os.stat(distfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ msg = "%s stat failed in %s: %s" % \
+ (self.distfile, "distfiles", e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ size_ok = st.st_size == self.digests["size"]
+
+ if not size_ok:
+ if self.config.options.dry_run:
+ if st is not None:
+ logging.info(("dry-run: delete '%s' with "
+ "wrong size from distfiles") % (self.distfile,))
+ else:
+ # Do the unlink in order to ensure that the path is clear,
+ # even if stat raised ENOENT, since a broken symlink can
+ # trigger ENOENT.
+ if self._unlink_file(distfile_path, "distfiles"):
+ if st is not None:
+ logging.debug(("delete '%s' with "
+ "wrong size from distfiles") % (self.distfile,))
+ else:
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, "unlink failed in distfiles"))
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ if size_ok:
+ if self.config.options.verify_existing_digest:
+ self._start_task(
+ FileDigester(file_path=distfile_path,
+ hash_names=(self._select_hash(),),
+ background=self.background,
+ logfile=self._log_path), self._distfiles_digester_exit)
+ return
+
+ self._success()
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ self._start_fetch()
+
+ def _success(self):
+ if not self._previously_added:
+ size = self.digests["size"]
+ self.config.added_byte_count += size
+ self.config.added_file_count += 1
+ self.config.log_success("%s\t%s\tadded %i bytes" %
+ (self.cpv, self.distfile, size))
+
+ if self._log_path is not None:
+ if not self.config.options.dry_run:
+ try:
+ os.unlink(self._log_path)
+ except OSError:
+ pass
+
+ if self.config.options.recycle_dir is not None:
+
+ recycle_file = os.path.join(
+ self.config.options.recycle_dir, self.distfile)
+
+ if self.config.options.dry_run:
+ if os.path.exists(recycle_file):
+ logging.info("dry-run: delete '%s' from recycle" %
+ (self.distfile,))
+ else:
+ try:
+ os.unlink(recycle_file)
+ except OSError:
+ pass
+ else:
+ logging.debug("delete '%s' from recycle" %
+ (self.distfile,))
+
+ def _distfiles_digester_exit(self, digester):
+
+ self._assert_current(digester)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if self._default_exit(digester) != os.EX_OK:
+ # IOError reading file in our main distfiles directory? This
+ # is a bad situation which normally does not occur, so
+ # skip this file and report it, in order to draw attention
+ # from the administrator.
+ msg = "%s distfiles digester failed unexpectedly" % \
+ (self.distfile,)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.wait()
+ return
+
+ wrong_digest = self._find_bad_digest(digester.digests)
+ if wrong_digest is None:
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self._start_fetch()
+
+ _mirror_info = collections.namedtuple('_mirror_info',
+ 'name location')
+
+ def _start_fetch(self):
+
+ self._previously_added = False
+ self._fs_mirror_stack = []
+ if self.config.options.distfiles_local is not None:
+ self._fs_mirror_stack.append(self._mirror_info(
+ 'distfiles-local', self.config.options.distfiles_local))
+ if self.config.options.recycle_dir is not None:
+ self._fs_mirror_stack.append(self._mirror_info(
+ 'recycle', self.config.options.recycle_dir))
+
+ self._primaryuri_stack = []
+ self._mirror_stack = []
+ for uri in reversed(self.uri_tuple):
+ if uri.startswith('mirror://'):
+ self._mirror_stack.append(
+ self._mirror_iterator(uri, self.config.mirrors))
+ else:
+ self._primaryuri_stack.append(uri)
+
+ self._tried_uris = set()
+ self._try_next_mirror()
+
+ @staticmethod
+ def _mirror_iterator(uri, mirrors_dict):
+
+ slash_index = uri.find("/", 9)
+ if slash_index != -1:
+ mirror_name = uri[9:slash_index].strip("/")
+ mirrors = mirrors_dict.get(mirror_name)
+ if not mirrors:
+ return
+ mirrors = list(mirrors)
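+			# Pop mirrors in random order so load is spread across them.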
+ while mirrors:
+ mirror = mirrors.pop(random.randint(0, len(mirrors) - 1))
+ yield mirror.rstrip("/") + "/" + uri[slash_index+1:]
+
+ def _try_next_mirror(self):
+ if self._fs_mirror_stack:
+ self._fetch_fs(self._fs_mirror_stack.pop())
+ return
+ else:
+ uri = self._next_uri()
+ if uri is not None:
+ self._tried_uris.add(uri)
+ self._fetch_uri(uri)
+ return
+
+ if self._tried_uris:
+ msg = "all uris failed"
+ else:
+ msg = "no fetchable uris"
+
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def _next_uri(self):
+ remaining_tries = self.config.options.tries - len(self._tried_uris)
+ if remaining_tries > 0:
+
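+			# Once half of the allowed tries are used up, prefer the
+			# primary URIs before falling back to more mirrors.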
+ if remaining_tries <= self.config.options.tries / 2:
+ while self._primaryuri_stack:
+ uri = self._primaryuri_stack.pop()
+ if uri not in self._tried_uris:
+ return uri
+
+ while self._mirror_stack:
+ uri = next(self._mirror_stack[-1], None)
+ if uri is None:
+ self._mirror_stack.pop()
+ else:
+ if uri not in self._tried_uris:
+ return uri
+
+ while self._primaryuri_stack:
+ uri = self._primaryuri_stack.pop()
+ if uri not in self._tried_uris:
+ return uri
+
+ return None
+
+ def _fetch_fs(self, mirror_info):
+ file_path = os.path.join(mirror_info.location, self.distfile)
+
+ st = None
+ size_ok = False
+ try:
+ st = os.stat(file_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ msg = "%s stat failed in %s: %s" % \
+ (self.distfile, mirror_info.name, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ size_ok = st.st_size == self.digests["size"]
+ self._current_stat = st
+
+ if size_ok:
+ self._current_mirror = mirror_info
+ self._start_task(
+ FileDigester(file_path=file_path,
+ hash_names=(self._select_hash(),),
+ background=self.background,
+ logfile=self._log_path),
+ self._fs_mirror_digester_exit)
+ else:
+ self._try_next_mirror()
+
+ def _fs_mirror_digester_exit(self, digester):
+
+ self._assert_current(digester)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ current_mirror = self._current_mirror
+ if digester.returncode != os.EX_OK:
+ msg = "%s %s digester failed unexpectedly" % \
+ (self.distfile, current_mirror.name)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ bad_digest = self._find_bad_digest(digester.digests)
+ if bad_digest is not None:
+ msg = "%s %s has bad %s digest: expected %s, got %s" % \
+ (self.distfile, current_mirror.name, bad_digest,
+ self.digests[bad_digest], digester.digests[bad_digest])
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ elif self.config.options.dry_run:
+ # Report success without actually touching any files
+ if self._same_device(current_mirror.location,
+ self.config.options.distfiles):
+ logging.info(("dry-run: hardlink '%s' from %s "
+ "to distfiles") % (self.distfile, current_mirror.name))
+ else:
+ logging.info("dry-run: copy '%s' from %s to distfiles" %
+ (self.distfile, current_mirror.name))
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+ else:
+ src = os.path.join(current_mirror.location, self.distfile)
+ dest = os.path.join(self.config.options.distfiles, self.distfile)
+ if self._hardlink_atomic(src, dest,
+ "%s to %s" % (current_mirror.name, "distfiles")):
+ logging.debug("hardlink '%s' from %s to distfiles" %
+ (self.distfile, current_mirror.name))
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+ else:
+ self._start_task(
+ FileCopier(src_path=src, dest_path=dest,
+ background=(self.background and
+ self._log_path is not None),
+ logfile=self._log_path),
+ self._fs_mirror_copier_exit)
+ return
+
+ self._try_next_mirror()
+
+ def _fs_mirror_copier_exit(self, copier):
+
+ self._assert_current(copier)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ current_mirror = self._current_mirror
+ if copier.returncode != os.EX_OK:
+ msg = "%s %s copy failed unexpectedly" % \
+ (self.distfile, current_mirror.name)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+
+ logging.debug("copy '%s' from %s to distfiles" %
+ (self.distfile, current_mirror.name))
+
+ # Apply the timestamp from the source file, but
+ # just rely on umask for permissions.
+ try:
+ if sys.hexversion >= 0x3030000:
+ os.utime(copier.dest_path,
+ ns=(self._current_stat.st_mtime_ns,
+ self._current_stat.st_mtime_ns))
+ else:
+ os.utime(copier.dest_path,
+ (self._current_stat[stat.ST_MTIME],
+ self._current_stat[stat.ST_MTIME]))
+ except OSError as e:
+ msg = "%s %s utime failed unexpectedly: %s" % \
+ (self.distfile, current_mirror.name, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self._try_next_mirror()
+
+ def _fetch_uri(self, uri):
+
+ if self.config.options.dry_run:
+ # Simply report success.
+ logging.info("dry-run: fetch '%s' from '%s'" %
+ (self.distfile, uri))
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ if self.config.options.temp_dir:
+ self._fetch_tmp_dir_info = 'temp-dir'
+ distdir = self.config.options.temp_dir
+ else:
+ self._fetch_tmp_dir_info = 'distfiles'
+ distdir = self.config.options.distfiles
+
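+		# Download to a PID-suffixed temp name so concurrent runs never
+		# clobber each other's partial downloads.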
+ tmp_basename = self.distfile + '._emirrordist_fetch_.%s' % os.getpid()
+
+ variables = {
+ "DISTDIR": distdir,
+ "URI": uri,
+ "FILE": tmp_basename
+ }
+
+ self._fetch_tmp_file = os.path.join(distdir, tmp_basename)
+
+ try:
+ os.unlink(self._fetch_tmp_file)
+ except OSError:
+ pass
+
+ args = portage.util.shlex_split(default_fetchcommand)
+ args = [portage.util.varexpand(x, mydict=variables)
+ for x in args]
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(args[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ args = [_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict') for x in args]
+
+ null_fd = os.open(os.devnull, os.O_RDONLY)
+ fetcher = PopenProcess(background=self.background,
+ proc=subprocess.Popen(args, stdin=null_fd,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ scheduler=self.scheduler)
+ os.close(null_fd)
+
+ fetcher.pipe_reader = PipeLogger(background=self.background,
+ input_fd=fetcher.proc.stdout, log_file_path=self._log_path,
+ scheduler=self.scheduler)
+
+ self._start_task(fetcher, self._fetcher_exit)
+
+ def _fetcher_exit(self, fetcher):
+
+ self._assert_current(fetcher)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if os.path.exists(self._fetch_tmp_file):
+ self._start_task(
+ FileDigester(file_path=self._fetch_tmp_file,
+ hash_names=(self._select_hash(),),
+ background=self.background,
+ logfile=self._log_path),
+ self._fetch_digester_exit)
+ else:
+ self._try_next_mirror()
+
+ def _fetch_digester_exit(self, digester):
+
+ self._assert_current(digester)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if digester.returncode != os.EX_OK:
+ msg = "%s %s digester failed unexpectedly" % \
+ (self.distfile, self._fetch_tmp_dir_info)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ bad_digest = self._find_bad_digest(digester.digests)
+ if bad_digest is not None:
+ msg = "%s has bad %s digest: expected %s, got %s" % \
+ (self.distfile, bad_digest,
+ self.digests[bad_digest], digester.digests[bad_digest])
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ try:
+ os.unlink(self._fetch_tmp_file)
+ except OSError:
+ pass
+ else:
+ dest = os.path.join(self.config.options.distfiles, self.distfile)
+ try:
+ os.rename(self._fetch_tmp_file, dest)
+ except OSError:
+ self._start_task(
+ FileCopier(src_path=self._fetch_tmp_file,
+ dest_path=dest,
+ background=(self.background and
+ self._log_path is not None),
+ logfile=self._log_path),
+ self._fetch_copier_exit)
+ return
+ else:
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self._try_next_mirror()
+
+ def _fetch_copier_exit(self, copier):
+
+ self._assert_current(copier)
+
+ try:
+ os.unlink(self._fetch_tmp_file)
+ except OSError:
+ pass
+
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if copier.returncode == os.EX_OK:
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ else:
+ # out of space?
+ msg = "%s %s copy failed unexpectedly" % \
+ (self.distfile, self._fetch_tmp_dir_info)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.returncode = 1
+ self.wait()
+
+ def _unlink_file(self, file_path, dir_info):
+ try:
+ os.unlink(file_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ msg = "unlink '%s' failed in %s: %s" % \
+ (self.distfile, dir_info, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ return False
+ return True
+
+ def _have_needed_digests(self):
+ return "size" in self.digests and \
+ self._select_hash() is not None
+
+ def _select_hash(self):
+ if default_hash_name in self.digests:
+ return default_hash_name
+ else:
+ for hash_name in self.digests:
+ if hash_name != "size" and \
+ hash_name in portage.checksum.hashfunc_map:
+ return hash_name
+
+ return None
+
+ def _find_bad_digest(self, digests):
+ for hash_name, hash_value in digests.items():
+ if self.digests[hash_name] != hash_value:
+ return hash_name
+ return None
+
+ @staticmethod
+ def _same_device(path1, path2):
+ try:
+ st1 = os.stat(path1)
+ st2 = os.stat(path2)
+ except OSError:
+ return False
+ else:
+ return st1.st_dev == st2.st_dev
+
+ def _hardlink_atomic(self, src, dest, dir_info):
+
+ head, tail = os.path.split(dest)
+ hardlink_tmp = os.path.join(head, ".%s._mirrordist_hardlink_.%s" % \
+ (tail, os.getpid()))
+
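+		# Create the link under a temporary name, then rename() it over
+		# dest, so dest is only ever seen fully populated.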
+ try:
+ try:
+ os.link(src, hardlink_tmp)
+ except OSError as e:
+ if e.errno != errno.EXDEV:
+ msg = "hardlink %s from %s failed: %s" % \
+ (self.distfile, dir_info, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ return False
+
+ try:
+ os.rename(hardlink_tmp, dest)
+ except OSError as e:
+ msg = "hardlink rename '%s' from %s failed: %s" % \
+ (self.distfile, dir_info, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ return False
+ finally:
+ try:
+ os.unlink(hardlink_tmp)
+ except OSError:
+ pass
+
+ return True
diff --git a/pym/portage/_emirrordist/MirrorDistTask.py b/pym/portage/_emirrordist/MirrorDistTask.py
new file mode 100644
index 000000000..571caa52d
--- /dev/null
+++ b/pym/portage/_emirrordist/MirrorDistTask.py
@@ -0,0 +1,219 @@
+# Copyright 2013-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import sys
+import time
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage
+from portage import os
+from portage.util._async.TaskScheduler import TaskScheduler
+from _emerge.CompositeTask import CompositeTask
+from .FetchIterator import FetchIterator
+from .DeletionIterator import DeletionIterator
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+class MirrorDistTask(CompositeTask):
+
+ __slots__ = ('_config', '_terminated', '_term_check_id')
+
+ def __init__(self, config):
+ CompositeTask.__init__(self, scheduler=config.event_loop)
+ self._config = config
+ self._terminated = threading.Event()
+
+ def _start(self):
+ self._term_check_id = self.scheduler.idle_add(self._termination_check)
+ fetch = TaskScheduler(iter(FetchIterator(self._config)),
+ max_jobs=self._config.options.jobs,
+ max_load=self._config.options.load_average,
+ event_loop=self._config.event_loop)
+ self._start_task(fetch, self._fetch_exit)
+
+ def _fetch_exit(self, fetch):
+
+ self._assert_current(fetch)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if self._config.options.delete:
+ deletion = TaskScheduler(iter(DeletionIterator(self._config)),
+ max_jobs=self._config.options.jobs,
+ max_load=self._config.options.load_average,
+ event_loop=self._config.event_loop)
+ self._start_task(deletion, self._deletion_exit)
+ return
+
+ self._post_deletion()
+
+ def _deletion_exit(self, deletion):
+
+ self._assert_current(deletion)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ self._post_deletion()
+
+ def _post_deletion(self):
+
+ if self._config.options.recycle_db is not None:
+ self._update_recycle_db()
+
+ if self._config.options.scheduled_deletion_log is not None:
+ self._scheduled_deletion_log()
+
+ self._summary()
+
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+
+ def _update_recycle_db(self):
+
+ start_time = self._config.start_time
+ recycle_dir = self._config.options.recycle_dir
+ recycle_db = self._config.recycle_db
+ r_deletion_delay = self._config.options.recycle_deletion_delay
+
+		# Use a dict to optimize access.
+ recycle_db_cache = dict(recycle_db.items())
+
+ for filename in os.listdir(recycle_dir):
+
+ recycle_file = os.path.join(recycle_dir, filename)
+
+ try:
+ st = os.stat(recycle_file)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error(("stat failed for '%s' in "
+ "recycle: %s") % (filename, e))
+ continue
+
+ value = recycle_db_cache.pop(filename, None)
+ if value is None:
+ logging.debug(("add '%s' to "
+ "recycle db") % filename)
+ recycle_db[filename] = (st.st_size, start_time)
+ else:
+ r_size, r_time = value
+ if long(r_size) != st.st_size:
+ recycle_db[filename] = (st.st_size, start_time)
+ elif r_time + r_deletion_delay < start_time:
+ if self._config.options.dry_run:
+ logging.info(("dry-run: delete '%s' from "
+ "recycle") % filename)
+ logging.info(("drop '%s' from "
+ "recycle db") % filename)
+ else:
+ try:
+ os.unlink(recycle_file)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error(("delete '%s' from "
+ "recycle failed: %s") % (filename, e))
+ else:
+ logging.debug(("delete '%s' from "
+ "recycle") % filename)
+ try:
+ del recycle_db[filename]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop '%s' from "
+ "recycle db") % filename)
+
+ # Existing files were popped from recycle_db_cache,
+ # so any remaining entries are for files that no
+ # longer exist.
+ for filename in recycle_db_cache:
+ try:
+ del recycle_db[filename]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop non-existent '%s' from "
+ "recycle db") % filename)
+
+ def _scheduled_deletion_log(self):
+
+ start_time = self._config.start_time
+ dry_run = self._config.options.dry_run
+ deletion_delay = self._config.options.deletion_delay
+ distfiles_db = self._config.distfiles_db
+
+ date_map = {}
+ for filename, timestamp in self._config.deletion_db.items():
+ date = timestamp + deletion_delay
+ if date < start_time:
+ date = start_time
+ date = time.strftime("%Y-%m-%d", time.gmtime(date))
+ date_files = date_map.get(date)
+ if date_files is None:
+ date_files = []
+ date_map[date] = date_files
+ date_files.append(filename)
+
+ if dry_run:
+ logging.warn(("dry-run: scheduled-deletions log "
+ "will be summarized via logging.info"))
+
+ lines = []
+ for date in sorted(date_map):
+ date_files = date_map[date]
+ if dry_run:
+ logging.info(("dry-run: scheduled deletions for %s: %s files") %
+ (date, len(date_files)))
+ lines.append("%s\n" % date)
+ for filename in date_files:
+ cpv = "unknown"
+ if distfiles_db is not None:
+ cpv = distfiles_db.get(filename, cpv)
+ lines.append("\t%s\t%s\n" % (filename, cpv))
+
+ if not dry_run:
+ portage.util.write_atomic(
+ self._config.options.scheduled_deletion_log,
+ "".join(lines))
+
+ def _summary(self):
+ elapsed_time = time.time() - self._config.start_time
+ fail_count = len(self._config.file_failures)
+ delete_count = self._config.delete_count
+ scheduled_deletion_count = self._config.scheduled_deletion_count - delete_count
+ added_file_count = self._config.added_file_count
+ added_byte_count = self._config.added_byte_count
+
+ logging.info("finished in %i seconds" % elapsed_time)
+ logging.info("failed to fetch %i files" % fail_count)
+ logging.info("deleted %i files" % delete_count)
+ logging.info("deletion of %i files scheduled" %
+ scheduled_deletion_count)
+ logging.info("added %i files" % added_file_count)
+ logging.info("added %i bytes total" % added_byte_count)
+
+ def terminate(self):
+ self._terminated.set()
+
+ def _termination_check(self):
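+		# Runs via idle_add; returning True keeps the callback scheduled
+		# so termination is noticed promptly.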
+ if self._terminated.is_set():
+ self.cancel()
+ self.wait()
+ return True
+
+ def _wait(self):
+ CompositeTask._wait(self)
+ if self._term_check_id is not None:
+ self.scheduler.source_remove(self._term_check_id)
+ self._term_check_id = None
diff --git a/pym/portage/_emirrordist/__init__.py b/pym/portage/_emirrordist/__init__.py
new file mode 100644
index 000000000..6cde9320b
--- /dev/null
+++ b/pym/portage/_emirrordist/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/pym/portage/_emirrordist/main.py b/pym/portage/_emirrordist/main.py
new file mode 100644
index 000000000..ce92c2aea
--- /dev/null
+++ b/pym/portage/_emirrordist/main.py
@@ -0,0 +1,463 @@
+# Copyright 2013-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import sys
+
+import portage
+from portage import os
+from portage.util import normalize_path, writemsg_level, _recursive_file_list
+from portage.util._argparse import ArgumentParser
+from portage.util._async.run_main_scheduler import run_main_scheduler
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
+from .Config import Config
+from .MirrorDistTask import MirrorDistTask
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+seconds_per_day = 24 * 60 * 60
+
+common_options = (
+ {
+ "longopt" : "--dry-run",
+ "help" : "perform a trial run with no changes made (usually combined "
+ "with --verbose)",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--verbose",
+ "shortopt" : "-v",
+ "help" : "display extra information on stderr "
+			"(multiple occurrences increase verbosity)",
+ "action" : "count",
+ "default" : 0,
+ },
+ {
+ "longopt" : "--ignore-default-opts",
+ "help" : "do not use the EMIRRORDIST_DEFAULT_OPTS environment variable",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--distfiles",
+ "help" : "distfiles directory to use (required)",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--jobs",
+ "shortopt" : "-j",
+ "help" : "number of concurrent jobs to run",
+ "type" : int
+ },
+ {
+ "longopt" : "--load-average",
+ "shortopt" : "-l",
+ "help" : "load average limit for spawning of new concurrent jobs",
+ "metavar" : "LOAD",
+ "type" : float
+ },
+ {
+ "longopt" : "--tries",
+ "help" : "maximum number of tries per file, 0 means unlimited (default is 10)",
+ "default" : 10,
+ "type" : int
+ },
+ {
+ "longopt" : "--repo",
+ "help" : "name of repo to operate on"
+ },
+ {
+ "longopt" : "--config-root",
+ "help" : "location of portage config files",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--portdir",
+ "help" : "override the PORTDIR variable (deprecated in favor of --repositories-configuration)",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--portdir-overlay",
+ "help" : "override the PORTDIR_OVERLAY variable (deprecated in favor of --repositories-configuration)"
+ },
+ {
+ "longopt" : "--repositories-configuration",
+ "help" : "override configuration of repositories (in format of repos.conf)"
+ },
+ {
+ "longopt" : "--strict-manifests",
+ "help" : "manually override \"strict\" FEATURES setting",
+ "choices" : ("y", "n"),
+ "metavar" : "<y|n>",
+ },
+ {
+ "longopt" : "--failure-log",
+ "help" : "log file for fetch failures, with tab-delimited "
+ "output, for reporting purposes",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--success-log",
+ "help" : "log file for fetch successes, with tab-delimited "
+ "output, for reporting purposes",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--scheduled-deletion-log",
+ "help" : "log file for scheduled deletions, with tab-delimited "
+ "output, for reporting purposes",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--delete",
+ "help" : "enable deletion of unused distfiles",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--deletion-db",
+ "help" : "database file used to track lifetime of files "
+ "scheduled for delayed deletion",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--deletion-delay",
+ "help" : "delay time for deletion, measured in seconds",
+ "metavar" : "SECONDS"
+ },
+ {
+ "longopt" : "--temp-dir",
+ "help" : "temporary directory for downloads",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--mirror-overrides",
+ "help" : "file holding a list of mirror overrides",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--mirror-skip",
+ "help" : "comma delimited list of mirror targets to skip "
+ "when fetching"
+ },
+ {
+ "longopt" : "--restrict-mirror-exemptions",
+ "help" : "comma delimited list of mirror targets for which to "
+ "ignore RESTRICT=\"mirror\""
+ },
+ {
+ "longopt" : "--verify-existing-digest",
+ "help" : "use digest as a verification of whether existing "
+ "distfiles are valid",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--distfiles-local",
+ "help" : "distfiles-local directory to use",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--distfiles-db",
+ "help" : "database file used to track which ebuilds a "
+ "distfile belongs to",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--recycle-dir",
+ "help" : "directory for extended retention of files that "
+ "are removed from distdir with the --delete option",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--recycle-db",
+ "help" : "database file used to track lifetime of files "
+ "in recycle dir",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--recycle-deletion-delay",
+ "help" : "delay time for deletion of unused files from "
+ "recycle dir, measured in seconds (defaults to "
+ "the equivalent of 60 days)",
+ "default" : 60 * seconds_per_day,
+ "metavar" : "SECONDS",
+ "type" : int
+ },
+ {
+ "longopt" : "--fetch-log-dir",
+ "help" : "directory for individual fetch logs",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--whitelist-from",
+ "help" : "specifies a file containing a list of files to "
+			"whitelist, one per line; lines prefixed with # are ignored",
+ "action" : "append",
+ "metavar" : "FILE"
+ },
+)
+
+def parse_args(args):
+ description = "emirrordist - a fetch tool for mirroring " \
+ "of package distfiles"
+ usage = "emirrordist [options] <action>"
+ parser = ArgumentParser(description=description, usage=usage)
+
+ actions = parser.add_argument_group('Actions')
+ actions.add_argument("--version",
+ action="store_true",
+ help="display portage version and exit")
+ actions.add_argument("--mirror",
+ action="store_true",
+ help="mirror distfiles for the selected repository")
+
+ common = parser.add_argument_group('Common options')
+ for opt_info in common_options:
+ opt_pargs = [opt_info["longopt"]]
+ if opt_info.get("shortopt"):
+ opt_pargs.append(opt_info["shortopt"])
+ opt_kwargs = {"help" : opt_info["help"]}
+ for k in ("action", "choices", "default", "metavar", "type"):
+ if k in opt_info:
+ opt_kwargs[k] = opt_info[k]
+ common.add_argument(*opt_pargs, **opt_kwargs)
+
+ options, args = parser.parse_known_args(args)
+
+ return (parser, options, args)
+
+def emirrordist_main(args):
+
+ # The calling environment is ignored, so the program is
+ # completely controlled by commandline arguments.
+ env = {}
+
+ if not sys.stdout.isatty():
+ portage.output.nocolor()
+ env['NOCOLOR'] = 'true'
+
+ parser, options, args = parse_args(args)
+
+ if options.version:
+ sys.stdout.write("Portage %s\n" % portage.VERSION)
+ return os.EX_OK
+
+ config_root = options.config_root
+
+ if options.portdir is not None:
+ writemsg_level("emirrordist: warning: --portdir option is deprecated in favor of --repositories-configuration option\n",
+ level=logging.WARNING, noiselevel=-1)
+ if options.portdir_overlay is not None:
+ writemsg_level("emirrordist: warning: --portdir-overlay option is deprecated in favor of --repositories-configuration option\n",
+ level=logging.WARNING, noiselevel=-1)
+
+ if options.repositories_configuration is not None:
+ env['PORTAGE_REPOSITORIES'] = options.repositories_configuration
+ elif options.portdir_overlay is not None:
+ env['PORTDIR_OVERLAY'] = options.portdir_overlay
+
+ if options.portdir is not None:
+ env['PORTDIR'] = options.portdir
+
+ settings = portage.config(config_root=config_root,
+ local_config=False, env=env)
+
+ default_opts = None
+ if not options.ignore_default_opts:
+ default_opts = settings.get('EMIRRORDIST_DEFAULT_OPTS', '').split()
+
+ if default_opts:
+ parser, options, args = parse_args(default_opts + args)
+
+ settings = portage.config(config_root=config_root,
+ local_config=False, env=env)
+
+ if options.repo is None:
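+		# If exactly one repo is configured (besides DEFAULT), select
+		# it automatically.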
+ if len(settings.repositories.prepos) == 2:
+ for repo in settings.repositories:
+ if repo.name != "DEFAULT":
+ options.repo = repo.name
+ break
+
+ if options.repo is None:
+ parser.error("--repo option is required")
+
+ repo_path = settings.repositories.treemap.get(options.repo)
+ if repo_path is None:
+ parser.error("Unable to locate repository named '%s'" % (options.repo,))
+
+ if options.jobs is not None:
+ options.jobs = int(options.jobs)
+
+ if options.load_average is not None:
+ options.load_average = float(options.load_average)
+
+ if options.failure_log is not None:
+ options.failure_log = normalize_path(
+ os.path.abspath(options.failure_log))
+
+ parent_dir = os.path.dirname(options.failure_log)
+ if not (os.path.isdir(parent_dir) and
+ os.access(parent_dir, os.W_OK|os.X_OK)):
+ parser.error(("--failure-log '%s' parent is not a "
+ "writable directory") % options.failure_log)
+
+ if options.success_log is not None:
+ options.success_log = normalize_path(
+ os.path.abspath(options.success_log))
+
+ parent_dir = os.path.dirname(options.success_log)
+ if not (os.path.isdir(parent_dir) and
+ os.access(parent_dir, os.W_OK|os.X_OK)):
+ parser.error(("--success-log '%s' parent is not a "
+ "writable directory") % options.success_log)
+
+ if options.scheduled_deletion_log is not None:
+ options.scheduled_deletion_log = normalize_path(
+ os.path.abspath(options.scheduled_deletion_log))
+
+ parent_dir = os.path.dirname(options.scheduled_deletion_log)
+ if not (os.path.isdir(parent_dir) and
+ os.access(parent_dir, os.W_OK|os.X_OK)):
+ parser.error(("--scheduled-deletion-log '%s' parent is not a "
+ "writable directory") % options.scheduled_deletion_log)
+
+ if options.deletion_db is None:
+ parser.error("--scheduled-deletion-log requires --deletion-db")
+
+ if options.deletion_delay is not None:
+ options.deletion_delay = long(options.deletion_delay)
+ if options.deletion_db is None:
+ parser.error("--deletion-delay requires --deletion-db")
+
+ if options.deletion_db is not None:
+ if options.deletion_delay is None:
+ parser.error("--deletion-db requires --deletion-delay")
+ options.deletion_db = normalize_path(
+ os.path.abspath(options.deletion_db))
+
+ if options.temp_dir is not None:
+ options.temp_dir = normalize_path(
+ os.path.abspath(options.temp_dir))
+
+ if not (os.path.isdir(options.temp_dir) and
+ os.access(options.temp_dir, os.W_OK|os.X_OK)):
+ parser.error(("--temp-dir '%s' is not a "
+ "writable directory") % options.temp_dir)
+
+ if options.distfiles is not None:
+ options.distfiles = normalize_path(
+ os.path.abspath(options.distfiles))
+
+ if not (os.path.isdir(options.distfiles) and
+ os.access(options.distfiles, os.W_OK|os.X_OK)):
+ parser.error(("--distfiles '%s' is not a "
+ "writable directory") % options.distfiles)
+ else:
+ parser.error("missing required --distfiles parameter")
+
+ if options.mirror_overrides is not None:
+ options.mirror_overrides = normalize_path(
+ os.path.abspath(options.mirror_overrides))
+
+ if not (os.access(options.mirror_overrides, os.R_OK) and
+ os.path.isfile(options.mirror_overrides)):
+ parser.error(
+				"--mirror-overrides '%s' is not a readable file" %
+ options.mirror_overrides)
+
+ if options.distfiles_local is not None:
+ options.distfiles_local = normalize_path(
+ os.path.abspath(options.distfiles_local))
+
+ if not (os.path.isdir(options.distfiles_local) and
+ os.access(options.distfiles_local, os.W_OK|os.X_OK)):
+ parser.error(("--distfiles-local '%s' is not a "
+ "writable directory") % options.distfiles_local)
+
+ if options.distfiles_db is not None:
+ options.distfiles_db = normalize_path(
+ os.path.abspath(options.distfiles_db))
+
+ if options.tries is not None:
+ options.tries = int(options.tries)
+
+ if options.recycle_dir is not None:
+ options.recycle_dir = normalize_path(
+ os.path.abspath(options.recycle_dir))
+ if not (os.path.isdir(options.recycle_dir) and
+ os.access(options.recycle_dir, os.W_OK|os.X_OK)):
+ parser.error(("--recycle-dir '%s' is not a "
+ "writable directory") % options.recycle_dir)
+
+ if options.recycle_db is not None:
+ if options.recycle_dir is None:
+ parser.error("--recycle-db requires "
+ "--recycle-dir to be specified")
+ options.recycle_db = normalize_path(
+ os.path.abspath(options.recycle_db))
+
+ if options.recycle_deletion_delay is not None:
+ options.recycle_deletion_delay = \
+ long(options.recycle_deletion_delay)
+
+ if options.fetch_log_dir is not None:
+ options.fetch_log_dir = normalize_path(
+ os.path.abspath(options.fetch_log_dir))
+
+ if not (os.path.isdir(options.fetch_log_dir) and
+ os.access(options.fetch_log_dir, os.W_OK|os.X_OK)):
+ parser.error(("--fetch-log-dir '%s' is not a "
+ "writable directory") % options.fetch_log_dir)
+
+ if options.whitelist_from:
+ normalized_paths = []
+ for x in options.whitelist_from:
+ path = normalize_path(os.path.abspath(x))
+ if not os.access(path, os.R_OK):
+ parser.error("--whitelist-from '%s' is not readable" % x)
+ if os.path.isfile(path):
+ normalized_paths.append(path)
+ elif os.path.isdir(path):
+ for file in _recursive_file_list(path):
+ if not os.access(file, os.R_OK):
+						parser.error("--whitelist-from '%s' directory contains unreadable file '%s'" % (x, file))
+ normalized_paths.append(file)
+ else:
+ parser.error("--whitelist-from '%s' is not a regular file or a directory" % x)
+ options.whitelist_from = normalized_paths
+
+ if options.strict_manifests is not None:
+ if options.strict_manifests == "y":
+ settings.features.add("strict")
+ else:
+ settings.features.discard("strict")
+
+ settings.lock()
+
+ portdb = portage.portdbapi(mysettings=settings)
+
+ # Limit ebuilds to the specified repo.
+ portdb.porttrees = [repo_path]
+
+ portage.util.initialize_logger()
+
+ if options.verbose > 0:
+ l = logging.getLogger()
+ l.setLevel(l.getEffectiveLevel() - 10 * options.verbose)
+
+ with Config(options, portdb,
+ SchedulerInterface(global_event_loop())) as config:
+
+ if not options.mirror:
+ parser.error('No action specified')
+
+ returncode = os.EX_OK
+
+ if options.mirror:
+ signum = run_main_scheduler(MirrorDistTask(config))
+ if signum is not None:
+ sys.exit(128 + signum)
+
+ return returncode
diff --git a/pym/portage/_global_updates.py b/pym/portage/_global_updates.py
index c0f3df0b6..dde726836 100644
--- a/pym/portage/_global_updates.py
+++ b/pym/portage/_global_updates.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -46,12 +46,6 @@ def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
portdb = trees[root]["porttree"].dbapi
vardb = trees[root]["vartree"].dbapi
bindb = trees[root]["bintree"].dbapi
- if not os.access(bindb.bintree.pkgdir, os.W_OK):
- bindb = None
- else:
- # Call binarytree.populate(), since we want to make sure it's
- # only populated with local packages here (getbinpkgs=0).
- bindb.bintree.populate()
world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
world_list = grabfile(world_file)
@@ -92,14 +86,10 @@ def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
if not update_notice_printed:
update_notice_printed = True
writemsg_stdout("\n")
- if quiet:
- writemsg_stdout(colorize("GOOD",
- _("Performing Global Updates\n")))
- writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
- else:
- writemsg_stdout(colorize("GOOD",
- _("Performing Global Updates:\n")))
- writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
+ writemsg_stdout(colorize("GOOD",
+ _("Performing Global Updates\n")))
+ writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
+ if not quiet:
writemsg_stdout(_(" %s='update pass' %s='binary update' "
"%s='/var/db update' %s='/var/db move'\n"
" %s='/var/db SLOT move' %s='binary move' "
@@ -120,63 +110,71 @@ def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
if myupd:
retupd = True
+ if retupd:
+ if os.access(bindb.bintree.pkgdir, os.W_OK):
+ # Call binarytree.populate(), since we want to make sure it's
+ # only populated with local packages here (getbinpkgs=0).
+ bindb.bintree.populate()
+ else:
+ bindb = None
+
master_repo = portdb.getRepositoryName(portdb.porttree_root)
if master_repo in repo_map:
repo_map['DEFAULT'] = repo_map[master_repo]
for repo_name, myupd in repo_map.items():
- if repo_name == 'DEFAULT':
- continue
- if not myupd:
- continue
-
- def repo_match(repository):
- return repository == repo_name or \
- (repo_name == master_repo and repository not in repo_map)
-
- def _world_repo_match(atoma, atomb):
- """
- Check whether to perform a world change from atoma to atomb.
- If best vardb match for atoma comes from the same repository
- as the update file, allow that. Additionally, if portdb still
- can find a match for old atom name, warn about that.
- """
- matches = vardb.match(atoma)
- if not matches:
- matches = vardb.match(atomb)
- if matches and \
- repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
- if portdb.match(atoma):
- world_warnings.add((atoma, atomb))
- return True
- else:
- return False
+ if repo_name == 'DEFAULT':
+ continue
+ if not myupd:
+ continue
- for update_cmd in myupd:
- for pos, atom in enumerate(world_list):
- new_atom = update_dbentry(update_cmd, atom)
- if atom != new_atom:
- if _world_repo_match(atom, new_atom):
- world_list[pos] = new_atom
- world_modified = True
-
- for update_cmd in myupd:
- if update_cmd[0] == "move":
- moves = vardb.move_ent(update_cmd, repo_match=repo_match)
+ def repo_match(repository):
+ return repository == repo_name or \
+ (repo_name == master_repo and repository not in repo_map)
+
+ def _world_repo_match(atoma, atomb):
+ """
+ Check whether to perform a world change from atoma to atomb.
+ If best vardb match for atoma comes from the same repository
+ as the update file, allow that. Additionally, if portdb still
+ can find a match for old atom name, warn about that.
+ """
+ matches = vardb.match(atoma)
+ if not matches:
+ matches = vardb.match(atomb)
+ if matches and \
+ repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
+ if portdb.match(atoma):
+ world_warnings.add((atoma, atomb))
+ return True
+ else:
+ return False
+
+ for update_cmd in myupd:
+ for pos, atom in enumerate(world_list):
+ new_atom = update_dbentry(update_cmd, atom)
+ if atom != new_atom:
+ if _world_repo_match(atom, new_atom):
+ world_list[pos] = new_atom
+ world_modified = True
+
+ for update_cmd in myupd:
+ if update_cmd[0] == "move":
+ moves = vardb.move_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "@")
+ if bindb:
+ moves = bindb.move_ent(update_cmd, repo_match=repo_match)
if moves:
- writemsg_stdout(moves * "@")
- if bindb:
- moves = bindb.move_ent(update_cmd, repo_match=repo_match)
- if moves:
- writemsg_stdout(moves * "%")
- elif update_cmd[0] == "slotmove":
- moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
+ writemsg_stdout(moves * "%")
+ elif update_cmd[0] == "slotmove":
+ moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "s")
+ if bindb:
+ moves = bindb.move_slot_ent(update_cmd, repo_match=repo_match)
if moves:
- writemsg_stdout(moves * "s")
- if bindb:
- moves = bindb.move_slot_ent(update_cmd, repo_match=repo_match)
- if moves:
- writemsg_stdout(moves * "S")
+ writemsg_stdout(moves * "S")
if world_modified:
world_list.sort()
@@ -189,65 +187,65 @@ def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
if retupd:
- def _config_repo_match(repo_name, atoma, atomb):
- """
- Check whether to perform a world change from atoma to atomb.
- If best vardb match for atoma comes from the same repository
- as the update file, allow that. Additionally, if portdb still
- can find a match for old atom name, warn about that.
- """
- matches = vardb.match(atoma)
+ def _config_repo_match(repo_name, atoma, atomb):
+ """
+ Check whether to perform a world change from atoma to atomb.
+ If best vardb match for atoma comes from the same repository
+ as the update file, allow that. Additionally, if portdb still
+ can find a match for old atom name, warn about that.
+ """
+ matches = vardb.match(atoma)
+ if not matches:
+ matches = vardb.match(atomb)
if not matches:
- matches = vardb.match(atomb)
- if not matches:
- return False
- repository = vardb.aux_get(best(matches), ['repository'])[0]
- return repository == repo_name or \
- (repo_name == master_repo and repository not in repo_map)
-
- update_config_files(root,
- shlex_split(mysettings.get("CONFIG_PROTECT", "")),
- shlex_split(mysettings.get("CONFIG_PROTECT_MASK", "")),
- repo_map, match_callback=_config_repo_match)
-
- # The above global updates proceed quickly, so they
- # are considered a single mtimedb transaction.
- if timestamps:
- # We do not update the mtime in the mtimedb
- # until after _all_ of the above updates have
- # been processed because the mtimedb will
- # automatically commit when killed by ctrl C.
- for mykey, mtime in timestamps.items():
- prev_mtimes[mykey] = mtime
-
- do_upgrade_packagesmessage = False
- # We gotta do the brute force updates for these now.
- if True:
- def onUpdate(maxval, curval):
+ return False
+ repository = vardb.aux_get(best(matches), ['repository'])[0]
+ return repository == repo_name or \
+ (repo_name == master_repo and repository not in repo_map)
+
+ update_config_files(root,
+ shlex_split(mysettings.get("CONFIG_PROTECT", "")),
+ shlex_split(mysettings.get("CONFIG_PROTECT_MASK", "")),
+ repo_map, match_callback=_config_repo_match)
+
+ # The above global updates proceed quickly, so they
+ # are considered a single mtimedb transaction.
+ if timestamps:
+ # We do not update the mtime in the mtimedb
+ # until after _all_ of the above updates have
+ # been processed because the mtimedb will
+ # automatically commit when killed by ctrl C.
+ for mykey, mtime in timestamps.items():
+ prev_mtimes[mykey] = mtime
+
+ do_upgrade_packagesmessage = False
+ # We gotta do the brute force updates for these now.
+ if True:
+ def onUpdate(_maxval, curval):
+ if curval > 0:
+ writemsg_stdout("#")
+ if quiet:
+ onUpdate = None
+ vardb.update_ents(repo_map, onUpdate=onUpdate)
+ if bindb:
+ def onUpdate(_maxval, curval):
if curval > 0:
- writemsg_stdout("#")
+ writemsg_stdout("*")
if quiet:
onUpdate = None
- vardb.update_ents(repo_map, onUpdate=onUpdate)
- if bindb:
- def onUpdate(maxval, curval):
- if curval > 0:
- writemsg_stdout("*")
- if quiet:
- onUpdate = None
- bindb.update_ents(repo_map, onUpdate=onUpdate)
- else:
- do_upgrade_packagesmessage = 1
-
- # Update progress above is indicated by characters written to stdout so
- # we print a couple new lines here to separate the progress output from
- # what follows.
- writemsg_stdout("\n\n")
-
- if do_upgrade_packagesmessage and bindb and \
- bindb.cpv_all():
- writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
- writemsg_stdout(bold(_("Note: This can take a very long time.")))
- writemsg_stdout("\n")
+ bindb.update_ents(repo_map, onUpdate=onUpdate)
+ else:
+ do_upgrade_packagesmessage = 1
+
+ # Update progress above is indicated by characters written to stdout so
+ # we print a couple new lines here to separate the progress output from
+ # what follows.
+ writemsg_stdout("\n\n")
+
+ if do_upgrade_packagesmessage and bindb and \
+ bindb.cpv_all():
+ writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
+ writemsg_stdout(bold(_("Note: This can take a very long time.")))
+ writemsg_stdout("\n")
return retupd
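
The hunk above defers touching the binary package tree until an update is
actually pending: binarytree.populate() now runs only once retupd is set, and
bindb is dropped when pkgdir is not writable. A condensed sketch of that
control flow (function and parameter names are illustrative only):

import os

def maybe_populate(bindb, pkgdir, updates_pending):
	"""Populate the binary tree lazily; return None if it is unusable."""
	if not updates_pending:
		# No updates to apply, so skip the potentially slow populate().
		return bindb
	if os.access(pkgdir, os.W_OK):
		# The real code populates with local packages only (getbinpkgs=0).
		bindb.populate()
		return bindb
	# Read-only pkgdir: binary package moves cannot be recorded.
	return None
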
diff --git a/pym/portage/_legacy_globals.py b/pym/portage/_legacy_globals.py
index abffa0e9a..bb9691a77 100644
--- a/pym/portage/_legacy_globals.py
+++ b/pym/portage/_legacy_globals.py
@@ -27,7 +27,8 @@ def _get_legacy_global(name):
os.umask(0o22)
kwargs = {}
- for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"),
+ ("target_root", "ROOT"), ("eprefix", "EPREFIX")):
kwargs[k] = os.environ.get(envvar)
portage._initializing_globals = True
diff --git a/pym/portage/_selinux.py b/pym/portage/_selinux.py
index 173714515..2a7194c5d 100644
--- a/pym/portage/_selinux.py
+++ b/pym/portage/_selinux.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Don't use the unicode-wrapped os and shutil modules here since
@@ -8,18 +8,18 @@ import shutil
import portage
from portage import _encodings
-from portage import _unicode_decode
-from portage import _unicode_encode
+from portage import _native_string, _unicode_decode
from portage.localization import _
portage.proxy.lazyimport.lazyimport(globals(),
'selinux')
def copyfile(src, dest):
- src = _unicode_encode(src, encoding=_encodings['fs'], errors='strict')
- dest = _unicode_encode(dest, encoding=_encodings['fs'], errors='strict')
+ src = _native_string(src, encoding=_encodings['fs'], errors='strict')
+ dest = _native_string(dest, encoding=_encodings['fs'], errors='strict')
(rc, ctx) = selinux.lgetfilecon(src)
if rc < 0:
- src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
+ if sys.hexversion < 0x3000000:
+ src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
raise OSError(_("copyfile: Failed getting context of \"%s\".") % src)
setfscreate(ctx)
@@ -39,12 +39,12 @@ def is_selinux_enabled():
return selinux.is_selinux_enabled()
def mkdir(target, refdir):
- target = _unicode_encode(target, encoding=_encodings['fs'], errors='strict')
- refdir = _unicode_encode(refdir, encoding=_encodings['fs'], errors='strict')
+ target = _native_string(target, encoding=_encodings['fs'], errors='strict')
+ refdir = _native_string(refdir, encoding=_encodings['fs'], errors='strict')
(rc, ctx) = selinux.getfilecon(refdir)
if rc < 0:
- refdir = _unicode_decode(refdir, encoding=_encodings['fs'],
- errors='replace')
+ if sys.hexversion < 0x3000000:
+ refdir = _unicode_decode(refdir, encoding=_encodings['fs'], errors='replace')
raise OSError(
_("mkdir: Failed getting context of reference directory \"%s\".") \
% refdir)
@@ -56,16 +56,17 @@ def mkdir(target, refdir):
setfscreate()
def rename(src, dest):
- src = _unicode_encode(src, encoding=_encodings['fs'], errors='strict')
- dest = _unicode_encode(dest, encoding=_encodings['fs'], errors='strict')
+ src = _native_string(src, encoding=_encodings['fs'], errors='strict')
+ dest = _native_string(dest, encoding=_encodings['fs'], errors='strict')
(rc, ctx) = selinux.lgetfilecon(src)
if rc < 0:
- src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
+ if sys.hexversion < 0x3000000:
+ src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
raise OSError(_("rename: Failed getting context of \"%s\".") % src)
setfscreate(ctx)
try:
- os.rename(src,dest)
+ os.rename(src, dest)
finally:
setfscreate()
@@ -75,10 +76,10 @@ def settype(newtype):
return ":".join(ret)
def setexec(ctx="\n"):
- ctx = _unicode_encode(ctx, encoding=_encodings['content'], errors='strict')
+ ctx = _native_string(ctx, encoding=_encodings['content'], errors='strict')
if selinux.setexeccon(ctx) < 0:
- ctx = _unicode_decode(ctx, encoding=_encodings['content'],
- errors='replace')
+ if sys.hexversion < 0x3000000:
+ ctx = _unicode_decode(ctx, encoding=_encodings['content'], errors='replace')
if selinux.security_getenforce() == 1:
raise OSError(_("Failed setting exec() context \"%s\".") % ctx)
else:
@@ -87,11 +88,10 @@ def setexec(ctx="\n"):
noiselevel=-1)
def setfscreate(ctx="\n"):
- ctx = _unicode_encode(ctx,
- encoding=_encodings['content'], errors='strict')
+ ctx = _native_string(ctx, encoding=_encodings['content'], errors='strict')
if selinux.setfscreatecon(ctx) < 0:
- ctx = _unicode_decode(ctx,
- encoding=_encodings['content'], errors='replace')
+ if sys.hexversion < 0x3000000:
+ ctx = _unicode_decode(ctx, encoding=_encodings['content'], errors='replace')
raise OSError(
_("setfscreate: Failed setting fs create context \"%s\".") % ctx)
@@ -106,8 +106,7 @@ class spawn_wrapper(object):
def __init__(self, spawn_func, selinux_type):
self._spawn_func = spawn_func
- selinux_type = _unicode_encode(selinux_type,
- encoding=_encodings['content'], errors='strict')
+ selinux_type = _native_string(selinux_type, encoding=_encodings['content'], errors='strict')
self._con = settype(selinux_type)
def __call__(self, *args, **kwargs):
@@ -123,13 +122,13 @@ class spawn_wrapper(object):
return self._spawn_func(*args, **kwargs)
def symlink(target, link, reflnk):
- target = _unicode_encode(target, encoding=_encodings['fs'], errors='strict')
- link = _unicode_encode(link, encoding=_encodings['fs'], errors='strict')
- reflnk = _unicode_encode(reflnk, encoding=_encodings['fs'], errors='strict')
+ target = _native_string(target, encoding=_encodings['fs'], errors='strict')
+ link = _native_string(link, encoding=_encodings['fs'], errors='strict')
+ reflnk = _native_string(reflnk, encoding=_encodings['fs'], errors='strict')
(rc, ctx) = selinux.lgetfilecon(reflnk)
if rc < 0:
- reflnk = _unicode_decode(reflnk, encoding=_encodings['fs'],
- errors='replace')
+ if sys.hexversion < 0x3000000:
+ reflnk = _unicode_decode(reflnk, encoding=_encodings['fs'], errors='replace')
raise OSError(
_("symlink: Failed getting context of reference symlink \"%s\".") \
% reflnk)
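
The _native_string conversions above normalize paths and contexts to the
interpreter's native str type, so the selinux bindings receive bytes under
Python 2 and text under Python 3. A rough standalone equivalent (a sketch;
the real helper lives in portage and uses the _encodings table):

import sys

def native_string(s, encoding='utf-8', errors='strict'):
	"""Coerce s to the native str type of the running interpreter."""
	if sys.hexversion >= 0x3000000:
		# Python 3: native str is text, so decode any bytes input.
		if isinstance(s, bytes):
			return s.decode(encoding, errors)
		return s
	# Python 2: native str is bytes, so encode any unicode input.
	if isinstance(s, unicode):  # noqa: F821 -- py2-only builtin
		return s.encode(encoding, errors)
	return s
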
diff --git a/pym/portage/_sets/__init__.py b/pym/portage/_sets/__init__.py
index c3b590e92..75d1df7bf 100644
--- a/pym/portage/_sets/__init__.py
+++ b/pym/portage/_sets/__init__.py
@@ -17,6 +17,7 @@ try:
from configparser import SafeConfigParser
except ImportError:
from ConfigParser import SafeConfigParser, NoOptionError, ParsingError
+import portage
from portage import os
from portage import load_mod
from portage import _unicode_decode
@@ -124,6 +125,10 @@ class SetConfig(object):
parser.add_section("system")
parser.set("system", "class", "portage.sets.profiles.PackagesSystemSet")
+ parser.remove_section("security")
+ parser.add_section("security")
+ parser.set("security", "class", "portage.sets.security.NewAffectedSet")
+
parser.remove_section("usersets")
parser.add_section("usersets")
parser.set("usersets", "class", "portage.sets.files.StaticFileSet")
@@ -131,6 +136,27 @@ class SetConfig(object):
parser.set("usersets", "directory", "%(PORTAGE_CONFIGROOT)setc/portage/sets")
parser.set("usersets", "world-candidate", "true")
+ parser.remove_section("live-rebuild")
+ parser.add_section("live-rebuild")
+ parser.set("live-rebuild", "class", "portage.sets.dbapi.VariableSet")
+ parser.set("live-rebuild", "variable", "INHERITED")
+ parser.set("live-rebuild", "includes", " ".join(sorted(portage.const.LIVE_ECLASSES)))
+
+ parser.remove_section("module-rebuild")
+ parser.add_section("module-rebuild")
+ parser.set("module-rebuild", "class", "portage.sets.dbapi.OwnerSet")
+ parser.set("module-rebuild", "files", "/lib/modules")
+
+ parser.remove_section("preserved-rebuild")
+ parser.add_section("preserved-rebuild")
+ parser.set("preserved-rebuild", "class", "portage.sets.libs.PreservedLibraryConsumerSet")
+
+ parser.remove_section("x11-module-rebuild")
+ parser.add_section("x11-module-rebuild")
+ parser.set("x11-module-rebuild", "class", "portage.sets.dbapi.OwnerSet")
+ parser.set("x11-module-rebuild", "files", "/usr/lib/xorg/modules")
+ parser.set("x11-module-rebuild", "exclude-files", "/usr/bin/Xorg")
+
def update(self, setname, options):
parser = self._parser
self.errors = []
@@ -270,8 +296,8 @@ def load_default_config(settings, trees):
return SetConfig(None, settings, trees)
global_config_path = GLOBAL_CONFIG_PATH
- if settings['EPREFIX']:
- global_config_path = os.path.join(settings['EPREFIX'],
+ if portage.const.EPREFIX:
+ global_config_path = os.path.join(portage.const.EPREFIX,
GLOBAL_CONFIG_PATH.lstrip(os.sep))
def _getfiles():
for path, dirs, files in os.walk(os.path.join(global_config_path, "sets")):
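
The hard-coded defaults above mirror the stanzas shipped in
cnf/sets/portage.conf. A small sketch of what the live-rebuild section
amounts to when rendered back out through ConfigParser (the includes value
is spelled out from the LIVE_ECLASSES constant added in pym/portage/const.py):

import sys

try:
	from configparser import ConfigParser  # Python 3
except ImportError:
	from ConfigParser import SafeConfigParser as ConfigParser  # Python 2

parser = ConfigParser()
parser.add_section("live-rebuild")
parser.set("live-rebuild", "class", "portage.sets.dbapi.VariableSet")
parser.set("live-rebuild", "variable", "INHERITED")
parser.set("live-rebuild", "includes",
	"bzr cvs darcs git git-2 git-r3 mercurial subversion tla")
parser.write(sys.stdout)
# [live-rebuild]
# class = portage.sets.dbapi.VariableSet
# variable = INHERITED
# includes = bzr cvs darcs git git-2 git-r3 mercurial subversion tla
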
diff --git a/pym/portage/_sets/base.py b/pym/portage/_sets/base.py
index c8d3ae405..ee20d3671 100644
--- a/pym/portage/_sets/base.py
+++ b/pym/portage/_sets/base.py
@@ -1,4 +1,4 @@
-# Copyright 2007-2011 Gentoo Foundation
+# Copyright 2007-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -7,6 +7,7 @@ from portage.exception import InvalidAtom
from portage.versions import cpv_getkey
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
OPERATIONS = ["merge", "unmerge"]
@@ -126,7 +127,7 @@ class PackageSet(object):
if modified_use is not None and modified_use is not pkg.use.enabled:
pkg = pkg.copy()
- pkg.metadata["USE"] = " ".join(modified_use)
+ pkg._metadata["USE"] = " ".join(modified_use)
# Atoms matched via PROVIDE must be temporarily transformed since
# match_from_list() only works correctly when atom.cp == pkg.cp.
@@ -156,7 +157,7 @@ class PackageSet(object):
for atom in atoms:
if match_from_list(atom, cpv_slot_list):
yield atom
- provides = pkg.metadata['PROVIDE']
+ provides = pkg._metadata['PROVIDE']
if not provides:
return
provides = provides.split()
diff --git a/pym/portage/_sets/dbapi.py b/pym/portage/_sets/dbapi.py
index 4982a9244..384fb3aa8 100644
--- a/pym/portage/_sets/dbapi.py
+++ b/pym/portage/_sets/dbapi.py
@@ -26,8 +26,7 @@ class EverythingSet(PackageSet):
def load(self):
myatoms = []
- db_keys = ["SLOT"]
- aux_get = self._db.aux_get
+ pkg_str = self._db._pkg_str
cp_list = self._db.cp_list
for cp in self._db.cp_all():
@@ -35,8 +34,8 @@ class EverythingSet(PackageSet):
# NOTE: Create SLOT atoms even when there is only one
# SLOT installed, in order to avoid the possibility
# of unwanted upgrades as reported in bug #338959.
- slot, = aux_get(cpv, db_keys)
- atom = Atom("%s:%s" % (cp, slot))
+ pkg = pkg_str(cpv, None)
+ atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
if self._filter:
if self._filter(atom):
myatoms.append(atom)
@@ -68,20 +67,19 @@ class OwnerSet(PackageSet):
"""
rValue = set()
vardb = self._db
- aux_get = vardb.aux_get
- aux_keys = ["SLOT"]
+ pkg_str = vardb._pkg_str
if exclude_paths is None:
for link, p in vardb._owners.iter_owners(paths):
- slot, = aux_get(link.mycpv, aux_keys)
- rValue.add("%s:%s" % (link.mycpv.cp, slot))
+ pkg = pkg_str(link.mycpv, None)
+ rValue.add("%s:%s" % (pkg.cp, pkg.slot))
else:
all_paths = set()
all_paths.update(paths)
all_paths.update(exclude_paths)
exclude_atoms = set()
for link, p in vardb._owners.iter_owners(all_paths):
- slot, = aux_get(link.mycpv, aux_keys)
- atom = "%s:%s" % (link.mycpv.cp, slot)
+ pkg = pkg_str(link.mycpv, None)
+ atom = "%s:%s" % (pkg.cp, pkg.slot)
rValue.add(atom)
if p in exclude_paths:
exclude_atoms.add(atom)
@@ -173,12 +171,11 @@ class DowngradeSet(PackageSet):
xmatch = self._portdb.xmatch
xmatch_level = "bestmatch-visible"
cp_list = self._vardb.cp_list
- aux_get = self._vardb.aux_get
- aux_keys = ["SLOT"]
+ pkg_str = self._vardb._pkg_str
for cp in self._vardb.cp_all():
for cpv in cp_list(cp):
- slot, = aux_get(cpv, aux_keys)
- slot_atom = "%s:%s" % (cp, slot)
+ pkg = pkg_str(cpv, None)
+ slot_atom = "%s:%s" % (pkg.cp, pkg.slot)
ebuild = xmatch(xmatch_level, slot_atom)
if not ebuild:
continue
@@ -326,6 +323,7 @@ class CategorySet(PackageSet):
class AgeSet(EverythingSet):
_operations = ["merge", "unmerge"]
+ _aux_keys = ('BUILD_TIME',)
def __init__(self, vardb, mode="older", age=7):
super(AgeSet, self).__init__(vardb)
@@ -335,8 +333,12 @@ class AgeSet(EverythingSet):
def _filter(self, atom):
cpv = self._db.match(atom)[0]
- path = self._db.getpath(cpv, filename="COUNTER")
- age = (time.time() - os.stat(path).st_mtime) / (3600 * 24)
+ try:
+ date, = self._db.aux_get(cpv, self._aux_keys)
+ date = int(date)
+ except (KeyError, ValueError):
+ return bool(self._mode == "older")
+ age = (time.time() - date) / (3600 * 24)
if ((self._mode == "older" and age <= self._age) \
or (self._mode == "newer" and age >= self._age)):
return False
@@ -355,6 +357,83 @@ class AgeSet(EverythingSet):
singleBuilder = classmethod(singleBuilder)
+class DateSet(EverythingSet):
+ _operations = ["merge", "unmerge"]
+ _aux_keys = ('BUILD_TIME',)
+
+ def __init__(self, vardb, date, mode="older"):
+ super(DateSet, self).__init__(vardb)
+ self._mode = mode
+ self._date = date
+
+ def _filter(self, atom):
+
+ cpv = self._db.match(atom)[0]
+ try:
+ date, = self._db.aux_get(cpv, self._aux_keys)
+ date = int(date)
+ except (KeyError, ValueError):
+ return bool(self._mode == "older")
+ # Make sure inequality is _strict_ to exclude tested package
+ if ((self._mode == "older" and date < self._date) \
+ or (self._mode == "newer" and date > self._date)):
+ return True
+ else:
+ return False
+
+ def singleBuilder(cls, options, settings, trees):
+ vardbapi = trees["vartree"].dbapi
+ mode = options.get("mode", "older")
+ if str(mode).lower() not in ["newer", "older"]:
+ raise SetConfigError(_("invalid 'mode' value %s (use either 'newer' or 'older')") % mode)
+
+ formats = []
+ if options.get("package") is not None:
+ formats.append("package")
+ if options.get("filestamp") is not None:
+ formats.append("filestamp")
+ if options.get("seconds") is not None:
+ formats.append("seconds")
+ if options.get("date") is not None:
+ formats.append("date")
+
+ if not formats:
+			raise SetConfigError(_("none of these options was specified: 'package', 'filestamp', 'seconds', 'date'"))
+ elif len(formats) > 1:
+ raise SetConfigError(_("no more than one of these options is allowed: 'package', 'filestamp', 'seconds', 'date'"))
+
+ format = formats[0]
+
+ if (format == "package"):
+ package = options.get("package")
+ try:
+ cpv = vardbapi.match(package)[0]
+ date, = vardbapi.aux_get(cpv, ('BUILD_TIME',))
+ date = int(date)
+ except (KeyError, ValueError):
+ raise SetConfigError(_("cannot determine installation date of package %s") % package)
+ elif (format == "filestamp"):
+ filestamp = options.get("filestamp")
+ try:
+ date = int(os.stat(filestamp).st_mtime)
+ except (OSError, ValueError):
+ raise SetConfigError(_("cannot determine 'filestamp' of '%s'") % filestamp)
+ elif (format == "seconds"):
+ try:
+ date = int(options.get("seconds"))
+ except ValueError:
+ raise SetConfigError(_("option 'seconds' must be an integer"))
+ else:
+ dateopt = options.get("date")
+ try:
+ dateformat = options.get("dateformat", "%x %X")
+ date = int(time.mktime(time.strptime(dateopt, dateformat)))
+ except ValueError:
+ raise SetConfigError(_("'date=%s' does not match 'dateformat=%s'") % (dateopt, dateformat))
+ return DateSet(vardb=vardbapi, date=date, mode=mode)
+
+ singleBuilder = classmethod(singleBuilder)
+
class RebuiltBinaries(EverythingSet):
_operations = ('merge',)
_aux_keys = ('BUILD_TIME',)
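
DateSet's singleBuilder reduces its four mutually exclusive options to a
single integer cutoff in seconds since the epoch. A sketch of that reduction
outside the set framework, covering all but the vardb-backed 'package' format
(option names match the builder above; error handling trimmed):

import os
import time

def date_cutoff(options):
	"""Return an epoch-seconds cutoff from one of the supported formats."""
	if "filestamp" in options:
		return int(os.stat(options["filestamp"]).st_mtime)
	if "seconds" in options:
		return int(options["seconds"])
	if "date" in options:
		fmt = options.get("dateformat", "%x %X")
		return int(time.mktime(time.strptime(options["date"], fmt)))
	raise ValueError("no usable date option")

# For example, to select everything built before 2014-01-01 (mode="older"):
cutoff = date_cutoff({"date": "2014-01-01 00:00:00",
	"dateformat": "%Y-%m-%d %H:%M:%S"})
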
diff --git a/pym/portage/_sets/files.py b/pym/portage/_sets/files.py
index b891ea4f4..2fb64de87 100644
--- a/pym/portage/_sets/files.py
+++ b/pym/portage/_sets/files.py
@@ -1,4 +1,4 @@
-# Copyright 2007-2012 Gentoo Foundation
+# Copyright 2007-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -86,8 +86,8 @@ class StaticFileSet(EditablePackageSet):
for a in data:
matches = self.dbapi.match(a)
for cpv in matches:
- atoms.append("%s:%s" % (cpv_getkey(cpv),
- self.dbapi.aux_get(cpv, ["SLOT"])[0]))
+ pkg = self.dbapi._pkg_str(cpv, None)
+ atoms.append("%s:%s" % (pkg.cp, pkg.slot))
# In addition to any installed slots, also try to pull
# in the latest new slot that may be available.
atoms.append(a)
@@ -296,10 +296,14 @@ class WorldSelectedSet(EditablePackageSet):
ensure_dirs(os.path.dirname(self._filename), gid=portage_gid, mode=0o2750, mask=0o2)
def lock(self):
+ if self._lock is not None:
+ raise AssertionError("already locked")
self._ensure_dirs()
self._lock = lockfile(self._filename, wantnewlockfile=1)
def unlock(self):
+ if self._lock is None:
+ raise AssertionError("not locked")
unlockfile(self._lock)
self._lock = None
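
The new assertions make lock() and unlock() fail loudly on double-lock and
double-unlock instead of silently leaking or clobbering a lock handle. The
same guard pattern in isolation (the object() placeholder stands in for
portage.locks.lockfile/unlockfile):

class Guarded(object):
	def __init__(self):
		self._lock = None

	def lock(self):
		if self._lock is not None:
			raise AssertionError("already locked")
		self._lock = object()  # placeholder for lockfile(...)

	def unlock(self):
		if self._lock is None:
			raise AssertionError("not locked")
		# placeholder for unlockfile(self._lock)
		self._lock = None
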
diff --git a/pym/portage/_sets/libs.py b/pym/portage/_sets/libs.py
index 6c5babc13..022e076f5 100644
--- a/pym/portage/_sets/libs.py
+++ b/pym/portage/_sets/libs.py
@@ -1,12 +1,12 @@
-# Copyright 2007-2011 Gentoo Foundation
+# Copyright 2007-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
+from portage.exception import InvalidData
from portage.localization import _
from portage._sets.base import PackageSet
from portage._sets import get_boolean, SetConfigError
-from portage.versions import cpv_getkey
import portage
class LibraryConsumerSet(PackageSet):
@@ -22,14 +22,14 @@ class LibraryConsumerSet(PackageSet):
for p in paths:
for cpv in self.dbapi._linkmap.getOwners(p):
try:
- slot, = self.dbapi.aux_get(cpv, ["SLOT"])
- except KeyError:
+ pkg = self.dbapi._pkg_str(cpv, None)
+ except (KeyError, InvalidData):
# This is expected for preserved libraries
# of packages that have been uninstalled
# without replacement.
pass
else:
- rValue.add("%s:%s" % (cpv_getkey(cpv), slot))
+ rValue.add("%s:%s" % (pkg.cp, pkg.slot))
return rValue
class LibraryFileConsumerSet(LibraryConsumerSet):
@@ -49,7 +49,8 @@ class LibraryFileConsumerSet(LibraryConsumerSet):
def load(self):
consumers = set()
for lib in self.files:
- consumers.update(self.dbapi._linkmap.findConsumers(lib))
+ consumers.update(
+ self.dbapi._linkmap.findConsumers(lib, greedy=False))
if not consumers:
return
@@ -77,10 +78,10 @@ class PreservedLibraryConsumerSet(LibraryConsumerSet):
for lib in libs:
if self.debug:
print(lib)
- for x in sorted(self.dbapi._linkmap.findConsumers(lib)):
+ for x in sorted(self.dbapi._linkmap.findConsumers(lib, greedy=False)):
print(" ", x)
print("-"*40)
- consumers.update(self.dbapi._linkmap.findConsumers(lib))
+ consumers.update(self.dbapi._linkmap.findConsumers(lib, greedy=False))
# Don't rebuild packages just because they contain preserved
# libs that happen to be consumers of other preserved libs.
for libs in plib_dict.values():
diff --git a/pym/portage/_sets/security.py b/pym/portage/_sets/security.py
index 7e856bc79..f8dbef2be 100644
--- a/pym/portage/_sets/security.py
+++ b/pym/portage/_sets/security.py
@@ -44,8 +44,8 @@ class SecuritySet(PackageSet):
mydict = {}
for atom in atomlist[:]:
cpv = self._portdbapi.xmatch("match-all", atom)[0]
- slot = self._portdbapi.aux_get(cpv, ["SLOT"])[0]
- cps = "%s:%s" % (cpv.cp, slot)
+ pkg = self._portdbapi._pkg_str(cpv, None)
+ cps = "%s:%s" % (pkg.cp, pkg.slot)
if not cps in mydict:
mydict[cps] = (atom, cpv)
else:
diff --git a/pym/portage/cache/ebuild_xattr.py b/pym/portage/cache/ebuild_xattr.py
index 0086e40a3..db6e177cf 100644
--- a/pym/portage/cache/ebuild_xattr.py
+++ b/pym/portage/cache/ebuild_xattr.py
@@ -1,4 +1,4 @@
-# -*- coding: UTF8 -*-
+# -*- coding: utf-8 -*-
# Copyright: 2009-2011 Gentoo Foundation
# Author(s): Petteri Räty (betelgeuse@gentoo.org)
# License: GPL2
diff --git a/pym/portage/cache/flat_hash.py b/pym/portage/cache/flat_hash.py
index 2eae9f634..53042965e 100644
--- a/pym/portage/cache/flat_hash.py
+++ b/pym/portage/cache/flat_hash.py
@@ -1,7 +1,9 @@
-# Copyright: 2005-2011 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Author(s): Brian Harring (ferringb@gentoo.org)
+from __future__ import unicode_literals
+
from portage.cache import fs_template
from portage.cache import cache_errors
import errno
@@ -11,16 +13,14 @@ import sys
import os as _os
from portage import os
from portage import _encodings
-from portage import _unicode_decode
from portage import _unicode_encode
+from portage.exception import InvalidData
+from portage.versions import _pkg_str
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
-# Coerce to unicode, in order to prevent TypeError when writing
-# raw bytes to TextIOWrapper with python2.
-_setitem_fmt = _unicode_decode("%s=%s\n")
-
class database(fs_template.FsBased):
autocommits = True
@@ -40,11 +40,10 @@ class database(fs_template.FsBased):
# Don't use os.path.join, for better performance.
fp = self.location + _os.sep + cpv
try:
- myf = io.open(_unicode_encode(fp,
+ with io.open(_unicode_encode(fp,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
- errors='replace')
- try:
+ errors='replace') as myf:
lines = myf.read().split("\n")
if not lines[-1]:
lines.pop()
@@ -54,8 +53,6 @@ class database(fs_template.FsBased):
# that uses mtime mangling.
d['_mtime_'] = _os.fstat(myf.fileno())[stat.ST_MTIME]
return d
- finally:
- myf.close()
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
raise cache_errors.CacheCorruption(cpv, e)
@@ -94,7 +91,10 @@ class database(fs_template.FsBased):
v = values.get(k)
if not v:
continue
- myf.write(_setitem_fmt % (k, v))
+ # NOTE: This format string requires unicode_literals, so that
+ # k and v are coerced to unicode, in order to prevent TypeError
+ # when writing raw bytes to TextIOWrapper with Python 2.
+ myf.write("%s=%s\n" % (k, v))
finally:
myf.close()
self._ensure_access(fp)
@@ -135,8 +135,6 @@ class database(fs_template.FsBased):
del e
continue
for l in dir_list:
- if l.endswith(".cpickle"):
- continue
p = os.path.join(dir_path, l)
try:
st = os.lstat(p)
@@ -151,7 +149,11 @@ class database(fs_template.FsBased):
if depth < 1:
dirs.append((depth+1, p))
continue
- yield p[len_base+1:]
+
+ try:
+ yield _pkg_str(p[len_base+1:])
+ except InvalidData:
+ continue
class md5_database(database):
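
flat_hash stores one KEY=value pair per line, and the refactor above simply
moves the read into a with-block so the file handle is closed on all paths.
A freestanding sketch of the same parse (mtime handling omitted; the real
code uses the _encodings['repo.content'] codec rather than a literal name):

import io

def read_flat_hash(path):
	"""Parse a flat_hash cache entry into a dict of KEY -> value."""
	d = {}
	with io.open(path, mode='r', encoding='utf-8', errors='replace') as f:
		for line in f:
			line = line.rstrip("\n")
			if not line:
				continue
			key, _sep, value = line.partition("=")
			d[key] = value
	return d
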
diff --git a/pym/portage/cache/flat_list.py b/pym/portage/cache/flat_list.py
deleted file mode 100644
index 728830753..000000000
--- a/pym/portage/cache/flat_list.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright 2005-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.cache import fs_template
-from portage.cache import cache_errors
-from portage import os
-from portage import _encodings
-from portage import _unicode_decode
-from portage import _unicode_encode
-import errno
-import io
-import stat
-import sys
-
-if sys.hexversion >= 0x3000000:
- long = int
-
-# Coerce to unicode, in order to prevent TypeError when writing
-# raw bytes to TextIOWrapper with python2.
-_setitem_fmt = _unicode_decode("%s\n")
-
-# store the current key order *here*.
-class database(fs_template.FsBased):
-
- autocommits = True
-
- # do not screw with this ordering. _eclasses_ needs to be last
- auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
- 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
- 'KEYWORDS', 'IUSE', 'REQUIRED_USE',
- 'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES')
-
- def __init__(self, *args, **config):
- super(database,self).__init__(*args, **config)
- self.location = os.path.join(self.location,
- self.label.lstrip(os.path.sep).rstrip(os.path.sep))
-
- if len(self._known_keys) > len(self.auxdbkey_order) + 2:
- raise Exception("less ordered keys then auxdbkeys")
- if not os.path.exists(self.location):
- self._ensure_dirs()
-
-
- def _getitem(self, cpv):
- d = {}
- try:
- myf = io.open(_unicode_encode(os.path.join(self.location, cpv),
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'],
- errors='replace')
- for k,v in zip(self.auxdbkey_order, myf):
- d[k] = v.rstrip("\n")
- except (OSError, IOError) as e:
- if errno.ENOENT == e.errno:
- raise KeyError(cpv)
- raise cache_errors.CacheCorruption(cpv, e)
-
- try:
- d["_mtime_"] = os.fstat(myf.fileno())[stat.ST_MTIME]
- except OSError as e:
- myf.close()
- raise cache_errors.CacheCorruption(cpv, e)
- myf.close()
- return d
-
-
- def _setitem(self, cpv, values):
- s = cpv.rfind("/")
- fp=os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
- try:
- myf = io.open(_unicode_encode(fp,
- encoding=_encodings['fs'], errors='strict'),
- mode='w', encoding=_encodings['repo.content'],
- errors='backslashreplace')
- except (OSError, IOError) as e:
- if errno.ENOENT == e.errno:
- try:
- self._ensure_dirs(cpv)
- myf = io.open(_unicode_encode(fp,
- encoding=_encodings['fs'], errors='strict'),
- mode='w', encoding=_encodings['repo.content'],
- errors='backslashreplace')
- except (OSError, IOError) as e:
- raise cache_errors.CacheCorruption(cpv, e)
- else:
- raise cache_errors.CacheCorruption(cpv, e)
-
-
- for x in self.auxdbkey_order:
- myf.write(_setitem_fmt % (values.get(x, ""),))
-
- myf.close()
- self._ensure_access(fp, mtime=values["_mtime_"])
- #update written. now we move it.
- new_fp = os.path.join(self.location,cpv)
- try:
- os.rename(fp, new_fp)
- except (OSError, IOError) as e:
- os.remove(fp)
- raise cache_errors.CacheCorruption(cpv, e)
-
-
- def _delitem(self, cpv):
- try:
- os.remove(os.path.join(self.location,cpv))
- except OSError as e:
- if errno.ENOENT == e.errno:
- raise KeyError(cpv)
- else:
- raise cache_errors.CacheCorruption(cpv, e)
-
-
- def __contains__(self, cpv):
- return os.path.exists(os.path.join(self.location, cpv))
-
-
- def __iter__(self):
- """generator for walking the dir struct"""
- dirs = [self.location]
- len_base = len(self.location)
- while len(dirs):
- for l in os.listdir(dirs[0]):
- if l.endswith(".cpickle"):
- continue
- p = os.path.join(dirs[0],l)
- st = os.lstat(p)
- if stat.S_ISDIR(st.st_mode):
- dirs.append(p)
- continue
- yield p[len_base+1:]
- dirs.pop(0)
-
-
- def commit(self): pass
diff --git a/pym/portage/cache/fs_template.py b/pym/portage/cache/fs_template.py
index 8f0636ed0..de4fe4ba5 100644
--- a/pym/portage/cache/fs_template.py
+++ b/pym/portage/cache/fs_template.py
@@ -1,4 +1,4 @@
-# Copyright 2005-2012 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Author(s): Brian Harring (ferringb@gentoo.org)
@@ -15,6 +15,7 @@ lazyimport(globals(),
del lazyimport
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
class FsBased(template.database):
@@ -25,7 +26,8 @@ class FsBased(template.database):
for x, y in (("gid", -1), ("perms", -1)):
if x in config:
- setattr(self, "_"+x, config[x])
+ # Since Python 3.4, chown requires int type (no proxies).
+ setattr(self, "_" + x, int(config[x]))
del config[x]
else:
setattr(self, "_"+x, y)
diff --git a/pym/portage/cache/mappings.py b/pym/portage/cache/mappings.py
index bc8ce9af8..cd39a6ea1 100644
--- a/pym/portage/cache/mappings.py
+++ b/pym/portage/cache/mappings.py
@@ -199,10 +199,10 @@ class OrderedDict(UserDict):
return iter(self._order)
def __setitem__(self, key, item):
- if key in self:
- self._order.remove(key)
+ new_key = key not in self
UserDict.__setitem__(self, key, item)
- self._order.append(key)
+ if new_key:
+ self._order.append(key)
def __delitem__(self, key):
UserDict.__delitem__(self, key)
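
The old __setitem__ removed and re-appended an existing key, so every
re-assignment moved that key to the end of the iteration order; the new
version leaves it at its original position. A quick illustration of the two
behaviors with a bare list standing in for self._order:

order = []

def old_set(key):
	if key in order:
		order.remove(key)
	order.append(key)

def new_set(key):
	if key not in order:
		order.append(key)

for k in ("a", "b", "a"):
	old_set(k)
print(order)  # ['b', 'a'] -- re-assigning 'a' moved it to the end

order = []
for k in ("a", "b", "a"):
	new_set(k)
print(order)  # ['a', 'b'] -- 'a' keeps its original position
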
diff --git a/pym/portage/cache/metadata.py b/pym/portage/cache/metadata.py
index 9d2c3a5d7..0c588bde9 100644
--- a/pym/portage/cache/metadata.py
+++ b/pym/portage/cache/metadata.py
@@ -1,4 +1,4 @@
-# Copyright: 2005 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
# Author(s): Brian Harring (ferringb@gentoo.org)
# License: GPL2
@@ -16,6 +16,7 @@ from portage.cache.template import reconstruct_eclasses
from portage.cache.mappings import ProtectedDict
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
long = int
@@ -28,7 +29,8 @@ class database(flat_hash.database):
auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
- 'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES')
+ 'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES',
+ 'DEFINED_PHASES', 'HDEPEND')
autocommits = True
serialize_eclasses = False
diff --git a/pym/portage/cache/sqlite.py b/pym/portage/cache/sqlite.py
index a6a3e066d..42a239922 100644
--- a/pym/portage/cache/sqlite.py
+++ b/pym/portage/cache/sqlite.py
@@ -1,6 +1,8 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import re
import sys
from portage.cache import fs_template
@@ -11,6 +13,7 @@ from portage.util import writemsg
from portage.localization import _
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
class database(fs_template.FsBased):
@@ -21,7 +24,6 @@ class database(fs_template.FsBased):
# to calculate the number of pages requested, according to the following
# equation: cache_bytes = page_bytes * page_count
cache_bytes = 1024 * 1024 * 10
- _db_table = None
def __init__(self, *args, **config):
super(database, self).__init__(*args, **config)
@@ -29,6 +31,7 @@ class database(fs_template.FsBased):
self._allowed_keys = ["_mtime_", "_eclasses_"]
self._allowed_keys.extend(self._known_keys)
self._allowed_keys.sort()
+ self._allowed_keys_set = frozenset(self._allowed_keys)
self.location = os.path.join(self.location,
self.label.lstrip(os.path.sep).rstrip(os.path.sep))
@@ -38,8 +41,8 @@ class database(fs_template.FsBased):
config.setdefault("autocommit", self.autocommits)
config.setdefault("cache_bytes", self.cache_bytes)
config.setdefault("synchronous", self.synchronous)
- # Timeout for throwing a "database is locked" exception (pysqlite
- # default is 5.0 seconds).
+ # Set longer timeout for throwing a "database is locked" exception.
+ # Default timeout in sqlite3 module is 5.0 seconds.
config.setdefault("timeout", 15)
self._db_init_connection(config)
self._db_init_structures()
@@ -48,11 +51,8 @@ class database(fs_template.FsBased):
# sqlite3 is optional with >=python-2.5
try:
import sqlite3 as db_module
- except ImportError:
- try:
- from pysqlite2 import dbapi2 as db_module
- except ImportError as e:
- raise cache_errors.InitializationError(self.__class__, e)
+ except ImportError as e:
+ raise cache_errors.InitializationError(self.__class__, e)
self._db_module = db_module
self._db_error = db_module.Error
@@ -63,7 +63,6 @@ class database(fs_template.FsBased):
# Avoid potential UnicodeEncodeError in python-2.x by
# only calling str() when it's absolutely necessary.
s = str(s)
- # This is equivalent to the _quote function from pysqlite 1.1.
return "'%s'" % s.replace("'", "''")
def _db_init_connection(self, config):
@@ -93,9 +92,6 @@ class database(fs_template.FsBased):
self._db_table["packages"]["table_name"] = mytable
self._db_table["packages"]["package_id"] = "internal_db_package_id"
self._db_table["packages"]["package_key"] = "portage_package_key"
- self._db_table["packages"]["internal_columns"] = \
- [self._db_table["packages"]["package_id"],
- self._db_table["packages"]["package_key"]]
create_statement = []
create_statement.append("CREATE TABLE")
create_statement.append(mytable)
@@ -110,9 +106,6 @@ class database(fs_template.FsBased):
create_statement.append(")")
self._db_table["packages"]["create"] = " ".join(create_statement)
- self._db_table["packages"]["columns"] = \
- self._db_table["packages"]["internal_columns"] + \
- self._allowed_keys
cursor = self._db_cursor
for k, v in self._db_table.items():
@@ -211,13 +204,17 @@ class database(fs_template.FsBased):
raise KeyError(cpv)
else:
raise cache_errors.CacheCorruption(cpv, "key is not unique")
+ result = result[0]
d = {}
- internal_columns = self._db_table["packages"]["internal_columns"]
- column_index = -1
- for k in self._db_table["packages"]["columns"]:
- column_index +=1
- if k not in internal_columns:
- d[k] = result[0][column_index]
+ allowed_keys_set = self._allowed_keys_set
+ for column_index, column_info in enumerate(cursor.description):
+ k = column_info[0]
+ if k in allowed_keys_set:
+ v = result[column_index]
+ if v is None:
+ # This happens after a new empty column has been added.
+ v = ""
+ d[k] = v
return d
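
Instead of maintaining a parallel "columns" list, the new _getitem derives
each column name from cursor.description, which sqlite3 fills in for every
SELECT. The same idiom against a throwaway table (a sketch using only the
stdlib sqlite3 module):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE pkg (id INTEGER, name TEXT, slot TEXT)")
conn.execute("INSERT INTO pkg VALUES (1, 'sys-apps/portage', '0')")
cur = conn.execute("SELECT * FROM pkg")
row = cur.fetchone()
allowed = frozenset(("name", "slot"))  # cf. self._allowed_keys_set
d = {}
for column_index, column_info in enumerate(cur.description):
	k = column_info[0]
	if k in allowed:
		v = row[column_index]
		if v is None:
			v = ""  # new empty columns come back as NULL
		d[k] = v
print(d)  # {'name': 'sys-apps/portage', 'slot': '0'}
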
diff --git a/pym/portage/cache/template.py b/pym/portage/cache/template.py
index cf1e8aebb..bc81b8642 100644
--- a/pym/portage/cache/template.py
+++ b/pym/portage/cache/template.py
@@ -1,6 +1,6 @@
-# Copyright: 2005-2012 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
# Author(s): Brian Harring (ferringb@gentoo.org)
-# License: GPL2
from portage.cache import cache_errors
from portage.cache.cache_errors import InvalidRestriction
@@ -10,6 +10,7 @@ import warnings
import operator
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
_unicode = str
basestring = str
long = int
@@ -164,7 +165,14 @@ class database(object):
def commit(self):
if not self.autocommits:
- raise NotImplementedError
+ raise NotImplementedError(self)
+
+ def __del__(self):
+ # This used to be handled by an atexit hook that called
+ # close_portdbapi_caches() for all portdbapi instances, but that was
+ # prone to memory leaks for API consumers that needed to create/destroy
+ # many portdbapi instances. So, instead we rely on __del__.
+ self.sync()
def __contains__(self, cpv):
"""This method should always be overridden. It is provided only for
diff --git a/pym/portage/checksum.py b/pym/portage/checksum.py
index daf4a0cbf..f24a90ffc 100644
--- a/pym/portage/checksum.py
+++ b/pym/portage/checksum.py
@@ -1,15 +1,16 @@
# checksum.py -- core Portage functionality
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
-from portage.const import PRELINK_BINARY,HASHING_BLOCKSIZE
+from portage.const import PRELINK_BINARY, HASHING_BLOCKSIZE
from portage.localization import _
from portage import os
from portage import _encodings
from portage import _unicode_encode
import errno
import stat
+import subprocess
import tempfile
#dict of all available hash functions
@@ -48,16 +49,15 @@ class _generate_hash_function(object):
@type filename: String
@return: The hash and size of the data
"""
- f = _open_file(filename)
- blocksize = HASHING_BLOCKSIZE
- data = f.read(blocksize)
- size = 0
- checksum = self._hashobject()
- while data:
- checksum.update(data)
- size = size + len(data)
+ with _open_file(filename) as f:
+ blocksize = HASHING_BLOCKSIZE
+ size = 0
+ checksum = self._hashobject()
data = f.read(blocksize)
- f.close()
+ while data:
+ checksum.update(data)
+ size = size + len(data)
+ data = f.read(blocksize)
return (checksum.hexdigest(), size)
@@ -163,11 +163,16 @@ hashfunc_map["size"] = getsize
prelink_capable = False
if os.path.exists(PRELINK_BINARY):
- results = portage.subprocess_getstatusoutput(
- "%s --version > /dev/null 2>&1" % (PRELINK_BINARY,))
- if (results[0] >> 8) == 0:
- prelink_capable=1
- del results
+ cmd = [PRELINK_BINARY, "--version"]
+ cmd = [_unicode_encode(x, encoding=_encodings['fs'], errors='strict')
+ for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ proc.communicate()
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ prelink_capable = 1
+ del cmd, proc, status
def is_prelinkable_elf(filename):
f = _open_file(filename)
@@ -217,6 +222,64 @@ def _filter_unaccelarated_hashes(digests):
return digests
+class _hash_filter(object):
+ """
+ Implements filtering for PORTAGE_CHECKSUM_FILTER.
+ """
+
+ __slots__ = ('transparent', '_tokens',)
+
+ def __init__(self, filter_str):
+ tokens = filter_str.upper().split()
+ if not tokens or tokens[-1] == "*":
+ del tokens[:]
+ self.transparent = not tokens
+ tokens.reverse()
+ self._tokens = tuple(tokens)
+
+ def __call__(self, hash_name):
+ if self.transparent:
+ return True
+ matches = ("*", hash_name)
+ for token in self._tokens:
+ if token in matches:
+ return True
+ elif token[:1] == "-":
+ if token[1:] in matches:
+ return False
+ return False
+
+def _apply_hash_filter(digests, hash_filter):
+ """
+ Return a new dict containing the filtered digests, or the same
+	dict if no changes are necessary. This will always preserve
+ at least one digest, in order to ensure that they are not all
+ discarded.
+ @param digests: dictionary of digests
+ @type digests: dict
+ @param hash_filter: A callable that takes a single hash name
+ argument, and returns True if the hash is to be used or
+ False otherwise
+ @type hash_filter: callable
+ """
+
+ verifiable_hash_types = set(digests).intersection(hashfunc_map)
+ verifiable_hash_types.discard("size")
+ modified = False
+ if len(verifiable_hash_types) > 1:
+ for k in list(verifiable_hash_types):
+ if not hash_filter(k):
+ modified = True
+ verifiable_hash_types.remove(k)
+ if len(verifiable_hash_types) == 1:
+ break
+
+ if modified:
+ digests = dict((k, v) for (k, v) in digests.items()
+ if k == "size" or k in verifiable_hash_types)
+
+ return digests
+
def verify_all(filename, mydict, calc_prelink=0, strict=0):
"""
Verify all checksums against a file.
@@ -275,9 +338,10 @@ def verify_all(filename, mydict, calc_prelink=0, strict=0):
{"file" : filename, "type" : x})
else:
file_is_ok = False
- reason = (("Failed on %s verification" % x), myhash,mydict[x])
+ reason = (("Failed on %s verification" % x), myhash, mydict[x])
break
- return file_is_ok,reason
+
+ return file_is_ok, reason
def perform_checksum(filename, hashname="MD5", calc_prelink=0):
"""
diff --git a/pym/portage/const.py b/pym/portage/const.py
index ceef5c56b..1785bfff7 100644
--- a/pym/portage/const.py
+++ b/pym/portage/const.py
@@ -1,7 +1,9 @@
# portage: Constants
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import os
# ===========================================================================
@@ -27,8 +29,8 @@ import os
# The variables in this file are grouped by config_root, target_root.
# variables used with config_root (these need to be relative)
-MAKE_CONF_FILE = "etc/make.conf"
USER_CONFIG_PATH = "etc/portage"
+MAKE_CONF_FILE = USER_CONFIG_PATH + "/make.conf"
MODULES_FILE_PATH = USER_CONFIG_PATH + "/modules"
CUSTOM_PROFILE_PATH = USER_CONFIG_PATH + "/profile"
USER_VIRTUALS_FILE = USER_CONFIG_PATH + "/virtuals"
@@ -36,7 +38,7 @@ EBUILD_SH_ENV_FILE = USER_CONFIG_PATH + "/bashrc"
EBUILD_SH_ENV_DIR = USER_CONFIG_PATH + "/env"
CUSTOM_MIRRORS_FILE = USER_CONFIG_PATH + "/mirrors"
COLOR_MAP_FILE = USER_CONFIG_PATH + "/color.map"
-PROFILE_PATH = "etc/make.profile"
+PROFILE_PATH = USER_CONFIG_PATH + "/make.profile"
MAKE_DEFAULTS_FILE = PROFILE_PATH + "/make.defaults" # FIXME: not used
DEPRECATED_PROFILE_FILE = PROFILE_PATH + "/deprecated"
@@ -56,7 +58,10 @@ DEPCACHE_PATH = "/var/cache/edb/dep"
GLOBAL_CONFIG_PATH = "/usr/share/portage/config"
# these variables are not used with target_root or config_root
-PORTAGE_BASE_PATH = os.path.join(os.sep, os.sep.join(__file__.split(os.sep)[:-3]))
+# NOTE: Use realpath(__file__) so that python module symlinks in site-packages
+# are followed back to the real location of the whole portage installation.
+PORTAGE_BASE_PATH = os.path.join(os.sep, os.sep.join(os.path.realpath(
+ __file__.rstrip("co")).split(os.sep)[:-3]))
PORTAGE_BIN_PATH = PORTAGE_BASE_PATH + "/bin"
PORTAGE_PYM_PATH = PORTAGE_BASE_PATH + "/pym"
LOCALE_DATA_PATH = PORTAGE_BASE_PATH + "/locale" # FIXME: not used
@@ -75,40 +80,123 @@ REPO_NAME_LOC = "profiles" + "/" + REPO_NAME_FILE
PORTAGE_PACKAGE_ATOM = "sys-apps/portage"
LIBC_PACKAGE_ATOM = "virtual/libc"
OS_HEADERS_PACKAGE_ATOM = "virtual/os-headers"
+CVS_PACKAGE_ATOM = "dev-vcs/cvs"
+GIT_PACKAGE_ATOM = "dev-vcs/git"
+RSYNC_PACKAGE_ATOM = "net-misc/rsync"
-INCREMENTALS = ("USE", "USE_EXPAND", "USE_EXPAND_HIDDEN",
- "FEATURES", "ACCEPT_KEYWORDS",
- "CONFIG_PROTECT_MASK", "CONFIG_PROTECT",
- "PRELINK_PATH", "PRELINK_PATH_MASK",
- "PROFILE_ONLY_VARIABLES")
-EBUILD_PHASES = ("pretend", "setup", "unpack", "prepare", "configure",
- "compile", "test", "install",
- "package", "preinst", "postinst","prerm", "postrm",
- "nofetch", "config", "info", "other")
+INCREMENTALS = (
+ "ACCEPT_KEYWORDS",
+ "CONFIG_PROTECT",
+ "CONFIG_PROTECT_MASK",
+ "FEATURES",
+ "IUSE_IMPLICIT",
+ "PRELINK_PATH",
+ "PRELINK_PATH_MASK",
+ "PROFILE_ONLY_VARIABLES",
+ "USE",
+ "USE_EXPAND",
+ "USE_EXPAND_HIDDEN",
+ "USE_EXPAND_IMPLICIT",
+ "USE_EXPAND_UNPREFIXED",
+)
+EBUILD_PHASES = (
+ "pretend",
+ "setup",
+ "unpack",
+ "prepare",
+ "configure",
+ "compile",
+ "test",
+ "install",
+ "package",
+ "preinst",
+ "postinst",
+ "prerm",
+ "postrm",
+ "nofetch",
+ "config",
+ "info",
+ "other",
+)
SUPPORTED_FEATURES = frozenset([
- "assume-digests", "binpkg-logs", "buildpkg", "buildsyspkg", "candy",
- "ccache", "chflags", "clean-logs",
- "collision-protect", "compress-build-logs", "compressdebug",
- "config-protect-if-modified",
- "digest", "distcc", "distcc-pump", "distlocks",
- "downgrade-backup", "ebuild-locks", "fakeroot",
- "fail-clean", "force-mirror", "force-prefix", "getbinpkg",
- "installsources", "keeptemp", "keepwork", "fixlafiles", "lmirror",
- "metadata-transfer", "mirror", "multilib-strict", "news",
- "noauto", "noclean", "nodoc", "noinfo", "noman",
- "nostrip", "notitles", "parallel-fetch", "parallel-install",
- "parse-eapi-ebuild-head",
- "prelink-checksums",
- "protect-owned", "python-trace", "sandbox",
- "selinux", "sesandbox", "sfperms",
- "sign", "skiprocheck", "split-elog", "split-log", "splitdebug",
- "strict", "stricter", "suidctl", "test", "test-fail-continue",
- "unknown-features-filter", "unknown-features-warn",
- "unmerge-backup",
- "unmerge-logs", "unmerge-orphans", "userfetch", "userpriv",
- "usersandbox", "usersync", "webrsync-gpg", "xattr"])
-
-EAPI = 4
+ "assume-digests",
+ "binpkg-logs",
+ "buildpkg",
+ "buildsyspkg",
+ "candy",
+ "ccache",
+ "cgroup",
+ "chflags",
+ "clean-logs",
+ "collision-protect",
+ "compress-build-logs",
+ "compressdebug",
+ "compress-index",
+ "config-protect-if-modified",
+ "digest",
+ "distcc",
+ "distcc-pump",
+ "distlocks",
+ "downgrade-backup",
+ "ebuild-locks",
+ "fail-clean",
+ "fakeroot",
+ "fixlafiles",
+ "force-mirror",
+ "force-prefix",
+ "getbinpkg",
+ "installsources",
+ "ipc-sandbox",
+ "keeptemp",
+ "keepwork",
+ "lmirror",
+ "merge-sync",
+ "metadata-transfer",
+ "mirror",
+ "multilib-strict",
+ "network-sandbox",
+ "news",
+ "noauto",
+ "noclean",
+ "nodoc",
+ "noinfo",
+ "noman",
+ "nostrip",
+ "notitles",
+ "parallel-fetch",
+ "parallel-install",
+ "prelink-checksums",
+ "preserve-libs",
+ "protect-owned",
+ "python-trace",
+ "sandbox",
+ "selinux",
+ "sesandbox",
+ "sfperms",
+ "sign",
+ "skiprocheck",
+ "splitdebug",
+ "split-elog",
+ "split-log",
+ "strict",
+ "stricter",
+ "suidctl",
+ "test",
+ "test-fail-continue",
+ "unknown-features-filter",
+ "unknown-features-warn",
+ "unmerge-backup",
+ "unmerge-logs",
+ "unmerge-orphans",
+ "userfetch",
+ "userpriv",
+ "usersandbox",
+ "usersync",
+ "webrsync-gpg",
+ "xattr",
+])
+
+EAPI = 5
HASHING_BLOCKSIZE = 32768
MANIFEST1_HASH_FUNCTIONS = ("MD5", "SHA256", "RMD160")
@@ -151,13 +239,35 @@ MANIFEST2_IDENTIFIERS = ("AUX", "MISC", "DIST", "EBUILD")
a config instance (since it's possible to construct a config instance with
# a different EPREFIX). Therefore, the EPREFIX constant should *NOT* be used
# in the definition of any other constants within this file.
-EPREFIX=""
+EPREFIX = ""
# pick up EPREFIX from the environment if set
if "PORTAGE_OVERRIDE_EPREFIX" in os.environ:
EPREFIX = os.environ["PORTAGE_OVERRIDE_EPREFIX"]
if EPREFIX:
EPREFIX = os.path.normpath(EPREFIX)
+ if EPREFIX == os.sep:
+ EPREFIX = ""
+
+VCS_DIRS = ("CVS", "RCS", "SCCS", ".bzr", ".git", ".hg", ".svn")
+
+# List of known live eclasses. Keep it in sync with cnf/sets/portage.conf
+LIVE_ECLASSES = frozenset([
+ "bzr",
+ "cvs",
+ "darcs",
+ "git",
+ "git-2",
+ "git-r3",
+ "mercurial",
+ "subversion",
+ "tla",
+])
+
+SUPPORTED_BINPKG_FORMATS = ("tar", "rpm")
+
+# Time formats used in various places like metadata.chk.
+TIMESTAMP_FORMAT = "%a, %d %b %Y %H:%M:%S +0000" # to be used with time.gmtime()
# ===========================================================================
# END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANT
@@ -165,17 +275,5 @@ if "PORTAGE_OVERRIDE_EPREFIX" in os.environ:
# Private constants for use in conditional code in order to minimize the diff
# between branches.
-_ENABLE_DYN_LINK_MAP = True
-_ENABLE_PRESERVE_LIBS = True
-_ENABLE_REPO_NAME_WARN = True
+_DEPCLEAN_LIB_CHECK_DEFAULT = True
_ENABLE_SET_CONFIG = True
-_ENABLE_INHERIT_CHECK = True
-
-
-# The definitions above will differ between branches, so it's useful to have
-# common lines of diff context here in order to avoid merge conflicts.
-
-if _ENABLE_PRESERVE_LIBS:
- SUPPORTED_FEATURES = set(SUPPORTED_FEATURES)
- SUPPORTED_FEATURES.add("preserve-libs")
- SUPPORTED_FEATURES = frozenset(SUPPORTED_FEATURES)
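
The EPREFIX handling above treats a bare "/" override as the empty prefix,
so later path joins cannot produce doubled slashes. A sketch of the same
rule pulled out of the module-level constant setup (the function wrapper is
illustrative only):

import os

def effective_eprefix(environ):
	"""Derive EPREFIX the way const.py does, from a given environment."""
	eprefix = environ.get("PORTAGE_OVERRIDE_EPREFIX", "")
	if eprefix:
		eprefix = os.path.normpath(eprefix)
		if eprefix == os.sep:
			eprefix = ""
	return eprefix

print(effective_eprefix({"PORTAGE_OVERRIDE_EPREFIX": "/"}))         # ''
print(effective_eprefix({"PORTAGE_OVERRIDE_EPREFIX": "/usr//x/"}))  # '/usr/x'
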
diff --git a/pym/portage/cvstree.py b/pym/portage/cvstree.py
index 3680ae41f..4a3afae11 100644
--- a/pym/portage/cvstree.py
+++ b/pym/portage/cvstree.py
@@ -1,5 +1,5 @@
# cvstree.py -- cvs tree utilities
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -15,20 +15,20 @@ from portage import _encodings
from portage import _unicode_encode
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
# [D]/Name/Version/Date/Flags/Tags
def pathdata(entries, path):
- """(entries,path)
- Returns the data(dict) for a specific file/dir at the path specified."""
- mysplit=path.split("/")
- myentries=entries
- mytarget=mysplit[-1]
- mysplit=mysplit[:-1]
+ """Returns the data(dict) for a specific file/dir at the path specified."""
+ mysplit = path.split("/")
+ myentries = entries
+ mytarget = mysplit[-1]
+ mysplit = mysplit[:-1]
for mys in mysplit:
if mys in myentries["dirs"]:
- myentries=myentries["dirs"][mys]
+ myentries = myentries["dirs"][mys]
else:
return None
if mytarget in myentries["dirs"]:
@@ -39,18 +39,17 @@ def pathdata(entries, path):
return None
def fileat(entries, path):
- return pathdata(entries,path)
+ return pathdata(entries, path)
def isadded(entries, path):
- """(entries,path)
- Returns true if the path exists and is added to the cvs tree."""
- mytarget=pathdata(entries, path)
+ """Returns True if the path exists and is added to the cvs tree."""
+ mytarget = pathdata(entries, path)
if mytarget:
if "cvs" in mytarget["status"]:
return 1
- basedir=os.path.dirname(path)
- filename=os.path.basename(path)
+ basedir = os.path.dirname(path)
+ filename = os.path.basename(path)
try:
myfile = io.open(
@@ -59,234 +58,250 @@ def isadded(entries, path):
mode='r', encoding=_encodings['content'], errors='strict')
except IOError:
return 0
- mylines=myfile.readlines()
+ mylines = myfile.readlines()
myfile.close()
- rep=re.compile("^\/"+re.escape(filename)+"\/");
+ rep = re.compile("^\/%s\/" % re.escape(filename))
for x in mylines:
if rep.search(x):
return 1
return 0
-def findnew(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that have been added but
+def findnew(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that have been added but
have not yet been committed. Returns a list of paths, optionally prepended
- with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
+ with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
for myfile in entries["files"]:
if "cvs" in entries["files"][myfile]["status"]:
if "0" == entries["files"][myfile]["revision"]:
- mylist.append(basedir+myfile)
+ mylist.append(basedir + myfile)
+
if recursive:
for mydir in entries["dirs"]:
- mylist+=findnew(entries["dirs"][mydir],recursive,basedir+mydir)
+ mylist += findnew(entries["dirs"][mydir], recursive, basedir + mydir)
+
return mylist
def findoption(entries, pattern, recursive=0, basedir=""):
- """(entries, pattern, recursive=0, basedir="")
- Iterate over paths of cvs entries for which the pattern.search() method
+ """Iterate over paths of cvs entries for which the pattern.search() method
finds a match. Returns a list of paths, optionally prepended with a
- basedir."""
+ basedir.
+ """
if not basedir.endswith("/"):
basedir += "/"
+
for myfile, mydata in entries["files"].items():
if "cvs" in mydata["status"]:
if pattern.search(mydata["flags"]):
- yield basedir+myfile
+ yield basedir + myfile
+
if recursive:
for mydir, mydata in entries["dirs"].items():
for x in findoption(mydata, pattern,
- recursive, basedir+mydir):
+ recursive, basedir + mydir):
yield x
-def findchanged(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that exist in the cvs tree
+def findchanged(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that exist in the cvs tree
and differ from the committed version. Returns a list of paths, optionally
- prepended with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
+ prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
for myfile in entries["files"]:
if "cvs" in entries["files"][myfile]["status"]:
if "current" not in entries["files"][myfile]["status"]:
if "exists" in entries["files"][myfile]["status"]:
- if entries["files"][myfile]["revision"]!="0":
- mylist.append(basedir+myfile)
+ if entries["files"][myfile]["revision"] != "0":
+ mylist.append(basedir + myfile)
+
if recursive:
for mydir in entries["dirs"]:
- mylist+=findchanged(entries["dirs"][mydir],recursive,basedir+mydir)
+ mylist += findchanged(entries["dirs"][mydir], recursive, basedir + mydir)
+
return mylist
-def findmissing(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that are listed in the cvs
+def findmissing(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that are listed in the cvs
tree but do not exist on the filesystem. Returns a list of paths,
- optionally prepended with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
+ optionally prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
for myfile in entries["files"]:
if "cvs" in entries["files"][myfile]["status"]:
if "exists" not in entries["files"][myfile]["status"]:
if "removed" not in entries["files"][myfile]["status"]:
- mylist.append(basedir+myfile)
+ mylist.append(basedir + myfile)
+
if recursive:
for mydir in entries["dirs"]:
- mylist+=findmissing(entries["dirs"][mydir],recursive,basedir+mydir)
+ mylist += findmissing(entries["dirs"][mydir], recursive, basedir + mydir)
+
return mylist
-def findunadded(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that are in valid cvs
+def findunadded(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that are in valid cvs
directories but are not part of the cvs tree. Returns a list of paths,
- optionally prepended with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
+ optionally prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
- #ignore what cvs ignores.
+ # Ignore what cvs ignores.
+ mylist = []
for myfile in entries["files"]:
if "cvs" not in entries["files"][myfile]["status"]:
- mylist.append(basedir+myfile)
+ mylist.append(basedir + myfile)
+
if recursive:
for mydir in entries["dirs"]:
- mylist+=findunadded(entries["dirs"][mydir],recursive,basedir+mydir)
+ mylist += findunadded(entries["dirs"][mydir], recursive, basedir + mydir)
+
return mylist
-def findremoved(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that are in flagged for cvs
- deletions. Returns a list of paths, optionally prepended with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
+def findremoved(entries, recursive=0, basedir=""):
+	"""Recurses the entries tree to find all elements that are flagged for cvs
+	deletion. Returns a list of paths, optionally prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
for myfile in entries["files"]:
if "removed" in entries["files"][myfile]["status"]:
- mylist.append(basedir+myfile)
+ mylist.append(basedir + myfile)
+
if recursive:
for mydir in entries["dirs"]:
- mylist+=findremoved(entries["dirs"][mydir],recursive,basedir+mydir)
+ mylist += findremoved(entries["dirs"][mydir], recursive, basedir + mydir)
+
return mylist
def findall(entries, recursive=0, basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all new, changed, missing, and unadded
- entities. Returns a 4 element list of lists as returned from each find*()."""
-
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mynew = findnew(entries,recursive,basedir)
- mychanged = findchanged(entries,recursive,basedir)
- mymissing = findmissing(entries,recursive,basedir)
- myunadded = findunadded(entries,recursive,basedir)
- myremoved = findremoved(entries,recursive,basedir)
+	"""Recurses the entries tree to find all new, changed, missing, unadded,
+	and removed entities. Returns a 5 element list of lists as returned from
+	each find*().
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+ mynew = findnew(entries, recursive, basedir)
+ mychanged = findchanged(entries, recursive, basedir)
+ mymissing = findmissing(entries, recursive, basedir)
+ myunadded = findunadded(entries, recursive, basedir)
+ myremoved = findremoved(entries, recursive, basedir)
return [mynew, mychanged, mymissing, myunadded, myremoved]
ignore_list = re.compile("(^|/)(RCS(|LOG)|SCCS|CVS(|\.adm)|cvslog\..*|tags|TAGS|\.(make\.state|nse_depinfo)|.*~|(\.|)#.*|,.*|_$.*|.*\$|\.del-.*|.*\.(old|BAK|bak|orig|rej|a|olb|o|obj|so|exe|Z|elc|ln)|core)$")
def apply_cvsignore_filter(list):
- x=0
+ x = 0
while x < len(list):
if ignore_list.match(list[x].split("/")[-1]):
list.pop(x)
else:
- x+=1
+ x += 1
return list
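
For illustration, apply_cvsignore_filter() mutates and returns the same
list, and tests only the basename of each path against ignore_list. The
paths below are hypothetical:

    from portage.cvstree import apply_cvsignore_filter

    paths = ["foo/bar.py", "foo/bar.py~", "foo/CVS", "baz/core"]
    apply_cvsignore_filter(paths)
    # paths is now ["foo/bar.py"]: "bar.py~", "CVS" and "core" all match
    # the default cvsignore pattern above.
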
-def getentries(mydir,recursive=0):
- """(basedir,recursive=0)
- Scans the given directory and returns a datadict of all the entries in
- the directory separated as a dirs dict and a files dict."""
- myfn=mydir+"/CVS/Entries"
+def getentries(mydir, recursive=0):
+	"""Scans the given directory and returns a data dict of all the entries in
+	the directory, separated into a dirs dict and a files dict.
+ """
+ myfn = mydir + "/CVS/Entries"
# entries=[dirs, files]
- entries={"dirs":{},"files":{}}
+ entries = {"dirs":{}, "files":{}}
if not os.path.exists(mydir):
return entries
try:
myfile = io.open(_unicode_encode(myfn,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'], errors='strict')
- mylines=myfile.readlines()
+ mylines = myfile.readlines()
myfile.close()
except SystemExit as e:
raise
except:
- mylines=[]
+ mylines = []
+
for line in mylines:
- if line and line[-1]=="\n":
- line=line[:-1]
+ if line and line[-1] == "\n":
+ line = line[:-1]
if not line:
continue
- if line=="D": # End of entries file
+ if line == "D": # End of entries file
break
- mysplit=line.split("/")
- if len(mysplit)!=6:
- print("Confused:",mysplit)
+ mysplit = line.split("/")
+ if len(mysplit) != 6:
+ print("Confused:", mysplit)
continue
- if mysplit[0]=="D":
- entries["dirs"][mysplit[1]]={"dirs":{},"files":{},"status":[]}
- entries["dirs"][mysplit[1]]["status"]=["cvs"]
+ if mysplit[0] == "D":
+ entries["dirs"][mysplit[1]] = {"dirs":{}, "files":{}, "status":[]}
+ entries["dirs"][mysplit[1]]["status"] = ["cvs"]
if os.path.isdir(mydir+"/"+mysplit[1]):
- entries["dirs"][mysplit[1]]["status"]+=["exists"]
- entries["dirs"][mysplit[1]]["flags"]=mysplit[2:]
+ entries["dirs"][mysplit[1]]["status"] += ["exists"]
+ entries["dirs"][mysplit[1]]["flags"] = mysplit[2:]
if recursive:
- rentries=getentries(mydir+"/"+mysplit[1],recursive)
- entries["dirs"][mysplit[1]]["dirs"]=rentries["dirs"]
- entries["dirs"][mysplit[1]]["files"]=rentries["files"]
+ rentries = getentries(mydir + "/" + mysplit[1], recursive)
+ entries["dirs"][mysplit[1]]["dirs"] = rentries["dirs"]
+ entries["dirs"][mysplit[1]]["files"] = rentries["files"]
else:
# [D]/Name/revision/Date/Flags/Tags
- entries["files"][mysplit[1]]={}
- entries["files"][mysplit[1]]["revision"]=mysplit[2]
- entries["files"][mysplit[1]]["date"]=mysplit[3]
- entries["files"][mysplit[1]]["flags"]=mysplit[4]
- entries["files"][mysplit[1]]["tags"]=mysplit[5]
- entries["files"][mysplit[1]]["status"]=["cvs"]
- if entries["files"][mysplit[1]]["revision"][0]=="-":
- entries["files"][mysplit[1]]["status"]+=["removed"]
+ entries["files"][mysplit[1]] = {}
+ entries["files"][mysplit[1]]["revision"] = mysplit[2]
+ entries["files"][mysplit[1]]["date"] = mysplit[3]
+ entries["files"][mysplit[1]]["flags"] = mysplit[4]
+ entries["files"][mysplit[1]]["tags"] = mysplit[5]
+ entries["files"][mysplit[1]]["status"] = ["cvs"]
+ if entries["files"][mysplit[1]]["revision"][0] == "-":
+ entries["files"][mysplit[1]]["status"] += ["removed"]
for file in os.listdir(mydir):
- if file=="CVS":
+ if file == "CVS":
continue
- if os.path.isdir(mydir+"/"+file):
+ if os.path.isdir(mydir + "/" + file):
if file not in entries["dirs"]:
if ignore_list.match(file) is not None:
continue
- entries["dirs"][file]={"dirs":{},"files":{}}
+ entries["dirs"][file] = {"dirs":{}, "files":{}}
# It's normal for a directory to be unlisted in Entries
# when checked out without -P (see bug #257660).
- rentries=getentries(mydir+"/"+file,recursive)
- entries["dirs"][file]["dirs"]=rentries["dirs"]
- entries["dirs"][file]["files"]=rentries["files"]
+ rentries = getentries(mydir + "/" + file, recursive)
+ entries["dirs"][file]["dirs"] = rentries["dirs"]
+ entries["dirs"][file]["files"] = rentries["files"]
if "status" in entries["dirs"][file]:
if "exists" not in entries["dirs"][file]["status"]:
- entries["dirs"][file]["status"]+=["exists"]
+ entries["dirs"][file]["status"] += ["exists"]
else:
- entries["dirs"][file]["status"]=["exists"]
- elif os.path.isfile(mydir+"/"+file):
+ entries["dirs"][file]["status"] = ["exists"]
+ elif os.path.isfile(mydir + "/" + file):
if file not in entries["files"]:
if ignore_list.match(file) is not None:
continue
- entries["files"][file]={"revision":"","date":"","flags":"","tags":""}
+ entries["files"][file] = {"revision":"", "date":"", "flags":"", "tags":""}
if "status" in entries["files"][file]:
if "exists" not in entries["files"][file]["status"]:
- entries["files"][file]["status"]+=["exists"]
+ entries["files"][file]["status"] += ["exists"]
else:
- entries["files"][file]["status"]=["exists"]
+ entries["files"][file]["status"] = ["exists"]
try:
- mystat=os.stat(mydir+"/"+file)
+ mystat = os.stat(mydir + "/" + file)
mytime = time.asctime(time.gmtime(mystat[stat.ST_MTIME]))
if "status" not in entries["files"][file]:
- entries["files"][file]["status"]=[]
- if mytime==entries["files"][file]["date"]:
- entries["files"][file]["status"]+=["current"]
+ entries["files"][file]["status"] = []
+ if mytime == entries["files"][file]["date"]:
+ entries["files"][file]["status"] += ["current"]
except SystemExit as e:
raise
except Exception as e:
- print("failed to stat",file)
+ print("failed to stat", file)
print(e)
return
@@ -294,6 +309,7 @@ def getentries(mydir,recursive=0):
pass
else:
print()
- print("File of unknown type:",mydir+"/"+file)
+ print("File of unknown type:", mydir + "/" + file)
print()
+
return entries
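
For reference, each line of a CVS/Entries file follows the
[D]/Name/revision/Date/Flags/Tags layout that the split() above depends on.
A small illustrative parse, with hypothetical file contents:

    line = "/ChangeLog/1.42/Mon Mar 24 12:00:00 2014//"
    fields = line.split("/")
    # ['', 'ChangeLog', '1.42', 'Mon Mar 24 12:00:00 2014', '', '']
    assert len(fields) == 6
    name, revision, date, flags, tags = fields[1:]
    # Directory entries start with "D" instead, e.g. "D/files////".
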
diff --git a/pym/portage/data.py b/pym/portage/data.py
index c4d967a1b..54e3a8d65 100644
--- a/pym/portage/data.py
+++ b/pym/portage/data.py
@@ -1,17 +1,18 @@
# data.py -- Calculated/Discovered Data Values
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-import os, pwd, grp, platform
+import os, pwd, grp, platform, sys
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.output:colorize',
'portage.util:writemsg',
+ 'subprocess'
)
from portage.localization import _
-ostype=platform.system()
+ostype = platform.system()
userland = None
if ostype == "DragonFly" or ostype.endswith("BSD"):
userland = "BSD"
@@ -22,10 +23,10 @@ lchown = getattr(os, "lchown", None)
if not lchown:
if ostype == "Darwin":
- def lchown(*pos_args, **key_args):
+ def lchown(*_args, **_kwargs):
pass
else:
- def lchown(*pargs, **kwargs):
+ def lchown(*_args, **_kwargs):
writemsg(colorize("BAD", "!!!") + _(
" It seems that os.lchown does not"
" exist. Please rebuild python.\n"), noiselevel=-1)
@@ -58,11 +59,10 @@ def portage_group_warning():
# If the "wheel" group does not exist then wheelgid falls back to 0.
# If the "portage" group does not exist then portage_uid falls back to wheelgid.
-uid=os.getuid()
-wheelgid=0
-
+uid = os.getuid()
+wheelgid = 0
try:
- wheelgid=grp.getgrnam("wheel")[2]
+ wheelgid = grp.getgrnam("wheel")[2]
except KeyError:
pass
@@ -85,19 +85,27 @@ def _get_global(k):
elif portage.const.EPREFIX:
secpass = 2
#Discover the uid and gid of the portage user/group
+ keyerror = False
try:
portage_uid = pwd.getpwnam(_get_global('_portage_username')).pw_uid
- _portage_grpname = _get_global('_portage_grpname')
- if platform.python_implementation() == 'PyPy':
- # Somehow this prevents "TypeError: expected string" errors
- # from grp.getgrnam() with PyPy 1.7
- _portage_grpname = str(_portage_grpname)
- portage_gid = grp.getgrnam(_portage_grpname).gr_gid
- if secpass < 1 and portage_gid in os.getgroups():
- secpass = 1
except KeyError:
+ keyerror = True
portage_uid = 0
+
+ try:
+ portage_gid = grp.getgrnam(_get_global('_portage_grpname')).gr_gid
+ except KeyError:
+ keyerror = True
portage_gid = 0
+
+ if secpass < 1 and portage_gid in os.getgroups():
+ secpass = 1
+
+ # Suppress this error message if both PORTAGE_GRPNAME and
+ # PORTAGE_USERNAME are set to "root", for things like
+ # Android (see bug #454060).
+ if keyerror and not (_get_global('_portage_username') == "root" and
+ _get_global('_portage_grpname') == "root"):
writemsg(colorize("BAD",
_("portage: 'portage' user or group missing.")) + "\n", noiselevel=-1)
writemsg(_(
@@ -129,10 +137,28 @@ def _get_global(k):
# Get a list of group IDs for the portage user. Do not use
# grp.getgrall() since it is known to trigger spurious
# SIGPIPE problems with nss_ldap.
- mystatus, myoutput = \
- portage.subprocess_getstatusoutput("id -G %s" % _portage_username)
- if mystatus == os.EX_OK:
- for x in myoutput.split():
+ cmd = ["id", "-G", _portage_username]
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(cmd[0])
+ if fullname is None:
+ globals()[k] = v
+ _initialized_globals.add(k)
+ return v
+ cmd[0] = fullname
+
+ encoding = portage._encodings['content']
+ cmd = [portage._unicode_encode(x,
+ encoding=encoding, errors='strict') for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ myoutput = proc.communicate()[0]
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ for x in portage._unicode_decode(myoutput,
+ encoding=encoding, errors='strict').split():
try:
v.append(int(x))
except ValueError:
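
The same pattern in isolation: a hedged sketch of collecting supplementary
group IDs via "id -G" without touching grp.getgrall(), with a hypothetical
username and simplified error handling:

    import os
    import subprocess

    def portable_gid_list(username):
        # Spawn "id -G" and parse its output, avoiding grp.getgrall()
        # and its known nss_ldap SIGPIPE problems.
        proc = subprocess.Popen(["id", "-G", username],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output = proc.communicate()[0]
        if proc.wait() != os.EX_OK:
            return []
        return [int(x) for x in output.decode().split()]
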
@@ -213,10 +239,18 @@ def _init(settings):
if '_portage_grpname' not in _initialized_globals and \
'_portage_username' not in _initialized_globals:
+ # Prevents "TypeError: expected string" errors
+ # from grp.getgrnam() with PyPy
+ native_string = platform.python_implementation() == 'PyPy'
+
v = settings.get('PORTAGE_GRPNAME', 'portage')
+ if native_string:
+ v = portage._native_string(v)
globals()['_portage_grpname'] = v
_initialized_globals.add('_portage_grpname')
v = settings.get('PORTAGE_USERNAME', 'portage')
+ if native_string:
+ v = portage._native_string(v)
globals()['_portage_username'] = v
_initialized_globals.add('_portage_username')
diff --git a/pym/portage/dbapi/_MergeProcess.py b/pym/portage/dbapi/_MergeProcess.py
index b5f6a0b0b..956dbb9e6 100644
--- a/pym/portage/dbapi/_MergeProcess.py
+++ b/pym/portage/dbapi/_MergeProcess.py
@@ -1,7 +1,8 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import io
+import platform
import signal
import sys
import traceback
@@ -10,10 +11,11 @@ import errno
import fcntl
import portage
from portage import os, _unicode_decode
+from portage.util._ctypes import find_library
import portage.elog.messages
-from _emerge.SpawnProcess import SpawnProcess
+from portage.util._async.ForkProcess import ForkProcess
-class MergeProcess(SpawnProcess):
+class MergeProcess(ForkProcess):
"""
Merge packages in a subprocess, so the Scheduler can run in the main
thread while files are moved or copied asynchronously.
@@ -40,11 +42,20 @@ class MergeProcess(SpawnProcess):
settings.reset()
settings.setcpv(cpv, mydb=self.mydbapi)
+ # This caches the libc library lookup in the current
+ # process, so that it's only done once rather than
+ # for each child process.
+ if platform.system() == "Linux" and \
+ "merge-sync" in settings.features:
+ find_library("c")
+
# Inherit stdin by default, so that the pdb SIGUSR1
# handler is usable for the subprocess.
if self.fd_pipes is None:
self.fd_pipes = {}
- self.fd_pipes.setdefault(0, sys.stdin.fileno())
+ else:
+ self.fd_pipes = self.fd_pipes.copy()
+ self.fd_pipes.setdefault(0, portage._get_stdin().fileno())
super(MergeProcess, self)._start()
@@ -90,7 +101,7 @@ class MergeProcess(SpawnProcess):
reporter(msg, phase=phase, key=key, out=out)
if event & self.scheduler.IO_HUP:
- self.scheduler.unregister(self._elog_reg_id)
+ self.scheduler.source_remove(self._elog_reg_id)
self._elog_reg_id = None
os.close(self._elog_reader_fd)
self._elog_reader_fd = None
@@ -101,12 +112,24 @@ class MergeProcess(SpawnProcess):
def _spawn(self, args, fd_pipes, **kwargs):
"""
Fork a subprocess, apply local settings, and call
- dblink.merge().
+ dblink.merge(). TODO: Share code with ForkProcess.
"""
elog_reader_fd, elog_writer_fd = os.pipe()
+
fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(elog_reader_fd, fcntl.F_SETFD,
+ fcntl.fcntl(elog_reader_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
blockers = None
if self.blockers is not None:
# Query blockers in the main process, since closing
@@ -116,10 +139,9 @@ class MergeProcess(SpawnProcess):
blockers = self.blockers()
mylink = portage.dblink(self.mycat, self.mypkg, settings=self.settings,
treetype=self.treetype, vartree=self.vartree,
- blockers=blockers, scheduler=self.scheduler,
- pipe=elog_writer_fd)
+ blockers=blockers, pipe=elog_writer_fd)
fd_pipes[elog_writer_fd] = elog_writer_fd
- self._elog_reg_id = self.scheduler.register(elog_reader_fd,
+ self._elog_reg_id = self.scheduler.io_add_watch(elog_reader_fd,
self._registered_events, self._elog_output_handler)
# If a concurrent emerge process tries to install a package
@@ -133,88 +155,100 @@ class MergeProcess(SpawnProcess):
if not self.unmerge:
counter = self.vartree.dbapi.counter_tick()
- pid = os.fork()
- if pid != 0:
- if not isinstance(pid, int):
- raise AssertionError(
- "fork returned non-integer: %s" % (repr(pid),))
-
- os.close(elog_writer_fd)
- self._elog_reader_fd = elog_reader_fd
- self._buf = ""
- self._elog_keys = set()
-
- # invalidate relevant vardbapi caches
- if self.vartree.dbapi._categories is not None:
- self.vartree.dbapi._categories = None
- self.vartree.dbapi._pkgs_changed = True
- self.vartree.dbapi._clear_pkg_cache(mylink)
-
- portage.process.spawned_pids.append(pid)
- return [pid]
-
- os.close(elog_reader_fd)
- portage.locks._close_fds()
- # Disable close_fds since we don't exec (see _setup_pipes docstring).
- portage.process._setup_pipes(fd_pipes, close_fds=False)
-
- # Use default signal handlers since the ones inherited
- # from the parent process are irrelevant here.
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
-
- portage.output.havecolor = self.settings.get('NOCOLOR') \
- not in ('yes', 'true')
-
- # In this subprocess we want mylink._display_merge() to use
- # stdout/stderr directly since they are pipes. This behavior
- # is triggered when mylink._scheduler is None.
- mylink._scheduler = None
-
- # Avoid wastful updates of the vdb cache.
- self.vartree.dbapi._flush_cache_enabled = False
-
- # In this subprocess we don't want PORTAGE_BACKGROUND to
- # suppress stdout/stderr output since they are pipes. We
- # also don't want to open PORTAGE_LOG_FILE, since it will
- # already be opened by the parent process, so we set the
- # "subprocess" value for use in conditional logging code
- # involving PORTAGE_LOG_FILE.
- if not self.unmerge:
- # unmerge phases have separate logs
- if self.settings.get("PORTAGE_BACKGROUND") == "1":
- self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
- else:
- self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
- self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
- self.settings["PORTAGE_BACKGROUND"] = "subprocess"
- self.settings.backup_changes("PORTAGE_BACKGROUND")
-
- rval = 1
+ parent_pid = os.getpid()
+ pid = None
try:
- if self.unmerge:
- if not mylink.exists():
- rval = os.EX_OK
- elif mylink.unmerge(
- ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
- mylink.lockdb()
- try:
- mylink.delete()
- finally:
- mylink.unlockdb()
- rval = os.EX_OK
- else:
- rval = mylink.merge(self.pkgloc, self.infloc,
- myebuild=self.myebuild, mydbapi=self.mydbapi,
- prev_mtimes=self.prev_mtimes, counter=counter)
- except SystemExit:
- raise
- except:
- traceback.print_exc()
+ pid = os.fork()
+
+ if pid != 0:
+ if not isinstance(pid, int):
+ raise AssertionError(
+ "fork returned non-integer: %s" % (repr(pid),))
+
+ os.close(elog_writer_fd)
+ self._elog_reader_fd = elog_reader_fd
+ self._buf = ""
+ self._elog_keys = set()
+ # Discard messages which will be collected by the subprocess,
+ # in order to avoid duplicates (bug #446136).
+ portage.elog.messages.collect_messages(key=mylink.mycpv)
+
+ # invalidate relevant vardbapi caches
+ if self.vartree.dbapi._categories is not None:
+ self.vartree.dbapi._categories = None
+ self.vartree.dbapi._pkgs_changed = True
+ self.vartree.dbapi._clear_pkg_cache(mylink)
+
+ return [pid]
+
+ os.close(elog_reader_fd)
+
+ # Use default signal handlers in order to avoid problems
+ # killing subprocesses as reported in bug #353239.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ portage.locks._close_fds()
+ # We don't exec, so use close_fds=False
+ # (see _setup_pipes docstring).
+ portage.process._setup_pipes(fd_pipes, close_fds=False)
+
+ portage.output.havecolor = self.settings.get('NOCOLOR') \
+ not in ('yes', 'true')
+
+			# Avoid wasteful updates of the vdb cache.
+ self.vartree.dbapi._flush_cache_enabled = False
+
+ # In this subprocess we don't want PORTAGE_BACKGROUND to
+ # suppress stdout/stderr output since they are pipes. We
+ # also don't want to open PORTAGE_LOG_FILE, since it will
+ # already be opened by the parent process, so we set the
+ # "subprocess" value for use in conditional logging code
+ # involving PORTAGE_LOG_FILE.
+ if not self.unmerge:
+ # unmerge phases have separate logs
+ if self.settings.get("PORTAGE_BACKGROUND") == "1":
+ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
+ else:
+ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
+ self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
+ self.settings["PORTAGE_BACKGROUND"] = "subprocess"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+
+ rval = 1
+ try:
+ if self.unmerge:
+ if not mylink.exists():
+ rval = os.EX_OK
+ elif mylink.unmerge(
+ ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
+ mylink.lockdb()
+ try:
+ mylink.delete()
+ finally:
+ mylink.unlockdb()
+ rval = os.EX_OK
+ else:
+ rval = mylink.merge(self.pkgloc, self.infloc,
+ myebuild=self.myebuild, mydbapi=self.mydbapi,
+ prev_mtimes=self.prev_mtimes, counter=counter)
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ # os._exit() skips stderr flush!
+ sys.stderr.flush()
+ finally:
+ os._exit(rval)
+
finally:
- # Call os._exit() from finally block, in order to suppress any
- # finally blocks from earlier in the call stack. See bug #345289.
- os._exit(rval)
+ if pid == 0 or (pid is None and os.getpid() != parent_pid):
+ # Call os._exit() from a finally block in order
+ # to suppress any finally blocks from earlier
+ # in the call stack (see bug #345289). This
+ # finally block has to be setup before the fork
+ # in order to avoid a race condition.
+ os._exit(1)
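
The rework above arms the finally block before os.fork() so the child can
never escape it. A condensed sketch of the pattern, with a hypothetical
work() standing in for dblink.merge():

    import os

    def work():
        return os.EX_OK  # placeholder child workload

    parent_pid = os.getpid()
    pid = None
    try:
        pid = os.fork()
        if pid != 0:
            pass  # parent: carry on normally
        else:
            os._exit(work())  # child: never return from here
    finally:
        # If the child ever falls through to this point (work() raised,
        # or fork() misbehaved), terminate it hard so finally blocks
        # further up the call stack never run twice (bug #345289).
        if pid == 0 or (pid is None and os.getpid() != parent_pid):
            os._exit(1)
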
def _unregister(self):
"""
@@ -231,7 +265,7 @@ class MergeProcess(SpawnProcess):
self._unlock_vdb()
if self._elog_reg_id is not None:
- self.scheduler.unregister(self._elog_reg_id)
+ self.scheduler.source_remove(self._elog_reg_id)
self._elog_reg_id = None
if self._elog_reader_fd is not None:
os.close(self._elog_reader_fd)
diff --git a/pym/portage/dbapi/_SyncfsProcess.py b/pym/portage/dbapi/_SyncfsProcess.py
new file mode 100644
index 000000000..7518214ec
--- /dev/null
+++ b/pym/portage/dbapi/_SyncfsProcess.py
@@ -0,0 +1,53 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.util._ctypes import find_library, LoadLibrary
+from portage.util._async.ForkProcess import ForkProcess
+
+class SyncfsProcess(ForkProcess):
+ """
+ Isolate ctypes usage in a subprocess, in order to avoid
+ potential problems with stale cached libraries as
+ described in bug #448858, comment #14 (also see
+ http://bugs.python.org/issue14597).
+ """
+
+ __slots__ = ('paths',)
+
+ @staticmethod
+ def _get_syncfs():
+
+ filename = find_library("c")
+ if filename is not None:
+ library = LoadLibrary(filename)
+ if library is not None:
+ try:
+ return library.syncfs
+ except AttributeError:
+ pass
+
+ return None
+
+ def _run(self):
+
+ syncfs_failed = False
+ syncfs = self._get_syncfs()
+
+ if syncfs is not None:
+ for path in self.paths:
+ try:
+ fd = os.open(path, os.O_RDONLY)
+ except OSError:
+ pass
+ else:
+ try:
+ if syncfs(fd) != 0:
+ # Happens with PyPy (bug #446610)
+ syncfs_failed = True
+ finally:
+ os.close(fd)
+
+ if syncfs is None or syncfs_failed:
+ return 1
+ return os.EX_OK
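
For context, the lookup above reduces to resolving the syncfs symbol from
libc. A standalone sketch with plain ctypes; Linux-only, glibc >= 2.14, and
the path is hypothetical:

    import ctypes
    import ctypes.util
    import os

    libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
    try:
        syncfs = libc.syncfs  # raises AttributeError on older glibc
    except AttributeError:
        syncfs = None

    if syncfs is not None:
        fd = os.open("/home", os.O_RDONLY)  # any path on the target fs
        try:
            if syncfs(fd) != 0:
                raise OSError(ctypes.get_errno(), "syncfs failed")
        finally:
            os.close(fd)
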
diff --git a/pym/portage/dbapi/__init__.py b/pym/portage/dbapi/__init__.py
index b999fb5df..a20a1e84f 100644
--- a/pym/portage/dbapi/__init__.py
+++ b/pym/portage/dbapi/__init__.py
@@ -1,6 +1,8 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ["dbapi"]
import re
@@ -16,16 +18,18 @@ portage.proxy.lazyimport.lazyimport(globals(),
from portage import os
from portage import auxdbkeys
+from portage.eapi import _get_eapi_attrs
from portage.exception import InvalidData
from portage.localization import _
+from _emerge.Package import Package
class dbapi(object):
- _category_re = re.compile(r'^\w[-.+\w]*$')
+ _category_re = re.compile(r'^\w[-.+\w]*$', re.UNICODE)
_categories = None
_use_mutable = False
_known_keys = frozenset(x for x in auxdbkeys
if not x.startswith("UNUSED_0"))
- _pkg_str_aux_keys = ("EAPI", "SLOT", "repository")
+ _pkg_str_aux_keys = ("EAPI", "KEYWORDS", "SLOT", "repository")
def __init__(self):
pass
@@ -153,8 +157,7 @@ class dbapi(object):
metadata = dict(zip(self._pkg_str_aux_keys,
self.aux_get(cpv, self._pkg_str_aux_keys, myrepo=repo)))
- return _pkg_str(cpv, slot=metadata["SLOT"],
- repo=metadata["repository"], eapi=metadata["EAPI"])
+ return _pkg_str(cpv, metadata=metadata, settings=self.settings)
def _iter_match_repo(self, atom, cpv_iter):
for cpv in cpv_iter:
@@ -182,7 +185,7 @@ class dbapi(object):
2) Check enabled/disabled flag states.
"""
- aux_keys = ["IUSE", "SLOT", "USE", "repository"]
+ aux_keys = ["EAPI", "IUSE", "KEYWORDS", "SLOT", "USE", "repository"]
for cpv in cpv_iter:
try:
metadata = dict(zip(aux_keys,
@@ -190,17 +193,31 @@ class dbapi(object):
except KeyError:
continue
+ try:
+ cpv.slot
+ except AttributeError:
+ try:
+ cpv = _pkg_str(cpv, metadata=metadata,
+ settings=self.settings)
+ except InvalidData:
+ continue
+
if not self._match_use(atom, cpv, metadata):
continue
yield cpv
- def _match_use(self, atom, cpv, metadata):
- iuse_implicit_match = self.settings._iuse_implicit_match
- iuse = frozenset(x.lstrip('+-') for x in metadata["IUSE"].split())
+ def _match_use(self, atom, pkg, metadata):
+ eapi_attrs = _get_eapi_attrs(metadata["EAPI"])
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = self.settings._iuse_effective_match
+ else:
+ iuse_implicit_match = self.settings._iuse_implicit_match
+ usealiases = self.settings._use_manager.getUseAliases(pkg)
+ iuse = Package._iuse(None, metadata["IUSE"].split(), iuse_implicit_match, usealiases, metadata["EAPI"])
for x in atom.unevaluated_atom.use.required:
- if x not in iuse and not iuse_implicit_match(x):
+ if iuse.get_real_flag(x) is None:
return False
if atom.use is None:
@@ -210,44 +227,54 @@ class dbapi(object):
# Use IUSE to validate USE settings for built packages,
# in case the package manager that built this package
# failed to do that for some reason (or in case of
- # data corruption).
- use = frozenset(x for x in metadata["USE"].split()
- if x in iuse or iuse_implicit_match(x))
- missing_enabled = atom.use.missing_enabled.difference(iuse)
- missing_disabled = atom.use.missing_disabled.difference(iuse)
-
- if atom.use.enabled:
- if any(x in atom.use.enabled for x in missing_disabled):
+ # data corruption). The enabled flags must be consistent
+ # with implicit IUSE, in order to avoid potential
+ # inconsistencies in USE dep matching (see bug #453400).
+ use = frozenset(x for x in metadata["USE"].split() if iuse.get_real_flag(x) is not None)
+ missing_enabled = frozenset(x for x in atom.use.missing_enabled if iuse.get_real_flag(x) is None)
+ missing_disabled = frozenset(x for x in atom.use.missing_disabled if iuse.get_real_flag(x) is None)
+ enabled = frozenset((iuse.get_real_flag(x) or x) for x in atom.use.enabled)
+ disabled = frozenset((iuse.get_real_flag(x) or x) for x in atom.use.disabled)
+
+ if enabled:
+ if any(x in enabled for x in missing_disabled):
return False
- need_enabled = atom.use.enabled.difference(use)
+ need_enabled = enabled.difference(use)
if need_enabled:
if any(x not in missing_enabled for x in need_enabled):
return False
- if atom.use.disabled:
- if any(x in atom.use.disabled for x in missing_enabled):
+ if disabled:
+ if any(x in disabled for x in missing_enabled):
return False
- need_disabled = atom.use.disabled.intersection(use)
+ need_disabled = disabled.intersection(use)
if need_disabled:
if any(x not in missing_disabled for x in need_disabled):
return False
elif not self.settings.local_config:
# Check masked and forced flags for repoman.
- if hasattr(cpv, 'slot'):
- pkg = cpv
- else:
- pkg = _pkg_str(cpv, slot=metadata["SLOT"],
- repo=metadata.get("repository"))
- usemask = self.settings._getUseMask(pkg)
+ usemask = self.settings._getUseMask(pkg,
+ stable=self.settings._parent_stable)
if any(x in usemask for x in atom.use.enabled):
return False
- useforce = self.settings._getUseForce(pkg)
+ useforce = self.settings._getUseForce(pkg,
+ stable=self.settings._parent_stable)
if any(x in useforce and x not in usemask
for x in atom.use.disabled):
return False
+ # Check unsatisfied use-default deps
+ if atom.use.enabled:
+ missing_disabled = frozenset(x for x in atom.use.missing_disabled if iuse.get_real_flag(x) is None)
+ if any(x in atom.use.enabled for x in missing_disabled):
+ return False
+ if atom.use.disabled:
+ missing_enabled = frozenset(x for x in atom.use.missing_enabled if iuse.get_real_flag(x) is None)
+ if any(x in atom.use.disabled for x in missing_enabled):
+ return False
+
return True
def invalidentry(self, mypath):
@@ -275,7 +302,8 @@ class dbapi(object):
maxval = len(cpv_all)
aux_get = self.aux_get
aux_update = self.aux_update
- meta_keys = ["DEPEND", "EAPI", "RDEPEND", "PDEPEND", "PROVIDE", 'repository']
+ update_keys = Package._dep_keys + ("PROVIDE",)
+ meta_keys = update_keys + self._pkg_str_aux_keys
repo_dict = None
if isinstance(updates, dict):
repo_dict = updates
@@ -284,14 +312,20 @@ class dbapi(object):
if onProgress:
onProgress(maxval, 0)
for i, cpv in enumerate(cpv_all):
- metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
- eapi = metadata.pop('EAPI')
- repo = metadata.pop('repository')
+ try:
+ metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+ except KeyError:
+ continue
+ try:
+ pkg = _pkg_str(cpv, metadata=metadata, settings=self.settings)
+ except InvalidData:
+ continue
+ metadata = dict((k, metadata[k]) for k in update_keys)
if repo_dict is None:
updates_list = updates
else:
try:
- updates_list = repo_dict[repo]
+ updates_list = repo_dict[pkg.repo]
except KeyError:
try:
updates_list = repo_dict['DEFAULT']
@@ -302,7 +336,7 @@ class dbapi(object):
continue
metadata_updates = \
- portage.update_dbentries(updates_list, metadata, eapi=eapi)
+ portage.update_dbentries(updates_list, metadata, parent=pkg)
if metadata_updates:
aux_update(cpv, metadata_updates)
if onUpdate:
@@ -343,9 +377,9 @@ class dbapi(object):
continue
moves += 1
if "/" not in newslot and \
- mycpv.slot_abi and \
- mycpv.slot_abi not in (mycpv.slot, newslot):
- newslot = "%s/%s" % (newslot, mycpv.slot_abi)
+ mycpv.sub_slot and \
+ mycpv.sub_slot not in (mycpv.slot, newslot):
+ newslot = "%s/%s" % (newslot, mycpv.sub_slot)
mydata = {"SLOT": newslot+"\n"}
self.aux_update(mycpv, mydata)
return moves
diff --git a/pym/portage/dbapi/_expand_new_virt.py b/pym/portage/dbapi/_expand_new_virt.py
index d379b4c1d..9aa603d11 100644
--- a/pym/portage/dbapi/_expand_new_virt.py
+++ b/pym/portage/dbapi/_expand_new_virt.py
@@ -1,8 +1,11 @@
-# Copyright 2011 Gentoo Foundation
+# Copyright 2011-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import portage
from portage.dep import Atom, _get_useflag_re
+from portage.eapi import _get_eapi_attrs
def expand_new_virt(vardb, atom):
"""
@@ -44,6 +47,7 @@ def expand_new_virt(vardb, atom):
yield atom
continue
+ eapi_attrs = _get_eapi_attrs(eapi)
 	# Validate IUSE and USE, for early detection of vardb corruption.
useflag_re = _get_useflag_re(eapi)
valid_iuse = []
@@ -54,7 +58,11 @@ def expand_new_virt(vardb, atom):
valid_iuse.append(x)
valid_iuse = frozenset(valid_iuse)
- iuse_implicit_match = vardb.settings._iuse_implicit_match
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = vardb.settings._iuse_effective_match
+ else:
+ iuse_implicit_match = vardb.settings._iuse_implicit_match
+
valid_use = []
for x in use.split():
if x in valid_iuse or iuse_implicit_match(x):
diff --git a/pym/portage/dbapi/_similar_name_search.py b/pym/portage/dbapi/_similar_name_search.py
new file mode 100644
index 000000000..b6e4a1fbe
--- /dev/null
+++ b/pym/portage/dbapi/_similar_name_search.py
@@ -0,0 +1,57 @@
+# Copyright 2011-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import difflib
+
+from portage.versions import catsplit
+
+def similar_name_search(dbs, atom):
+
+ cp_lower = atom.cp.lower()
+ cat, pkg = catsplit(cp_lower)
+ if cat == "null":
+ cat = None
+
+ all_cp = set()
+ for db in dbs:
+ all_cp.update(db.cp_all())
+
+ # discard dir containing no ebuilds
+ all_cp.discard(atom.cp)
+
+ orig_cp_map = {}
+ for cp_orig in all_cp:
+ orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
+ all_cp = set(orig_cp_map)
+
+ if cat:
+ matches = difflib.get_close_matches(cp_lower, all_cp)
+ else:
+ pkg_to_cp = {}
+ for other_cp in list(all_cp):
+ other_pkg = catsplit(other_cp)[1]
+ if other_pkg == pkg:
+ # Check for non-identical package that
+ # differs only by upper/lower case.
+ identical = True
+ for cp_orig in orig_cp_map[other_cp]:
+ if catsplit(cp_orig)[1] != \
+ catsplit(atom.cp)[1]:
+ identical = False
+ break
+ if identical:
+ # discard dir containing no ebuilds
+ all_cp.discard(other_cp)
+ continue
+ pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
+
+ pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
+ matches = []
+ for pkg_match in pkg_matches:
+ matches.extend(pkg_to_cp[pkg_match])
+
+ matches_orig_case = []
+ for cp in matches:
+ matches_orig_case.extend(orig_cp_map[cp])
+
+ return matches_orig_case
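
The heavy lifting in similar_name_search() is done by difflib; the core call
is simply get_close_matches() with its default 0.6 similarity cutoff. The
candidate names here are hypothetical:

    import difflib

    candidates = ["sys-apps/portage", "sys-apps/portage-utils",
        "app-portage/eix"]
    difflib.get_close_matches("sys-apps/portgae", candidates)
    # Closest matches above the cutoff, best first; here the misspelled
    # atom resolves to "sys-apps/portage" and similarly named packages.
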
diff --git a/pym/portage/dbapi/bintree.py b/pym/portage/dbapi/bintree.py
index 9527b0766..229ce3b18 100644
--- a/pym/portage/dbapi/bintree.py
+++ b/pym/portage/dbapi/bintree.py
@@ -1,11 +1,14 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ["bindbapi", "binarytree"]
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.checksum:hashfunc_map,perform_multiple_checksums,verify_all',
+ 'portage.checksum:hashfunc_map,perform_multiple_checksums,' + \
+ 'verify_all,_apply_hash_filter,_hash_filter',
'portage.dbapi.dep_expand:dep_expand',
'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list',
'portage.output:EOutput,colorize',
@@ -24,7 +27,7 @@ from portage.const import CACHE_PATH
from portage.dbapi.virtual import fakedbapi
from portage.dep import Atom, use_reduce, paren_enclose
from portage.exception import AlarmSignal, InvalidData, InvalidPackageName, \
- PermissionDenied, PortageException
+ ParseError, PermissionDenied, PortageException
from portage.localization import _
from portage import _movefile
from portage import os
@@ -40,7 +43,9 @@ import subprocess
import sys
import tempfile
import textwrap
+import traceback
import warnings
+from gzip import GzipFile
from itertools import chain
try:
from urllib.parse import urlparse
@@ -48,12 +53,18 @@ except ImportError:
from urlparse import urlparse
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
_unicode = str
basestring = str
long = int
else:
_unicode = unicode
+class UseCachedCopyOfRemoteIndex(Exception):
+ # If the local copy is recent enough
+ # then fetching the remote index can be skipped.
+ pass
+
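
The exception above supports the conditional fetch used further down: the
TIMESTAMP of the local Packages file is sent as If-Modified-Since, and an
HTTP 304 reply means the cached index is still current. A rough sketch of
that handshake with plain urllib, using a hypothetical binhost URL:

    try:
        from urllib.request import Request, urlopen
        from urllib.error import HTTPError
    except ImportError:  # Python 2
        from urllib2 import Request, urlopen, HTTPError
    from email.utils import formatdate

    local_timestamp = 1395835200  # from the cached index header
    req = Request("http://binhost.example/Packages")
    req.add_header('If-Modified-Since',
        formatdate(local_timestamp, usegmt=True))
    try:
        f = urlopen(req)  # f now streams the up-to-date index
    except HTTPError as err:
        if err.code == 304:  # not modified since local_timestamp
            raise UseCachedCopyOfRemoteIndex()
        raise
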
class bindbapi(fakedbapi):
_known_keys = frozenset(list(fakedbapi._known_keys) + \
["CHOST", "repository", "USE"])
@@ -65,9 +76,10 @@ class bindbapi(fakedbapi):
self.cpdict={}
# Selectively cache metadata in order to optimize dep matching.
self._aux_cache_keys = set(
- ["BUILD_TIME", "CHOST", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
+ ["BUILD_TIME", "CHOST", "DEPEND", "EAPI",
+ "HDEPEND", "IUSE", "KEYWORDS",
"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE",
- "RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES",
+ "RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES"
])
self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
self._aux_cache = {}
@@ -130,15 +142,15 @@ class bindbapi(fakedbapi):
if myval:
mydata[x] = " ".join(myval.split())
- if not mydata.setdefault('EAPI', _unicode_decode('0')):
- mydata['EAPI'] = _unicode_decode('0')
+ if not mydata.setdefault('EAPI', '0'):
+ mydata['EAPI'] = '0'
if cache_me:
aux_cache = self._aux_cache_slot_dict()
for x in self._aux_cache_keys:
- aux_cache[x] = mydata.get(x, _unicode_decode(''))
+ aux_cache[x] = mydata.get(x, '')
self._aux_cache[mycpv] = aux_cache
- return [mydata.get(x, _unicode_decode('')) for x in wants]
+ return [mydata.get(x, '') for x in wants]
def aux_update(self, cpv, values):
if not self.bintree.populated:
@@ -250,7 +262,7 @@ def _pkgindex_cpv_map_latest_build(pkgindex):
class binarytree(object):
"this tree scans for a list of all packages available in PKGDIR"
- def __init__(self, _unused=None, pkgdir=None,
+ def __init__(self, _unused=DeprecationWarning, pkgdir=None,
virtual=DeprecationWarning, settings=None):
if pkgdir is None:
@@ -259,11 +271,11 @@ class binarytree(object):
if settings is None:
raise TypeError("settings parameter is required")
- if _unused is not None and _unused != settings['ROOT']:
- warnings.warn("The root parameter of the "
+ if _unused is not DeprecationWarning:
+ warnings.warn("The first parameter of the "
"portage.dbapi.bintree.binarytree"
- " constructor is now unused. Use "
- "settings['ROOT'] instead.",
+ " constructor is now unused. Instead "
+ "settings['ROOT'] is used.",
DeprecationWarning, stacklevel=2)
if virtual is not DeprecationWarning:
@@ -295,22 +307,26 @@ class binarytree(object):
self._pkgindex_keys.update(["CPV", "MTIME", "SIZE"])
self._pkgindex_aux_keys = \
["BUILD_TIME", "CHOST", "DEPEND", "DESCRIPTION", "EAPI",
- "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES",
- "PROVIDE", "RDEPEND", "repository", "SLOT", "USE", "DEFINED_PHASES",
+ "HDEPEND", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES",
+ "PROVIDE", "RESTRICT", "RDEPEND", "repository", "SLOT", "USE", "DEFINED_PHASES",
"BASE_URI"]
self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
self._pkgindex_use_evaluated_keys = \
- ("LICENSE", "RDEPEND", "DEPEND",
- "PDEPEND", "PROPERTIES", "PROVIDE")
+ ("DEPEND", "HDEPEND", "LICENSE", "RDEPEND",
+ "PDEPEND", "PROPERTIES", "PROVIDE", "RESTRICT")
self._pkgindex_header_keys = set([
"ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
- "ACCEPT_PROPERTIES", "CBUILD",
+ "ACCEPT_PROPERTIES", "ACCEPT_RESTRICT", "CBUILD",
"CONFIG_PROTECT", "CONFIG_PROTECT_MASK", "FEATURES",
- "GENTOO_MIRRORS", "INSTALL_MASK", "SYNC", "USE"])
+ "GENTOO_MIRRORS", "INSTALL_MASK", "IUSE_IMPLICIT", "USE",
+ "USE_EXPAND", "USE_EXPAND_HIDDEN", "USE_EXPAND_IMPLICIT",
+ "USE_EXPAND_UNPREFIXED"])
self._pkgindex_default_pkg_data = {
"BUILD_TIME" : "",
+ "DEFINED_PHASES" : "",
"DEPEND" : "",
"EAPI" : "0",
+ "HDEPEND" : "",
"IUSE" : "",
"KEYWORDS": "",
"LICENSE" : "",
@@ -322,7 +338,6 @@ class binarytree(object):
"RESTRICT": "",
"SLOT" : "0",
"USE" : "",
- "DEFINED_PHASES" : "",
}
self._pkgindex_inherited_keys = ["CHOST", "repository"]
@@ -416,7 +431,7 @@ class binarytree(object):
moves += 1
mytbz2 = portage.xpak.tbz2(tbz2path)
mydata = mytbz2.get_data()
- updated_items = update_dbentries([mylist], mydata, eapi=mycpv.eapi)
+ updated_items = update_dbentries([mylist], mydata, parent=mycpv)
mydata.update(updated_items)
mydata[b'PF'] = \
_unicode_encode(mynewpkg + "\n",
@@ -552,6 +567,20 @@ class binarytree(object):
if not os.path.isdir(path):
raise
+ def _file_permissions(self, path):
+ try:
+ pkgdir_st = os.stat(self.pkgdir)
+ except OSError:
+ pass
+ else:
+ pkgdir_gid = pkgdir_st.st_gid
+ pkgdir_grp_mode = 0o0060 & pkgdir_st.st_mode
+ try:
+ portage.util.apply_permissions(path, gid=pkgdir_gid,
+ mode=pkgdir_grp_mode, mask=0)
+ except PortageException:
+ pass
+
def _move_to_all(self, cpv):
"""If the file exists, move it. Whether or not it exists, update state
for future getname() calls."""
@@ -807,9 +836,7 @@ class binarytree(object):
del pkgindex.packages[:]
pkgindex.packages.extend(iter(metadata.values()))
self._update_pkgindex_header(pkgindex.header)
- f = atomic_ofstream(self._pkgindex_file)
- pkgindex.write(f)
- f.close()
+ self._pkgindex_write(pkgindex)
if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
@@ -852,6 +879,7 @@ class binarytree(object):
if e.errno != errno.ENOENT:
raise
local_timestamp = pkgindex.header.get("TIMESTAMP", None)
+ remote_timestamp = None
rmt_idx = self._new_pkgindex()
proc = None
tmp_filename = None
@@ -860,41 +888,79 @@ class binarytree(object):
# protocols and requires the base url to have a trailing
# slash, so join manually...
url = base_url.rstrip("/") + "/Packages"
- try:
- f = _urlopen(url)
- except IOError:
- path = parsed_url.path.rstrip("/") + "/Packages"
+ f = None
+
+ # Don't use urlopen for https, since it doesn't support
+ # certificate/hostname verification (bug #469888).
+ if parsed_url.scheme not in ('https',):
+ try:
+ f = _urlopen(url, if_modified_since=local_timestamp)
+ if hasattr(f, 'headers') and f.headers.get('timestamp', ''):
+ remote_timestamp = f.headers.get('timestamp')
+ except IOError as err:
+ if hasattr(err, 'code') and err.code == 304: # not modified (since local_timestamp)
+ raise UseCachedCopyOfRemoteIndex()
+
+ if parsed_url.scheme in ('ftp', 'http', 'https'):
+ # This protocol is supposedly supported by urlopen,
+ # so apparently there's a problem with the url
+ # or a bug in urlopen.
+ if self.settings.get("PORTAGE_DEBUG", "0") != "0":
+ traceback.print_exc()
- if parsed_url.scheme == 'sftp':
- # The sftp command complains about 'Illegal seek' if
- # we try to make it write to /dev/stdout, so use a
- # temp file instead.
- fd, tmp_filename = tempfile.mkstemp()
- os.close(fd)
- if port is not None:
- port_args = ['-P', "%s" % (port,)]
- proc = subprocess.Popen(['sftp'] + port_args + \
- [user_passwd + host + ":" + path, tmp_filename])
- if proc.wait() != os.EX_OK:
raise
- f = open(tmp_filename, 'rb')
- elif parsed_url.scheme == 'ssh':
+ except ValueError:
+ raise ParseError("Invalid Portage BINHOST value '%s'"
+ % url.lstrip())
+
+ if f is None:
+
+ path = parsed_url.path.rstrip("/") + "/Packages"
+
+ if parsed_url.scheme == 'ssh':
+ # Use a pipe so that we can terminate the download
+ # early if we detect that the TIMESTAMP header
+ # matches that of the cached Packages file.
+ ssh_args = ['ssh']
if port is not None:
- port_args = ['-p', "%s" % (port,)]
- proc = subprocess.Popen(['ssh'] + port_args + \
- [user_passwd + host, '--', 'cat', path],
+ ssh_args.append("-p%s" % (port,))
+ # NOTE: shlex evaluates embedded quotes
+ ssh_args.extend(portage.util.shlex_split(
+ self.settings.get("PORTAGE_SSH_OPTS", "")))
+ ssh_args.append(user_passwd + host)
+ ssh_args.append('--')
+ ssh_args.append('cat')
+ ssh_args.append(path)
+
+ proc = subprocess.Popen(ssh_args,
stdout=subprocess.PIPE)
f = proc.stdout
else:
setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
fcmd = self.settings.get(setting)
if not fcmd:
- raise
+ fcmd = self.settings.get('FETCHCOMMAND')
+ if not fcmd:
+ raise EnvironmentError("FETCHCOMMAND is unset")
+
fd, tmp_filename = tempfile.mkstemp()
tmp_dirname, tmp_basename = os.path.split(tmp_filename)
os.close(fd)
- success = portage.getbinpkg.file_get(url,
- tmp_dirname, fcmd=fcmd, filename=tmp_basename)
+
+ fcmd_vars = {
+ "DISTDIR": tmp_dirname,
+ "FILE": tmp_basename,
+ "URI": url
+ }
+
+ for k in ("PORTAGE_SSH_OPTS",):
+ try:
+ fcmd_vars[k] = self.settings[k]
+ except KeyError:
+ pass
+
+ success = portage.getbinpkg.file_get(
+ fcmd=fcmd, fcmd_vars=fcmd_vars)
if not success:
raise EnvironmentError("%s failed" % (setting,))
f = open(tmp_filename, 'rb')
@@ -903,7 +969,8 @@ class binarytree(object):
_encodings['repo.content'], errors='replace')
try:
rmt_idx.readHeader(f_dec)
- remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
+ if not remote_timestamp: # in case it had not been read from HTTP header
+ remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
if not remote_timestamp:
# no timestamp in the header, something's wrong
pkgindex = None
@@ -931,6 +998,12 @@ class binarytree(object):
writemsg("\n\n!!! %s\n" % \
_("Timed out while closing connection to binhost"),
noiselevel=-1)
+ except UseCachedCopyOfRemoteIndex:
+ writemsg_stdout("\n")
+ writemsg_stdout(
+ colorize("GOOD", _("Local copy of remote index is up-to-date and will be used.")) + \
+ "\n")
+ rmt_idx = pkgindex
except EnvironmentError as e:
writemsg(_("\n\n!!! Error fetching binhost package" \
" info from '%s'\n") % _hide_url_passwd(base_url))
@@ -999,75 +1072,7 @@ class binarytree(object):
# Local package instances override remote instances.
for cpv in metadata:
self._remotepkgs.pop(cpv, None)
- continue
- try:
- chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
- if chunk_size < 8:
- chunk_size = 8
- except (ValueError, KeyError):
- chunk_size = 3000
- writemsg_stdout("\n")
- writemsg_stdout(
- colorize("GOOD", _("Fetching bininfo from ")) + \
- _hide_url_passwd(base_url) + "\n")
- remotepkgs = portage.getbinpkg.dir_get_metadata(
- base_url, chunk_size=chunk_size)
-
- for mypkg, remote_metadata in remotepkgs.items():
- mycat = remote_metadata.get("CATEGORY")
- if mycat is None:
- #old-style or corrupt package
- writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
- noiselevel=-1)
- continue
- mycat = mycat.strip()
- try:
- fullpkg = _pkg_str(mycat+"/"+mypkg[:-5])
- except InvalidData:
- writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
- noiselevel=-1)
- continue
-
- if fullpkg in metadata:
- # When using this old protocol, comparison with the remote
- # package isn't supported, so the local package is always
- # preferred even if getbinpkgsonly is enabled.
- continue
-
- if not self.dbapi._category_re.match(mycat):
- writemsg(_("!!! Remote binary package has an " \
- "unrecognized category: '%s'\n") % fullpkg,
- noiselevel=-1)
- writemsg(_("!!! '%s' has a category that is not" \
- " listed in %setc/portage/categories\n") % \
- (fullpkg, self.settings["PORTAGE_CONFIGROOT"]),
- noiselevel=-1)
- continue
- mykey = portage.cpv_getkey(fullpkg)
- try:
- # invalid tbz2's can hurt things.
- self.dbapi.cpv_inject(fullpkg)
- for k, v in remote_metadata.items():
- remote_metadata[k] = v.strip()
- remote_metadata["BASE_URI"] = base_url
-
- # Eliminate metadata values with names that digestCheck
- # uses, since they are not valid when using the old
- # protocol. Typically this is needed for SIZE metadata
- # which corresponds to the size of the unpacked files
- # rather than the binpkg file size, triggering digest
- # verification failures as reported in bug #303211.
- remote_metadata.pop('SIZE', None)
- for k in portage.checksum.hashfunc_map:
- remote_metadata.pop(k, None)
-
- self._remotepkgs[fullpkg] = remote_metadata
- except SystemExit as e:
- raise
- except:
- writemsg(_("!!! Failed to inject remote binary package: %s\n") % fullpkg,
- noiselevel=-1)
- continue
+
self.populated=1
def inject(self, cpv, filename=None):
@@ -1121,6 +1126,10 @@ class binarytree(object):
if not samefile:
self._ensure_dir(os.path.dirname(new_filename))
_movefile(filename, new_filename, mysettings=self.settings)
+ full_path = new_filename
+
+ self._file_permissions(full_path)
+
if self._all_directory and \
self.getname(cpv).split(os.path.sep)[-2] == "All":
self._create_symlink(cpv)
@@ -1168,13 +1177,35 @@ class binarytree(object):
pkgindex.packages.append(d)
self._update_pkgindex_header(pkgindex.header)
- f = atomic_ofstream(os.path.join(self.pkgdir, "Packages"))
- pkgindex.write(f)
- f.close()
+ self._pkgindex_write(pkgindex)
+
finally:
if pkgindex_lock:
unlockfile(pkgindex_lock)
+ def _pkgindex_write(self, pkgindex):
+ contents = codecs.getwriter(_encodings['repo.content'])(io.BytesIO())
+ pkgindex.write(contents)
+ contents = contents.getvalue()
+ atime = mtime = long(pkgindex.header["TIMESTAMP"])
+ output_files = [(atomic_ofstream(self._pkgindex_file, mode="wb"),
+ self._pkgindex_file, None)]
+
+ if "compress-index" in self.settings.features:
+ gz_fname = self._pkgindex_file + ".gz"
+ fileobj = atomic_ofstream(gz_fname, mode="wb")
+ output_files.append((GzipFile(filename='', mode="wb",
+ fileobj=fileobj, mtime=mtime), gz_fname, fileobj))
+
+ for f, fname, f_close in output_files:
+ f.write(contents)
+ f.close()
+ if f_close is not None:
+ f_close.close()
+ self._file_permissions(fname)
+ # some seconds might have elapsed since TIMESTAMP
+ os.utime(fname, (atime, mtime))
+
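
One detail worth noting in _pkgindex_write(): passing mtime to GzipFile and
then calling os.utime() keeps both the gzip header and the on-disk timestamp
consistent with the index TIMESTAMP, so identical contents produce identical
archives. A minimal sketch with hypothetical contents and timestamp:

    import os
    from gzip import GzipFile

    contents = b"TIMESTAMP: 1395835200\n\n"
    ts = 1395835200

    with open("Packages.gz", "wb") as raw:
        # Pin the mtime recorded in the gzip header; without this,
        # rewriting the same bytes yields a different .gz file.
        with GzipFile(filename='', mode="wb", fileobj=raw, mtime=ts) as gz:
            gz.write(contents)
    os.utime("Packages.gz", (ts, ts))
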
def _pkgindex_entry(self, cpv):
"""
Performs checksums and evaluates USE flag conditionals.
@@ -1234,6 +1265,16 @@ class binarytree(object):
else:
header.pop(k, None)
+ # These values may be useful for using a binhost without
+ # having a local copy of the profile (bug #470006).
+ for k in self.settings.get("USE_EXPAND_IMPLICIT", "").split():
+ k = "USE_EXPAND_VALUES_" + k
+ v = self.settings.get(k)
+ if v:
+ header[k] = v
+ else:
+ header.pop(k, None)
+
def _pkgindex_version_supported(self, pkgindex):
version = pkgindex.header.get("VERSION")
if version:
@@ -1246,11 +1287,6 @@ class binarytree(object):
def _eval_use_flags(self, cpv, metadata):
use = frozenset(metadata["USE"].split())
- raw_use = use
- iuse = set(f.lstrip("-+") for f in metadata["IUSE"].split())
- use = [f for f in use if f in iuse]
- use.sort()
- metadata["USE"] = " ".join(use)
for k in self._pkgindex_use_evaluated_keys:
if k.endswith('DEPEND'):
token_class = Atom
@@ -1259,7 +1295,7 @@ class binarytree(object):
try:
deps = metadata[k]
- deps = use_reduce(deps, uselist=raw_use, token_class=token_class)
+ deps = use_reduce(deps, uselist=use, token_class=token_class)
deps = paren_enclose(deps)
except portage.exception.InvalidDependString as e:
writemsg("%s: %s\n" % (k, str(e)),
@@ -1383,19 +1419,14 @@ class binarytree(object):
f.close()
return pkgindex
- def digestCheck(self, pkg):
- """
- Verify digests for the given package and raise DigestException
- if verification fails.
- @rtype: bool
- @return: True if digests could be located, False otherwise.
- """
- cpv = pkg
- if not isinstance(cpv, basestring):
+ def _get_digests(self, pkg):
+
+ try:
cpv = pkg.cpv
- pkg = None
+ except AttributeError:
+ cpv = pkg
- pkg_path = self.getname(cpv)
+ digests = {}
metadata = None
if self._remotepkgs is None or cpv not in self._remotepkgs:
for d in self._load_pkgindex().packages:
@@ -1405,9 +1436,8 @@ class binarytree(object):
else:
metadata = self._remotepkgs[cpv]
if metadata is None:
- return False
+ return digests
- digests = {}
for k in hashfunc_map:
v = metadata.get(k)
if not v:
@@ -1421,9 +1451,31 @@ class binarytree(object):
writemsg(_("!!! Malformed SIZE attribute in remote " \
"metadata for '%s'\n") % cpv)
+ return digests
+
+ def digestCheck(self, pkg):
+ """
+ Verify digests for the given package and raise DigestException
+ if verification fails.
+ @rtype: bool
+ @return: True if digests could be located, False otherwise.
+ """
+
+ digests = self._get_digests(pkg)
+
if not digests:
return False
+ try:
+ cpv = pkg.cpv
+ except AttributeError:
+ cpv = pkg
+
+ pkg_path = self.getname(cpv)
+ hash_filter = _hash_filter(
+ self.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if not hash_filter.transparent:
+ digests = _apply_hash_filter(digests, hash_filter)
eout = EOutput()
eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
@@ -1439,9 +1491,7 @@ class binarytree(object):
"Get a slot for a catpkg; assume it exists."
myslot = ""
try:
- myslot = self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
- except SystemExit as e:
- raise
- except Exception as e:
+ myslot = self.dbapi._pkg_str(mycatpkg, None).slot
+ except KeyError:
pass
return myslot
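
With the hunk above, digestCheck() honors PORTAGE_CHECKSUM_FILTER before
verifying. Conceptually the filter prunes the digests dict down to permitted
hash names; an illustrative stand-in (the real _hash_filter also understands
"*" and "-" negation syntax):

    def simple_hash_filter(digests, allowed):
        # Keep the size entry plus any permitted hash; a crude stand-in
        # for portage.checksum._apply_hash_filter().
        return dict((k, v) for k, v in digests.items()
            if k == "size" or k in allowed)

    digests = {"size": 12345, "SHA1": "...", "SHA256": "..."}
    simple_hash_filter(digests, allowed=("SHA256",))
    # {'size': 12345, 'SHA256': '...'}
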
diff --git a/pym/portage/dbapi/cpv_expand.py b/pym/portage/dbapi/cpv_expand.py
index 947194cca..70ee78245 100644
--- a/pym/portage/dbapi/cpv_expand.py
+++ b/pym/portage/dbapi/cpv_expand.py
@@ -1,6 +1,8 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ["cpv_expand"]
import portage
diff --git a/pym/portage/dbapi/dep_expand.py b/pym/portage/dbapi/dep_expand.py
index ac8ccf4b3..3de5d8fc3 100644
--- a/pym/portage/dbapi/dep_expand.py
+++ b/pym/portage/dbapi/dep_expand.py
@@ -1,6 +1,8 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ["dep_expand"]
import re
@@ -23,7 +25,7 @@ def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
if mydep[0] == "*":
mydep = mydep[1:]
orig_dep = mydep
- has_cat = '/' in orig_dep
+ has_cat = '/' in orig_dep.split(':')[0]
if not has_cat:
alphanum = re.search(r'\w', orig_dep)
if alphanum:
diff --git a/pym/portage/dbapi/porttree.py b/pym/portage/dbapi/porttree.py
index 945c22c3d..590e3c5ef 100644
--- a/pym/portage/dbapi/porttree.py
+++ b/pym/portage/dbapi/porttree.py
@@ -1,6 +1,8 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = [
"close_portdbapi_caches", "FetchlistDict", "portagetree", "portdbapi"
]
@@ -33,21 +35,75 @@ from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage import OrderedDict
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
-from _emerge.PollScheduler import PollScheduler
import os as _os
import sys
import traceback
import warnings
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
long = int
+def close_portdbapi_caches():
+ # The python interpreter does _not_ guarantee that destructors are
+ # called for objects that remain when the interpreter exits, so we
+ # use an atexit hook to call destructors for any global portdbapi
+ # instances that may have been constructed.
+ try:
+ portage._legacy_globals_constructed
+ except AttributeError:
+ pass
+ else:
+ if "db" in portage._legacy_globals_constructed:
+ try:
+ db = portage.db
+ except AttributeError:
+ pass
+ else:
+ if isinstance(db, dict):
+ for x in db.values():
+ try:
+ if "porttree" in x.lazy_items:
+ continue
+ except (AttributeError, TypeError):
+ continue
+ try:
+ x = x.pop("porttree").dbapi
+ except (AttributeError, KeyError):
+ continue
+ if not isinstance(x, portdbapi):
+ continue
+ x.close_caches()
+
+portage.process.atexit_register(close_portdbapi_caches)
+
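The atexit registration above exists because, as noted in the comment, the interpreter does not promise to run destructors for objects that survive to shutdown. A minimal standard-library sketch of the same pattern, with hypothetical cache objects:

    import atexit

    _open_caches = []

    def _close_all_caches():
        # atexit hooks reliably run at interpreter shutdown, unlike
        # __del__ methods on globals that are still alive at exit.
        for cache in _open_caches:
            cache.close()

    atexit.register(_close_all_caches)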
+# It used to be necessary for API consumers to remove portdbapi instances
+# from portdbapi_instances, in order to avoid having accumulated instances
+# consume memory. Now, portdbapi_instances is just an empty dummy list, so
+# for backward compatibility, the ValueError raised when removing a
+# non-existent item is ignored.
+class _dummy_list(list):
+ def remove(self, item):
+ # TODO: Trigger a DeprecationWarning here, after stable portage
+ # has dummy portdbapi_instances.
+ try:
+ list.remove(self, item)
+ except ValueError:
+ pass
+
class portdbapi(dbapi):
"""this tree will scan a portage directory located at root (passed to init)"""
- portdbapi_instances = []
+ portdbapi_instances = _dummy_list()
_use_mutable = True
@property
@@ -56,23 +112,28 @@ class portdbapi(dbapi):
@property
def porttree_root(self):
+ warnings.warn("portage.dbapi.porttree.portdbapi.porttree_root is deprecated in favor of portage.repository.config.RepoConfig.location "
+ "(available as repositories[repo_name].location attribute of instances of portage.dbapi.porttree.portdbapi class)",
+ DeprecationWarning, stacklevel=2)
return self.settings.repositories.mainRepoLocation()
@property
def eclassdb(self):
+ warnings.warn("portage.dbapi.porttree.portdbapi.eclassdb is deprecated in favor of portage.repository.config.RepoConfig.eclass_db "
+ "(available as repositories[repo_name].eclass_db attribute of instances of portage.dbapi.porttree.portdbapi class)",
+ DeprecationWarning, stacklevel=2)
main_repo = self.repositories.mainRepo()
if main_repo is None:
return None
return main_repo.eclass_db
- def __init__(self, _unused_param=None, mysettings=None):
+ def __init__(self, _unused_param=DeprecationWarning, mysettings=None):
"""
@param _unused_param: deprecated, use mysettings['PORTDIR'] instead
@type _unused_param: None
@param mysettings: an immutable config instance
@type mysettings: portage.config
"""
- portdbapi.portdbapi_instances.append(self)
from portage import config
if mysettings:
@@ -81,7 +142,7 @@ class portdbapi(dbapi):
from portage import settings
self.settings = config(clone=settings)
- if _unused_param is not None:
+ if _unused_param is not DeprecationWarning:
warnings.warn("The first parameter of the " + \
"portage.dbapi.porttree.portdbapi" + \
" constructor is unused since portage-2.1.8. " + \
@@ -96,7 +157,6 @@ class portdbapi(dbapi):
# this purpose because doebuild makes many changes to the config
# instance that is passed in.
self.doebuild_settings = config(clone=self.settings)
- self._scheduler = PollScheduler().sched_iface
self.depcachedir = os.path.realpath(self.settings.depcachedir)
if os.environ.get("SANDBOX_ON") == "1":
@@ -153,10 +213,10 @@ class portdbapi(dbapi):
# portage group.
depcachedir_unshared = True
else:
- cache_kwargs.update({
+ cache_kwargs.update(portage._native_kwargs({
'gid' : portage_gid,
'perms' : 0o664
- })
+ }))
# If secpass < 1, we don't want to write to the cache
# since then we won't be able to apply group permissions
@@ -187,13 +247,25 @@ class portdbapi(dbapi):
self._pregen_auxdb[x] = cache
# Selectively cache metadata in order to optimize dep matching.
self._aux_cache_keys = set(
- ["DEPEND", "EAPI", "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
+ ["DEPEND", "EAPI", "HDEPEND",
+ "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
"PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository",
"RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"])
self._aux_cache = {}
self._broken_ebuilds = set()
+ @property
+ def _event_loop(self):
+ if portage._internal_caller:
+ # For internal portage usage, the global_event_loop is safe.
+ return global_event_loop()
+ else:
+ # For external API consumers, use a local EventLoop, since
+ # we don't want to assume that it's safe to override the
+ # global SIGCHLD handler.
+ return EventLoop(main=False)
+
def _create_pregen_cache(self, tree):
conf = self.repositories.get_repo_for_location(tree)
cache = conf.get_pregenerated_cache(
@@ -203,6 +275,13 @@ class portdbapi(dbapi):
cache.ec = self.repositories.get_repo_for_location(tree).eclass_db
except AttributeError:
pass
+
+ if not cache.complete_eclass_entries:
+ warnings.warn(
+ ("Repository '%s' used deprecated 'pms' cache format. "
+ "Please migrate to 'md5-dict' format.") % (conf.name,),
+ DeprecationWarning)
+
return cache
def _init_cache_dirs(self):
@@ -447,7 +526,7 @@ class portdbapi(dbapi):
proc = EbuildMetadataPhase(cpv=mycpv,
ebuild_hash=ebuild_hash, portdb=self,
- repo_path=mylocation, scheduler=self._scheduler,
+ repo_path=mylocation, scheduler=self._event_loop,
settings=self.doebuild_settings)
proc.start()
@@ -627,13 +706,14 @@ class portdbapi(dbapi):
else:
return 0
- def cp_all(self, categories=None, trees=None):
+ def cp_all(self, categories=None, trees=None, reverse=False):
"""
This returns a list of all keys in our tree or trees
@param categories: optional list of categories to search or
defaults to self.settings.categories
@param trees: optional list of trees to search the categories in or
defaults to self.porttrees
+ @param reverse: reverse sort order (default is False)
@rtype list of [cat/pkg,...]
"""
d = {}
@@ -652,7 +732,7 @@ class portdbapi(dbapi):
continue
d[atom.cp] = None
l = list(d)
- l.sort()
+ l.sort(reverse=reverse)
return l
def cp_list(self, mycp, use_cache=1, mytree=None):
@@ -827,8 +907,8 @@ class portdbapi(dbapi):
continue
try:
- pkg_str = _pkg_str(cpv, slot=metadata["SLOT"],
- repo=metadata["repository"], eapi=metadata["EAPI"])
+ pkg_str = _pkg_str(cpv, metadata=metadata,
+ settings=self.settings)
except InvalidData:
continue
@@ -966,19 +1046,16 @@ class portdbapi(dbapi):
return False
if settings._getMissingProperties(cpv, metadata):
return False
+ if settings._getMissingRestrict(cpv, metadata):
+ return False
except InvalidDependString:
return False
return True
-def close_portdbapi_caches():
- for i in portdbapi.portdbapi_instances:
- i.close_caches()
-
-portage.process.atexit_register(portage.portageexit)
-
class portagetree(object):
- def __init__(self, root=None, virtual=DeprecationWarning, settings=None):
+ def __init__(self, root=DeprecationWarning, virtual=DeprecationWarning,
+ settings=None):
"""
Constructor for a PortageTree
@@ -994,7 +1071,7 @@ class portagetree(object):
settings = portage.settings
self.settings = settings
- if root is not None and root != settings['ROOT']:
+ if root is not DeprecationWarning:
warnings.warn("The root parameter of the " + \
"portage.dbapi.porttree.portagetree" + \
" constructor is now unused. Use " + \
@@ -1062,10 +1139,8 @@ class portagetree(object):
"Get a slot for a catpkg; assume it exists."
myslot = ""
try:
- myslot = self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
- except SystemExit:
- raise
- except Exception:
+ myslot = self.dbapi._pkg_str(mycatpkg, None).slot
+ except KeyError:
pass
return myslot
@@ -1137,9 +1212,18 @@ def _parse_uri_map(cpv, metadata, use=None):
uri_set = uri_map.get(distfile)
if uri_set is None:
- uri_set = set()
+ # Use OrderedDict to preserve order from SRC_URI
+ # while ensuring uniqueness.
+ uri_set = OrderedDict()
uri_map[distfile] = uri_set
- uri_set.add(uri)
- uri = None
+
+ # SRC_URI may contain a file name with no scheme, and in
+ # this case it does not belong in uri_set.
+ if urlparse(uri).scheme:
+ uri_set[uri] = True
+
+ # Convert OrderedDicts to tuples.
+ for k, v in uri_map.items():
+ uri_map[k] = tuple(v)
return uri_map
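The rewritten _parse_uri_map leans on two small idioms: an OrderedDict whose keys act as an insertion-ordered set, and urlparse().scheme to drop bare file names from SRC_URI. Both in isolation (example URIs are made up):

    from collections import OrderedDict
    try:
        from urllib.parse import urlparse
    except ImportError:
        from urlparse import urlparse  # Python 2

    uris = OrderedDict()
    for uri in ("http://a/f.tar.gz", "http://b/f.tar.gz",
            "http://a/f.tar.gz", "f.tar.gz"):
        if urlparse(uri).scheme:  # empty for a bare file name, so skipped
            uris[uri] = True      # duplicate keys collapse; order is kept
    assert tuple(uris) == ("http://a/f.tar.gz", "http://b/f.tar.gz")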
diff --git a/pym/portage/dbapi/vartree.py b/pym/portage/dbapi/vartree.py
index ea62f6bcc..6417a561b 100644
--- a/pym/portage/dbapi/vartree.py
+++ b/pym/portage/dbapi/vartree.py
@@ -1,6 +1,8 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = [
"vardbapi", "vartree", "dblink"] + \
["write_contents", "tar_contents"]
@@ -11,8 +13,9 @@ portage.proxy.lazyimport.lazyimport(globals(),
'portage.data:portage_gid,portage_uid,secpass',
'portage.dbapi.dep_expand:dep_expand',
'portage.dbapi._MergeProcess:MergeProcess',
+ 'portage.dbapi._SyncfsProcess:SyncfsProcess',
'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list,' + \
- 'use_reduce,_get_slot_re',
+ 'use_reduce,_slot_separator,_repo_separator',
'portage.eapi:_get_eapi_attrs',
'portage.elog:collect_ebuild_messages,collect_messages,' + \
'elog_process,_merge_logentries',
@@ -22,7 +25,6 @@ portage.proxy.lazyimport.lazyimport(globals(),
'_merge_unicode_error', '_spawn_phase',
'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
- 'portage.update:fixdbentries',
'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
'grabdict,normalize_path,new_protect_filename',
@@ -30,17 +32,20 @@ portage.proxy.lazyimport.lazyimport(globals(),
'portage.util.env_update:env_update',
'portage.util.listdir:dircache,listdir',
'portage.util.movefile:movefile',
+ 'portage.util.writeable_check:get_ro_checker',
'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
+ 'portage.util._async.SchedulerInterface:SchedulerInterface',
+ 'portage.util._eventloop.EventLoop:EventLoop',
+ 'portage.util._eventloop.global_event_loop:global_event_loop',
'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,vercmp,' + \
- '_pkgsplit@pkgsplit,_pkg_str',
+ '_get_slot_re,_pkgsplit@pkgsplit,_pkg_str,_unknown_repo',
'subprocess',
'tarfile',
)
from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
-from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_PRESERVE_LIBS
from portage.dbapi import dbapi
from portage.exception import CommandNotFound, \
InvalidData, InvalidLocation, InvalidPackageName, \
@@ -61,7 +66,6 @@ from portage import _unicode_encode
from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.EbuildPhase import EbuildPhase
from _emerge.emergelog import emergelog
-from _emerge.PollScheduler import PollScheduler
from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
from _emerge.SpawnProcess import SpawnProcess
@@ -73,6 +77,7 @@ import io
from itertools import chain
import logging
import os as _os
+import platform
import pwd
import re
import stat
@@ -88,6 +93,7 @@ except ImportError:
import pickle
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
long = int
_unicode = str
@@ -111,7 +117,8 @@ class vardbapi(dbapi):
_aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
_aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
- def __init__(self, _unused_param=None, categories=None, settings=None, vartree=None):
+ def __init__(self, _unused_param=DeprecationWarning,
+ categories=None, settings=None, vartree=None):
"""
The categories parameter is unused since the dbapi class
now has a categories property that is generated from the
@@ -141,11 +148,11 @@ class vardbapi(dbapi):
settings = portage.settings
self.settings = settings
- if _unused_param is not None and _unused_param != settings['ROOT']:
+ if _unused_param is not DeprecationWarning:
warnings.warn("The first parameter of the "
"portage.dbapi.vartree.vardbapi"
- " constructor is now unused. Use "
- "settings['ROOT'] instead.",
+ " constructor is now unused. Instead "
+ "settings['ROOT'] is used.",
DeprecationWarning, stacklevel=2)
self._eroot = settings['EROOT']
@@ -162,7 +169,7 @@ class vardbapi(dbapi):
self.vartree = vartree
self._aux_cache_keys = set(
["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
- "EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
+ "EAPI", "HDEPEND", "HOMEPAGE", "IUSE", "KEYWORDS",
"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
"repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
])
@@ -172,15 +179,9 @@ class vardbapi(dbapi):
self._counter_path = os.path.join(self._eroot,
CACHE_PATH, "counter")
- self._plib_registry = None
- if _ENABLE_PRESERVE_LIBS:
- self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
- os.path.join(self._eroot, PRIVATE_PATH,
- "preserved_libs_registry"))
-
- self._linkmap = None
- if _ENABLE_DYN_LINK_MAP:
- self._linkmap = LinkageMap(self)
+ self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
+ os.path.join(self._eroot, PRIVATE_PATH, "preserved_libs_registry"))
+ self._linkmap = LinkageMap(self)
self._owners = self._owners_db(self)
self._cached_counter = None
@@ -368,7 +369,7 @@ class vardbapi(dbapi):
del e
write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
- fixdbentries([mylist], newpath, eapi=mycpv.eapi)
+
return moves
def cp_list(self, mycp, use_cache=1):
@@ -376,7 +377,10 @@ class vardbapi(dbapi):
if mysplit[0] == '*':
mysplit[0] = mysplit[0][1:]
try:
- mystat = os.stat(self.getpath(mysplit[0])).st_mtime
+ if sys.hexversion >= 0x3030000:
+ mystat = os.stat(self.getpath(mysplit[0])).st_mtime_ns
+ else:
+ mystat = os.stat(self.getpath(mysplit[0])).st_mtime
except OSError:
mystat = 0
if use_cache and mycp in self.cpcache:
@@ -511,7 +515,10 @@ class vardbapi(dbapi):
return list(self._iter_match(mydep,
self.cp_list(mydep.cp, use_cache=use_cache)))
try:
- curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
+ if sys.hexversion >= 0x3030000:
+ curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime_ns
+ else:
+ curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
except (IOError, OSError):
curmtime=0
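The hexversion checks prefer st_mtime_ns where available: nanosecond timestamps, added in Python 3.3, avoid false cache hits when a directory changes twice within the same second. The same check in isolation:

    import os, sys

    st = os.stat(".")
    if sys.hexversion >= 0x3030000:
        mtime = st.st_mtime_ns  # integer nanoseconds since the epoch
    else:
        mtime = st[8]           # stat.ST_MTIME slot, whole seconds only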
@@ -566,31 +573,32 @@ class vardbapi(dbapi):
def _aux_cache_init(self):
aux_cache = None
open_kwargs = {}
- if sys.hexversion >= 0x3000000:
+ if sys.hexversion >= 0x3000000 and sys.hexversion < 0x3020000:
# Buffered io triggers extreme performance issues in
# Unpickler.load() (problem observed with python-3.0.1).
# Unfortunately, performance is still poor relative to
- # python-2.x, but buffering makes it much worse.
+ # python-2.x, but buffering makes it much worse (problem
+ # appears to be solved in Python >=3.2 at least).
open_kwargs["buffering"] = 0
try:
- f = open(_unicode_encode(self._aux_cache_filename,
+ with open(_unicode_encode(self._aux_cache_filename,
encoding=_encodings['fs'], errors='strict'),
- mode='rb', **open_kwargs)
- mypickle = pickle.Unpickler(f)
- try:
- mypickle.find_global = None
- except AttributeError:
- # TODO: If py3k, override Unpickler.find_class().
- pass
- aux_cache = mypickle.load()
- f.close()
- del f
- except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError) as e:
+ mode='rb', **open_kwargs) as f:
+ mypickle = pickle.Unpickler(f)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ aux_cache = mypickle.load()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception as e:
if isinstance(e, EnvironmentError) and \
getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
pass
else:
- writemsg(_unicode_decode(_("!!! Error loading '%s': %s\n")) % \
+ writemsg(_("!!! Error loading '%s': %s\n") % \
(self._aux_cache_filename, e), noiselevel=-1)
del e
@@ -710,7 +718,7 @@ class vardbapi(dbapi):
if _get_slot_re(eapi_attrs).match(mydata['SLOT']) is None:
# Empty or invalid slot triggers InvalidAtom exceptions when
# generating slot atoms for packages, so translate it to '0' here.
- mydata['SLOT'] = _unicode_decode('0')
+ mydata['SLOT'] = '0'
return [mydata[x] for x in wants]
@@ -735,21 +743,18 @@ class vardbapi(dbapi):
results[x] = st[stat.ST_MTIME]
continue
try:
- myf = io.open(
+ with io.open(
_unicode_encode(os.path.join(mydir, x),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
- errors='replace')
- try:
- myd = myf.read()
- finally:
- myf.close()
+ errors='replace') as f:
+ myd = f.read()
except IOError:
if x not in self._aux_cache_keys and \
self._aux_cache_keys_re.match(x) is None:
env_keys.append(x)
continue
- myd = _unicode_decode('')
+ myd = ''
# Preserve \n for metadata that is known to
# contain multiple lines.
@@ -763,13 +768,13 @@ class vardbapi(dbapi):
for k in env_keys:
v = env_results.get(k)
if v is None:
- v = _unicode_decode('')
+ v = ''
if self._aux_multi_line_re.match(k) is None:
v = " ".join(v.split())
results[k] = v
if results.get("EAPI") == "":
- results[_unicode_decode("EAPI")] = _unicode_decode('0')
+ results["EAPI"] = '0'
return results
@@ -889,11 +894,17 @@ class vardbapi(dbapi):
del myroot
counter = -1
try:
- cfile = io.open(
+ with io.open(
_unicode_encode(self._counter_path,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
- errors='replace')
+ errors='replace') as f:
+ try:
+ counter = long(f.readline().strip())
+ except (OverflowError, ValueError) as e:
+ writemsg(_("!!! COUNTER file is corrupt: '%s'\n") %
+ self._counter_path, noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
except EnvironmentError as e:
# Silently allow ENOENT since files under
# /var/cache/ are allowed to disappear.
@@ -902,17 +913,6 @@ class vardbapi(dbapi):
self._counter_path, noiselevel=-1)
writemsg("!!! %s\n" % str(e), noiselevel=-1)
del e
- else:
- try:
- try:
- counter = long(cfile.readline().strip())
- finally:
- cfile.close()
- except (OverflowError, ValueError) as e:
- writemsg(_("!!! COUNTER file is corrupt: '%s'\n") % \
- self._counter_path, noiselevel=-1)
- writemsg("!!! %s\n" % str(e), noiselevel=-1)
- del e
if self._cached_counter == counter:
max_counter = counter
@@ -1004,16 +1004,31 @@ class vardbapi(dbapi):
relative_filename = filename[root_len:]
contents_key = pkg._match_contents(relative_filename)
if contents_key:
- del new_contents[contents_key]
+ # It's possible for two different paths to refer to the same
+ # contents_key, due to directory symlinks. Therefore, pass a
+ # default value to pop, in order to avoid a KeyError which
+ # could otherwise be triggered (see bug #454400).
+ new_contents.pop(contents_key, None)
removed += 1
if removed:
- self._bump_mtime(pkg.mycpv)
- f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
- write_contents(new_contents, root, f)
- f.close()
- self._bump_mtime(pkg.mycpv)
- pkg._clear_contents_cache()
+ self.writeContentsToContentsFile(pkg, new_contents)
+
+ def writeContentsToContentsFile(self, pkg, new_contents):
+ """
+ @param pkg: package to write contents file for
+ @type pkg: dblink
+ @param new_contents: contents to write to CONTENTS file
+ @type new_contents: contents dictionary of the form
+ {u'/path/to/file' : (contents_attribute 1, ...), ...}
+ """
+ root = self.settings['ROOT']
+ self._bump_mtime(pkg.mycpv)
+ f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
+ write_contents(new_contents, root, f)
+ f.close()
+ self._bump_mtime(pkg.mycpv)
+ pkg._clear_contents_cache()
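writeContentsToContentsFile goes through atomic_ofstream so that a crash can never leave a half-written CONTENTS file behind. A hedged standard-library sketch of that write-then-rename pattern (the real helper also handles permissions and encodings):

    import os, tempfile

    def write_atomically(path, data):
        # Write to a temp file on the same filesystem, then rename it
        # over the target; rename() is atomic on POSIX.
        fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or ".")
        with os.fdopen(fd, "w") as f:
            f.write(data)
        os.rename(tmp, path)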
class _owners_cache(object):
"""
@@ -1258,18 +1273,35 @@ class vardbapi(dbapi):
name = os.path.basename(path.rstrip(os.path.sep))
path_info_list.append((path, name, is_basename))
+ # Do work via the global event loop, so that it can be used
+ # to indicate progress during the search (bug #461412).
+ event_loop = (portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
root = self._vardb._eroot
- for cpv in self._vardb.cpv_all():
- dblnk = self._vardb._dblink(cpv)
+ def search_pkg(cpv):
+ dblnk = self._vardb._dblink(cpv)
for path, name, is_basename in path_info_list:
if is_basename:
for p in dblnk.getcontents():
if os.path.basename(p) == name:
- yield dblnk, p[len(root):]
+ search_pkg.results.append((dblnk, p[len(root):]))
else:
if dblnk.isowner(path):
- yield dblnk, path
+ search_pkg.results.append((dblnk, path))
+ search_pkg.complete = True
+ return False
+
+ search_pkg.results = []
+
+ for cpv in self._vardb.cpv_all():
+ del search_pkg.results[:]
+ search_pkg.complete = False
+ event_loop.idle_add(search_pkg, cpv)
+ while not search_pkg.complete:
+ event_loop.iteration()
+ for result in search_pkg.results:
+ yield result
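The rewritten search drives search_pkg through idle_add()/iteration(), which assumes GLib-style idle semantics: a callback is re-invoked on every loop iteration until it returns False, at which point it is unregistered. A toy loop illustrating just that contract (not the portage EventLoop):

    class TinyLoop:
        def __init__(self):
            self._idle = []

        def idle_add(self, func, *args):
            self._idle.append((func, args))

        def iteration(self):
            # Keep only the callbacks that asked to be called again.
            self._idle = [(f, a) for f, a in self._idle if f(*a)]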
class vartree(object):
"this tree will scan a var/db/pkg database located at root (passed to init)"
@@ -1390,7 +1422,7 @@ class vartree(object):
def getslot(self, mycatpkg):
"Get a slot for a catpkg; assume it exists."
try:
- return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
+ return self.dbapi._pkg_str(mycatpkg, None).slot
except KeyError:
return ""
@@ -1483,11 +1515,16 @@ class dblink(object):
self._contents_inodes = None
self._contents_basenames = None
self._linkmap_broken = False
+ self._device_path_map = {}
self._hardlink_merge_map = {}
self._hash_key = (self._eroot, self.mycpv)
self._protect_obj = None
self._pipe = pipe
+ # When necessary, this attribute is modified for
+ # compliance with RESTRICT=preserve-libs.
+ self._preserve_libs = "preserve-libs" in mysettings.features
+
def __hash__(self):
return hash(self._hash_key)
@@ -1530,7 +1567,11 @@ class dblink(object):
"""
Remove this entry from the database
"""
- if not os.path.exists(self.dbdir):
+ try:
+ os.lstat(self.dbdir)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR, errno.ESTALE):
+ raise
return
# Check validity of self.dbdir before attempting to remove it.
@@ -1547,6 +1588,14 @@ class dblink(object):
pass
self.vartree.dbapi._remove(self)
+ # Use self.dbroot since we need an existing path for syncfs.
+ try:
+ self._merged_path(self.dbroot, os.lstat(self.dbroot))
+ except OSError:
+ pass
+
+ self._post_merge_sync()
+
def clearcontents(self):
"""
For a given db entry (self), erase the CONTENTS values.
@@ -1572,18 +1621,18 @@ class dblink(object):
return self.contentscache
pkgfiles = {}
try:
- myc = io.open(_unicode_encode(contents_file,
+ with io.open(_unicode_encode(contents_file,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
- errors='replace')
+ errors='replace') as f:
+ mylines = f.readlines()
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
del e
self.contentscache = pkgfiles
return pkgfiles
- mylines = myc.readlines()
- myc.close()
+
null_byte = "\0"
normalize_needed = self._normalize_needed
contents_re = self._contents_re
@@ -1598,7 +1647,7 @@ class dblink(object):
if myroot == os.path.sep:
myroot = None
# used to generate parent dir entries
- dir_entry = (_unicode_decode("dir"),)
+ dir_entry = ("dir",)
eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
pos = 0
errors = []
@@ -1698,8 +1747,11 @@ class dblink(object):
unmerge_preserve = \
self._find_libs_to_preserve(unmerge=True)
counter = self.vartree.dbapi.cpv_counter(self.mycpv)
- plib_registry.unregister(self.mycpv,
- self.settings["SLOT"], counter)
+ try:
+ slot = self.mycpv.slot
+ except AttributeError:
+ slot = _pkg_str(self.mycpv, slot=self.settings["SLOT"]).slot
+ plib_registry.unregister(self.mycpv, slot, counter)
if unmerge_preserve:
for path in sorted(unmerge_preserve):
contents_key = self._match_contents(path)
@@ -1709,7 +1761,7 @@ class dblink(object):
self._display_merge(_(">>> needed %s %s\n") % \
(obj_type, contents_key), noiselevel=-1)
plib_registry.register(self.mycpv,
- self.settings["SLOT"], counter, unmerge_preserve)
+ slot, counter, unmerge_preserve)
# Remove the preserved files from our contents
# so that they won't be unmerged.
self.vartree.dbapi.removeFromContents(self,
@@ -1779,7 +1831,8 @@ class dblink(object):
if self._scheduler is None:
# We create a scheduler instance and use it to
# log unmerge output separately from merge output.
- self._scheduler = PollScheduler().sched_iface
+ self._scheduler = SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
self.settings["PORTAGE_BACKGROUND"] = "1"
@@ -1804,7 +1857,7 @@ class dblink(object):
# done for this slot, so it shouldn't be repeated until the next
# replacement or unmerge operation.
if others_in_slot is None:
- slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
+ slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
slot_matches = self.vartree.dbapi.match(
"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
others_in_slot = []
@@ -1848,13 +1901,17 @@ class dblink(object):
except UnsupportedAPIException as e:
eapi_unsupported = e
+ if self._preserve_libs and "preserve-libs" in \
+ self.settings["PORTAGE_RESTRICT"].split():
+ self._preserve_libs = False
+
builddir_lock = None
scheduler = self._scheduler
retval = os.EX_OK
try:
# Only create builddir_lock if the caller
# has not already acquired the lock.
- if "PORTAGE_BUILDIR_LOCKED" not in self.settings:
+ if "PORTAGE_BUILDDIR_LOCKED" not in self.settings:
builddir_lock = EbuildBuildDir(
scheduler=scheduler,
settings=self.settings)
@@ -1883,7 +1940,7 @@ class dblink(object):
showMessage(_("!!! FAILED prerm: %s\n") % \
os.path.join(self.dbdir, "EAPI"),
level=logging.ERROR, noiselevel=-1)
- showMessage(_unicode_decode("%s\n") % (eapi_unsupported,),
+ showMessage("%s\n" % (eapi_unsupported,),
level=logging.ERROR, noiselevel=-1)
elif os.path.isfile(myebuildpath):
phase = EbuildPhase(background=background,
@@ -2072,7 +2129,7 @@ class dblink(object):
if others_in_slot is None:
others_in_slot = []
- slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
+ slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
slot_matches = self.vartree.dbapi.match(
"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
for cur_cpv in slot_matches:
@@ -2129,6 +2186,14 @@ class dblink(object):
self._eerror("postrm",
["Could not chmod or unlink '%s': %s" % \
(file_name, ose)])
+ else:
+
+ # Even though the file no longer exists, we log it
+ # here so that _unmerge_dirs can see that we've
+ # removed a file from this device, and will record
+ # the parent directory for a syncfs call.
+ self._merged_path(file_name, lstatobj, exists=False)
+
finally:
if bsd_chflags and pflags != 0:
# Restore the parent flags we saved before unlinking
@@ -2549,15 +2614,19 @@ class dblink(object):
raise
del e
show_unmerge("!!!", "", "obj", child)
+
try:
+ parent_name = os.path.dirname(obj)
+ parent_stat = os.stat(parent_name)
+
if bsd_chflags:
lstatobj = os.lstat(obj)
if lstatobj.st_flags != 0:
bsd_chflags.lchflags(obj, 0)
- parent_name = os.path.dirname(obj)
+
# Use normal stat/chflags for the parent since we want to
# follow any symlinks to the real parent directory.
- pflags = os.stat(parent_name).st_flags
+ pflags = parent_stat.st_flags
if pflags != 0:
bsd_chflags.chflags(parent_name, 0)
try:
@@ -2566,13 +2635,34 @@ class dblink(object):
if bsd_chflags and pflags != 0:
# Restore the parent flags we saved before unlinking
bsd_chflags.chflags(parent_name, pflags)
+
+ # Record the parent directory for use in syncfs calls.
+ # Note that we use a realpath and a regular stat here, since
+ # we want to follow any symlinks back to the real device where
+ # the real parent directory resides.
+ self._merged_path(os.path.realpath(parent_name), parent_stat)
+
show_unmerge("<<<", "", "dir", obj)
except EnvironmentError as e:
if e.errno not in ignored_rmdir_errnos:
raise
if e.errno != errno.ENOENT:
show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
- del e
+
+ # Since we didn't remove this directory, record the directory
+ # itself for use in syncfs calls, if we have removed another
+ # file from the same device.
+ # Note that we use a realpath and a regular stat here, since
+ # we want to follow any symlinks back to the real device where
+ # the real directory resides.
+ try:
+ dir_stat = os.stat(obj)
+ except OSError:
+ pass
+ else:
+ if dir_stat.st_dev in self._device_path_map:
+ self._merged_path(os.path.realpath(obj), dir_stat)
+
else:
# When a directory is successfully removed, there's
# no need to protect symlinks that point to it.
@@ -2799,7 +2889,7 @@ class dblink(object):
self.vartree.dbapi._linkmap is None or \
self.vartree.dbapi._plib_registry is None or \
(not unmerge and self._installed_instance is None) or \
- "preserve-libs" not in self.settings.features:
+ not self._preserve_libs:
return set()
os = _os_merge
@@ -3383,7 +3473,10 @@ class dblink(object):
else:
logdir = os.path.join(self.settings["T"], "logging")
ebuild_logentries = collect_ebuild_messages(logdir)
- py_logentries = collect_messages(key=cpv).get(cpv, {})
+ # phasefilter is irrelevant for the above collect_ebuild_messages
+ # call, since this package instance has a private logdir. However,
+ # it may be relevant for the following collect_messages call.
+ py_logentries = collect_messages(key=cpv, phasefilter=phasefilter).get(cpv, {})
logentries = _merge_logentries(py_logentries, ebuild_logentries)
funcnames = {
"INFO": "einfo",
@@ -3404,7 +3497,9 @@ class dblink(object):
str_buffer.append(' '.join(fields))
str_buffer.append('\n')
if str_buffer:
- os.write(self._pipe, _unicode_encode(''.join(str_buffer)))
+ str_buffer = _unicode_encode(''.join(str_buffer))
+ while str_buffer:
+ str_buffer = str_buffer[os.write(self._pipe, str_buffer):]
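The while loop above handles short writes: os.write() on a pipe may accept only part of the buffer and returns the count of bytes actually written. The same pattern as a standalone helper:

    import os

    def write_all(fd, data):
        # Repeatedly slice off the bytes already written (data is bytes).
        while data:
            data = data[os.write(fd, data):]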
def _emerge_log(self, msg):
emergelog(False, msg)
@@ -3415,6 +3510,8 @@ class dblink(object):
This function does the following:
+ calls get_ro_checker to retrieve a function for checking whether Portage
+ will write to a read-only filesystem, then runs it against the directory list
calls self._preserve_libs if FEATURES=preserve-libs
calls self._collision_protect if FEATURES=collision-protect
calls doebuild(mydo=pkg_preinst)
@@ -3462,6 +3559,7 @@ class dblink(object):
level=logging.ERROR, noiselevel=-1)
return 1
+ is_binpkg = self.settings.get("EMERGE_FROM") == "binary"
slot = ''
for var_name in ('CHOST', 'SLOT'):
if var_name == 'CHOST' and self.cat == 'virtual':
@@ -3471,22 +3569,18 @@ class dblink(object):
pass
continue
- f = None
try:
- f = io.open(_unicode_encode(
+ with io.open(_unicode_encode(
os.path.join(inforoot, var_name),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
- errors='replace')
- val = f.readline().strip()
+ errors='replace') as f:
+ val = f.readline().strip()
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
del e
val = ''
- finally:
- if f is not None:
- f.close()
if var_name == 'SLOT':
slot = val
@@ -3499,7 +3593,9 @@ class dblink(object):
return 1
write_atomic(os.path.join(inforoot, var_name), slot + '\n')
- if val != self.settings.get(var_name, ''):
+ # This check only applies when built from source, since
+ # inforoot values are written just after src_install.
+ if not is_binpkg and val != self.settings.get(var_name, ''):
self._eqawarn('preinst',
[_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
{"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
@@ -3517,27 +3613,40 @@ class dblink(object):
cp = self.mysplit[0]
slot_atom = "%s:%s" % (cp, slot)
- # filter any old-style virtual matches
- slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom) \
- if cpv_getkey(cpv) == cp]
-
- if self.mycpv not in slot_matches and \
- self.vartree.dbapi.cpv_exists(self.mycpv):
- # handle multislot or unapplied slotmove
- slot_matches.append(self.mycpv)
-
- others_in_slot = []
- from portage import config
- for cur_cpv in slot_matches:
- # Clone the config in case one of these has to be unmerged since
- # we need it to have private ${T} etc... for things like elog.
- settings_clone = config(clone=self.settings)
- settings_clone.pop("PORTAGE_BUILDIR_LOCKED", None)
- settings_clone.reset()
- others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
- settings=settings_clone,
- vartree=self.vartree, treetype="vartree",
- scheduler=self._scheduler, pipe=self._pipe))
+ self.lockdb()
+ try:
+ # filter any old-style virtual matches
+ slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom)
+ if cpv_getkey(cpv) == cp]
+
+ if self.mycpv not in slot_matches and \
+ self.vartree.dbapi.cpv_exists(self.mycpv):
+ # handle multislot or unapplied slotmove
+ slot_matches.append(self.mycpv)
+
+ others_in_slot = []
+ for cur_cpv in slot_matches:
+ # Clone the config in case one of these has to be unmerged,
+ # since we need it to have private ${T} etc... for things
+ # like elog.
+ settings_clone = portage.config(clone=self.settings)
+ settings_clone.pop("PORTAGE_BUILDDIR_LOCKED", None)
+ settings_clone.setcpv(cur_cpv, mydb=self.vartree.dbapi)
+ if self._preserve_libs and "preserve-libs" in \
+ settings_clone["PORTAGE_RESTRICT"].split():
+ self._preserve_libs = False
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=settings_clone,
+ vartree=self.vartree, treetype="vartree",
+ scheduler=self._scheduler, pipe=self._pipe))
+ finally:
+ self.unlockdb()
+
+ # If any instance has RESTRICT=preserve-libs, then
+ # restrict it for all instances.
+ if not self._preserve_libs:
+ for dblnk in others_in_slot:
+ dblnk._preserve_libs = False
retval = self._security_check(others_in_slot)
if retval:
@@ -3579,8 +3688,9 @@ class dblink(object):
unicode_error = False
eagain_error = False
- myfilelist = []
- mylinklist = []
+ filelist = []
+ dirlist = []
+ linklist = []
paths_with_newlines = []
def onerror(e):
raise
@@ -3612,6 +3722,9 @@ class dblink(object):
unicode_errors.append(new_parent[ed_len:])
break
+ relative_path = parent[srcroot_len:]
+ dirlist.append(os.path.join("/", relative_path))
+
for fname in files:
try:
fname = _unicode_decode(fname,
@@ -3641,12 +3754,19 @@ class dblink(object):
file_mode = os.lstat(fpath).st_mode
if stat.S_ISREG(file_mode):
- myfilelist.append(relative_path)
+ filelist.append(relative_path)
elif stat.S_ISLNK(file_mode):
# Note: os.walk puts symlinks to directories in the "dirs"
# list and it does not traverse them since that could lead
# to an infinite recursion loop.
- mylinklist.append(relative_path)
+ linklist.append(relative_path)
+
+ myto = _unicode_decode(
+ _os.readlink(_unicode_encode(fpath,
+ encoding=_encodings['merge'], errors='strict')),
+ encoding=_encodings['merge'], errors='replace')
+ if line_ending_re.search(myto) is not None:
+ paths_with_newlines.append(relative_path)
if unicode_error:
break
@@ -3674,7 +3794,7 @@ class dblink(object):
# If there are no files to merge, and an installed package in the same
# slot has files, it probably means that something went wrong.
if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
- not myfilelist and not mylinklist and others_in_slot:
+ not filelist and not linklist and others_in_slot:
installed_files = None
for other_dblink in others_in_slot:
installed_files = other_dblink.getcontents()
@@ -3699,7 +3819,7 @@ class dblink(object):
_("Manually run `emerge --unmerge =%s` if you "
"really want to remove the above files. Set "
"PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
- "/etc/make.conf if you do not want to "
+ "/etc/portage/make.conf if you do not want to "
"abort in cases like this.") % other_dblink.mycpv,
wrap_width))
eerror(msg)
@@ -3717,13 +3837,38 @@ class dblink(object):
for other in others_in_slot])
prepare_build_dirs(settings=self.settings, cleanup=cleanup)
+ # Check for read-only filesystems.
+ ro_checker = get_ro_checker()
+ rofilesystems = ro_checker(dirlist)
+
+ if rofilesystems:
+ msg = _("One or more files installed to this package are "
+ "set to be installed to read-only filesystems. "
+ "Please mount the following filesystems as read-write "
+ "and retry.")
+ msg = textwrap.wrap(msg, 70)
+ msg.append("")
+ for f in rofilesystems:
+ msg.append("\t%s" % os.path.join(destroot,
+ f.lstrip(os.path.sep)))
+ msg.append("")
+ self._elog("eerror", "preinst", msg)
+
+ msg = _("Package '%s' NOT merged due to read-only file systems.") % \
+ self.settings.mycpv
+ msg += _(" If necessary, refer to your elog "
+ "messages for the whole content of the above message.")
+ msg = textwrap.wrap(msg, 70)
+ eerror(msg)
+ return 1
+
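get_ro_checker comes from portage.util.writeable_check and, on Linux, amounts to intersecting the install directories with read-only mounts. A simplified sketch of one way such a check can work (the real checker is more careful):

    def read_only_mounts():
        # /proc/mounts fields: device mountpoint fstype options dump pass
        ro = set()
        with open("/proc/mounts") as f:
            for line in f:
                fields = line.split()
                if "ro" in fields[3].split(","):
                    ro.add(fields[1])
        return ro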
# check for package collisions
blockers = self._blockers
if blockers is None:
blockers = []
collisions, symlink_collisions, plib_collisions = \
self._collision_protect(srcroot, destroot,
- others_in_slot + blockers, myfilelist, mylinklist)
+ others_in_slot + blockers, filelist, linklist)
if symlink_collisions:
# Symlink collisions need to be distinguished from other types
@@ -3765,7 +3910,9 @@ class dblink(object):
" enough information to determine if a real problem"
" exists. Please do NOT file a bug report at"
" http://bugs.gentoo.org unless you report exactly which"
- " two packages install the same file(s). Once again,"
+ " two packages install the same file(s). See"
+ " http://wiki.gentoo.org/wiki/Knowledge_Base:Blockers"
+ " for tips on how to solve the problem. And once again,"
" please do NOT file a bug report unless you have"
" completely understood the above message.")
@@ -3800,17 +3947,28 @@ class dblink(object):
# get_owners is slow for large numbers of files, so
# don't look them all up.
collisions = collisions[:20]
+
+ pkg_info_strs = {}
self.lockdb()
try:
owners = self.vartree.dbapi._owners.get_owners(collisions)
self.vartree.dbapi.flush_cache()
+
+ for pkg in owners:
+ pkg = self.vartree.dbapi._pkg_str(pkg.mycpv, None)
+ pkg_info_str = "%s%s%s" % (pkg,
+ _slot_separator, pkg.slot)
+ if pkg.repo != _unknown_repo:
+ pkg_info_str += "%s%s" % (_repo_separator,
+ pkg.repo)
+ pkg_info_strs[pkg] = pkg_info_str
+
finally:
self.unlockdb()
for pkg, owned_files in owners.items():
- cpv = pkg.mycpv
msg = []
- msg.append("%s" % cpv)
+ msg.append(pkg_info_strs[pkg.mycpv])
for f in sorted(owned_files):
msg.append("\t%s" % os.path.join(destroot,
f.lstrip(os.path.sep)))
@@ -3901,12 +4059,11 @@ class dblink(object):
# write local package counter for recording
if counter is None:
counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
- f = io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
+ with io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
- errors='backslashreplace')
- f.write(_unicode_decode(str(counter)))
- f.close()
+ errors='backslashreplace') as f:
+ f.write("%s" % counter)
self.updateprotect()
@@ -4031,6 +4188,7 @@ class dblink(object):
try:
self.delete()
_movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+ self._merged_path(self.dbpkgdir, os.lstat(self.dbpkgdir))
finally:
self.unlockdb()
@@ -4075,9 +4233,9 @@ class dblink(object):
self.vartree.dbapi.lock()
try:
try:
- slot, counter = self.vartree.dbapi.aux_get(
- cpv, ["SLOT", "COUNTER"])
- except KeyError:
+ slot = self.vartree.dbapi._pkg_str(cpv, None).slot
+ counter = self.vartree.dbapi.cpv_counter(cpv)
+ except (KeyError, InvalidData):
pass
else:
has_vdb_entry = True
@@ -4146,6 +4304,7 @@ class dblink(object):
# For gcc upgrades, preserved libs have to be removed after the
# the library path has been updated.
self._prune_plib_registry()
+ self._post_merge_sync()
return os.EX_OK
@@ -4161,7 +4320,7 @@ class dblink(object):
x = -1
while True:
x += 1
- backup_p = p + '.backup.' + str(x).rjust(4, '0')
+ backup_p = '%s.backup.%04d' % (p, x)
try:
os.lstat(backup_p)
except OSError:
@@ -4262,8 +4421,9 @@ class dblink(object):
@type stufftomerge: String or List
@param cfgfiledict: { File:mtime } mapping for config_protected files
@type cfgfiledict: Dictionary
- @param thismtime: The current time (typically long(time.time())
- @type thismtime: Long
+ @param thismtime: None or new mtime for merged files (expressed in seconds
+ in Python <3.3 and nanoseconds in Python >=3.3)
+ @type thismtime: None or Int
@rtype: None or Boolean
@return:
1. True on failure
@@ -4288,18 +4448,18 @@ class dblink(object):
# this is supposed to merge a list of files. There will be 2 forms of argument passing.
if isinstance(stufftomerge, basestring):
#A directory is specified. Figure out protection paths, listdir() it and process it.
- mergelist = os.listdir(join(srcroot, stufftomerge))
- offset = stufftomerge
+ mergelist = [join(stufftomerge, child) for child in \
+ os.listdir(join(srcroot, stufftomerge))]
else:
- mergelist = stufftomerge
- offset = ""
+ mergelist = stufftomerge[:]
- for i, x in enumerate(mergelist):
+ while mergelist:
- mysrc = join(srcroot, offset, x)
- mydest = join(destroot, offset, x)
+ relative_path = mergelist.pop()
+ mysrc = join(srcroot, relative_path)
+ mydest = join(destroot, relative_path)
# myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
- myrealdest = join(sep, offset, x)
+ myrealdest = join(sep, relative_path)
# stat file once, test using S_* macros many times (faster that way)
mystat = os.lstat(mysrc)
mymode = mystat[stat.ST_MODE]
@@ -4394,9 +4554,26 @@ class dblink(object):
mymtime = movefile(mysrc, mydest, newmtime=thismtime,
sstat=mystat, mysettings=self.settings,
encoding=_encodings['merge'])
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
if mymtime != None:
+ # Use lexists, since if the target happens to be a broken
+ # symlink then that should trigger an independent warning.
+ if not (os.path.lexists(myrealto) or
+ os.path.lexists(join(srcroot, myabsto))):
+ self._eqawarn('preinst',
+ [_("QA Notice: Symbolic link /%s points to /%s which does not exist.")
+ % (relative_path, myabsto)])
+
showMessage(">>> %s -> %s\n" % (mydest, myto))
- outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
+ if sys.hexversion >= 0x3030000:
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime // 1000000000)+"\n")
+ else:
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
else:
showMessage(_("!!! Failed to move file.\n"),
level=logging.ERROR, noiselevel=-1)
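CONTENTS stores whole seconds, so a nanosecond mtime from Python >= 3.3 is floor-divided before being written. A worked example:

    mymtime = 1395847962123456789  # nanoseconds, as from st_mtime_ns
    assert mymtime // 1000000000 == 1395847962  # whole seconds for CONTENTS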
@@ -4490,11 +4667,17 @@ class dblink(object):
os.chmod(mydest, mystat[0])
os.chown(mydest, mystat[4], mystat[5])
showMessage(">>> %s/\n" % mydest)
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
outfile.write("dir "+myrealdest+"\n")
# recurse and merge this directory
- if self.mergeme(srcroot, destroot, outfile, secondhand,
- join(offset, x), cfgfiledict, thismtime):
- return 1
+ mergelist.extend(join(relative_path, child) for child in
+ os.listdir(join(srcroot, relative_path)))
+
elif stat.S_ISREG(mymode):
# we are merging a regular file
mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
@@ -4550,7 +4733,10 @@ class dblink(object):
cfgprot = cfgfiledict["IGNORE"]
if not moveme:
zing = "---"
- mymtime = mystat[stat.ST_MTIME]
+ if sys.hexversion >= 0x3030000:
+ mymtime = mystat.st_mtime_ns
+ else:
+ mymtime = mystat[stat.ST_MTIME]
else:
moveme = 1
cfgprot = 1
@@ -4586,8 +4772,16 @@ class dblink(object):
hardlink_candidates.append(mydest)
zing = ">>>"
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
if mymtime != None:
- outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
+ if sys.hexversion >= 0x3030000:
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime // 1000000000)+"\n")
+ else:
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
showMessage("%s %s\n" % (zing,mydest))
else:
# we are merging a fifo or device node
@@ -4598,6 +4792,12 @@ class dblink(object):
sstat=mystat, mysettings=self.settings,
encoding=_encodings['merge']) is not None:
zing = ">>>"
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
else:
return 1
if stat.S_ISFIFO(mymode):
@@ -4606,6 +4806,52 @@ class dblink(object):
outfile.write("dev %s\n" % myrealdest)
showMessage(zing + " " + mydest + "\n")
+ def _merged_path(self, path, lstatobj, exists=True):
+ previous_path = self._device_path_map.get(lstatobj.st_dev)
+ if previous_path is None or previous_path is False or \
+ (exists and len(path) < len(previous_path)):
+ if exists:
+ self._device_path_map[lstatobj.st_dev] = path
+ else:
+ # This entry is used to indicate that we've unmerged
+ # a file from this device, and later, this entry is
+ # replaced by a parent directory.
+ self._device_path_map[lstatobj.st_dev] = False
+
+ def _post_merge_sync(self):
+ """
+ Call this after merge or unmerge, in order to sync relevant files to
+ disk and avoid data-loss in the event of a power failure. This method
+ does nothing if FEATURES=merge-sync is disabled.
+ """
+ if not self._device_path_map or \
+ "merge-sync" not in self.settings.features:
+ return
+
+ returncode = None
+ if platform.system() == "Linux":
+
+ paths = []
+ for path in self._device_path_map.values():
+ if path is not False:
+ paths.append(path)
+ paths = tuple(paths)
+
+ proc = SyncfsProcess(paths=paths,
+ scheduler=(self._scheduler or
+ portage._internal_caller and global_event_loop() or
+ EventLoop(main=False)))
+ proc.start()
+ returncode = proc.wait()
+
+ if returncode is None or returncode != os.EX_OK:
+ try:
+ proc = subprocess.Popen(["sync"])
+ except EnvironmentError:
+ pass
+ else:
+ proc.wait()
+
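SyncfsProcess wraps the Linux-only syncfs() system call, with a plain sync(1) subprocess as the fallback shown above. For reference, a minimal ctypes sketch of the syscall itself (assumes glibc >= 2.14 on Linux; not how portage invokes it):

    import ctypes, os

    def syncfs(path):
        # Flush only the filesystem that contains 'path' to disk.
        libc = ctypes.CDLL(None, use_errno=True)
        fd = os.open(path, os.O_RDONLY)
        try:
            if libc.syncfs(fd) != 0:
                raise OSError(ctypes.get_errno(), "syncfs failed")
        finally:
            os.close(fd)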
def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
mydbapi=None, prev_mtimes=None, counter=None):
"""
@@ -4618,7 +4864,8 @@ class dblink(object):
self.lockdb()
self.vartree.dbapi._bump_mtime(self.mycpv)
if self._scheduler is None:
- self._scheduler = PollScheduler().sched_iface
+ self._scheduler = SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
try:
retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes,
@@ -4669,11 +4916,12 @@ class dblink(object):
"returns contents of a file with whitespace converted to spaces"
if not os.path.exists(self.dbdir+"/"+name):
return ""
- mydata = io.open(
+ with io.open(
_unicode_encode(os.path.join(self.dbdir, name),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'], errors='replace'
- ).read().split()
+ ) as f:
+ mydata = f.read().split()
return " ".join(mydata)
def copyfile(self,fname):
@@ -4682,10 +4930,11 @@ class dblink(object):
def getfile(self,fname):
if not os.path.exists(self.dbdir+"/"+fname):
return ""
- return io.open(_unicode_encode(os.path.join(self.dbdir, fname),
+ with io.open(_unicode_encode(os.path.join(self.dbdir, fname),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'], errors='replace'
- ).read()
+ ) as f:
+ return f.read()
def setfile(self,fname,data):
kwargs = {}
@@ -4694,16 +4943,18 @@ class dblink(object):
else:
kwargs['mode'] = 'w'
kwargs['encoding'] = _encodings['repo.content']
- write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
+ write_atomic(os.path.join(self.dbdir, fname), data,
+ **portage._native_kwargs(kwargs))
def getelements(self,ename):
if not os.path.exists(self.dbdir+"/"+ename):
return []
- mylines = io.open(_unicode_encode(
+ with io.open(_unicode_encode(
os.path.join(self.dbdir, ename),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'], errors='replace'
- ).readlines()
+ ) as f:
+ mylines = f.readlines()
myreturn = []
for x in mylines:
for y in x[:-1].split():
@@ -4711,14 +4962,13 @@ class dblink(object):
return myreturn
def setelements(self,mylist,ename):
- myelement = io.open(_unicode_encode(
+ with io.open(_unicode_encode(
os.path.join(self.dbdir, ename),
encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
- errors='backslashreplace')
- for x in mylist:
- myelement.write(_unicode_decode(x+"\n"))
- myelement.close()
+ errors='backslashreplace') as f:
+ for x in mylist:
+ f.write("%s\n" % x)
def isregular(self):
"Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
@@ -4787,7 +5037,7 @@ class dblink(object):
def merge(mycat, mypkg, pkgloc, infloc,
myroot=None, settings=None, myebuild=None,
mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
- scheduler=None):
+ scheduler=None, fd_pipes=None):
"""
@param myroot: ignored, settings['EROOT'] is used instead
"""
@@ -4802,10 +5052,12 @@ def merge(mycat, mypkg, pkgloc, infloc,
merge_task = MergeProcess(
mycat=mycat, mypkg=mypkg, settings=settings,
treetype=mytree, vartree=vartree,
- scheduler=(scheduler or PollScheduler().sched_iface),
+ scheduler=(scheduler or portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
background=background, blockers=blockers, pkgloc=pkgloc,
infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
- prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'))
+ prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'),
+ fd_pipes=fd_pipes)
merge_task.start()
retcode = merge_task.wait()
return retcode
@@ -4985,13 +5237,11 @@ def tar_contents(contents, root, tar, protect=None, onProgress=None):
tar.addfile(tarinfo, f)
f.close()
else:
- f = open(_unicode_encode(path,
+ with open(_unicode_encode(path,
encoding=encoding,
- errors='strict'), 'rb')
- try:
+ errors='strict'), 'rb') as f:
tar.addfile(tarinfo, f)
- finally:
- f.close()
+
else:
tar.addfile(tarinfo)
if onProgress:
diff --git a/pym/portage/dbapi/virtual.py b/pym/portage/dbapi/virtual.py
index 213708c93..ba9745c2a 100644
--- a/pym/portage/dbapi/virtual.py
+++ b/pym/portage/dbapi/virtual.py
@@ -1,6 +1,7 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
from portage.dbapi import dbapi
from portage.dbapi.dep_expand import dep_expand
@@ -89,8 +90,8 @@ class fakedbapi(dbapi):
if metadata is None:
mycpv = _pkg_str(mycpv)
else:
- mycpv = _pkg_str(mycpv, slot=metadata.get('SLOT'),
- repo=metadata.get('repository'), eapi=metadata.get('EAPI'))
+ mycpv = _pkg_str(mycpv, metadata=metadata,
+ settings=self.settings)
mycp = mycpv.cp
try:
diff --git a/pym/portage/debug.py b/pym/portage/debug.py
index ebf1a138a..d5a8cfbf6 100644
--- a/pym/portage/debug.py
+++ b/pym/portage/debug.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import os
@@ -38,7 +38,7 @@ class trace_handler(object):
self.max_repr_length = 200
def event_handler(self, *args):
- frame, event, arg = args
+ frame, event, _arg = args
if "line" == event:
if self.show_local_lines:
self.trace_line(*args)
@@ -56,7 +56,7 @@ class trace_handler(object):
self.arg_repr(frame, event, arg),
self.locals_repr(frame, event, arg)))
- def arg_repr(self, frame, event, arg):
+ def arg_repr(self, _frame, event, arg):
my_repr = None
if "return" == event:
my_repr = repr(arg)
@@ -71,7 +71,7 @@ class trace_handler(object):
return ""
- def trace_line(self, frame, event, arg):
+ def trace_line(self, frame, _event, _arg):
writemsg("%s line=%d\n" % (self.trim_filename(frame.f_code.co_filename), frame.f_lineno))
def ignore_filename(self, filename):
@@ -81,7 +81,7 @@ class trace_handler(object):
return True
return False
- def locals_repr(self, frame, event, arg):
+ def locals_repr(self, frame, _event, _arg):
"""Create a representation of the locals dict that is suitable for
tracing output."""
diff --git a/pym/portage/dep/__init__.py b/pym/portage/dep/__init__.py
index e547debd4..c457df045 100644
--- a/pym/portage/dep/__init__.py
+++ b/pym/portage/dep/__init__.py
@@ -1,7 +1,9 @@
# deps.py -- Portage dependency resolution functions
-# Copyright 2003-2012 Gentoo Foundation
+# Copyright 2003-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = [
'Atom', 'best_match_to_list', 'cpvequal',
'dep_getcpv', 'dep_getkey', 'dep_getslot',
@@ -27,26 +29,21 @@ from portage.eapi import _get_eapi_attrs
from portage.exception import InvalidAtom, InvalidData, InvalidDependString
from portage.localization import _
from portage.versions import catpkgsplit, catsplit, \
- vercmp, ververify, _cp, _cpv, _pkg_str, _unknown_repo
+ vercmp, ververify, _cp, _cpv, _pkg_str, _slot, _unknown_repo, _vr
import portage.cache.mappings
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
_unicode = str
else:
_unicode = unicode
-# Api consumers included in portage should set this to True.
-# Once the relevant api changes are in a portage release with
-# stable keywords, make these warnings unconditional.
-_internal_warnings = False
-
# \w is [a-zA-Z0-9_]
# PMS 3.1.3: A slot name may contain any of the characters [A-Za-z0-9+_.-].
# It must not begin with a hyphen or a dot.
_slot_separator = ":"
-_slot = r'([\w+][\w+.-]*)'
# loosely match SLOT, which may have an optional ABI part
_slot_loose = r'([\w+./*=-]+)'
@@ -55,51 +52,34 @@ _op = r'([=~]|[><]=?)'
_repo_separator = "::"
_repo_name = r'[\w][\w-]*'
+_repo_name_re = re.compile('^' + _repo_name + '$', re.UNICODE)
_repo = r'(?:' + _repo_separator + '(' + _repo_name + ')' + ')?'
_extended_cat = r'[\w+*][\w+.*-]*'
-_slot_re_cache = {}
-
-def _get_slot_re(eapi_attrs):
- cache_key = eapi_attrs.slot_abi
- slot_re = _slot_re_cache.get(cache_key)
- if slot_re is not None:
- return slot_re
-
- if eapi_attrs.slot_abi:
- slot_re = _slot + r'(/' + _slot + r'=?)?'
- else:
- slot_re = _slot
-
- slot_re = re.compile('^' + slot_re + '$', re.VERBOSE)
-
- _slot_re_cache[cache_key] = slot_re
- return slot_re
-
_slot_dep_re_cache = {}
def _get_slot_dep_re(eapi_attrs):
- cache_key = eapi_attrs.slot_abi
+ cache_key = eapi_attrs.slot_operator
slot_re = _slot_dep_re_cache.get(cache_key)
if slot_re is not None:
return slot_re
- if eapi_attrs.slot_abi:
+ if eapi_attrs.slot_operator:
slot_re = _slot + r'?(\*|=|/' + _slot + r'=?)?'
else:
slot_re = _slot
- slot_re = re.compile('^' + slot_re + '$', re.VERBOSE)
+ slot_re = re.compile('^' + slot_re + '$', re.VERBOSE | re.UNICODE)
_slot_dep_re_cache[cache_key] = slot_re
return slot_re
def _match_slot(atom, pkg):
if pkg.slot == atom.slot:
- if not atom.slot_abi:
+ if not atom.sub_slot:
return True
- elif atom.slot_abi == pkg.slot_abi:
+ elif atom.sub_slot == pkg.sub_slot:
return True
return False
@@ -123,7 +103,7 @@ def _get_atom_re(eapi_attrs):
'(?P<star>=' + cpv_re + r'\*)|' +
'(?P<simple>' + cp_re + '))' +
'(' + _slot_separator + _slot_loose + ')?' +
- _repo + ')(' + _use + ')?$', re.VERBOSE)
+ _repo + ')(' + _use + ')?$', re.VERBOSE | re.UNICODE)
_atom_re_cache[cache_key] = atom_re
return atom_re
@@ -142,10 +122,10 @@ def _get_atom_wildcard_re(eapi_attrs):
pkg_re = r'[\w+*][\w+*-]*?'
atom_re = re.compile(r'((?P<simple>(' +
- _extended_cat + r')/(' + pkg_re + r'))' + \
- '|(?P<star>=((' + _extended_cat + r')/(' + pkg_re + r'))-(?P<version>\*\d+\*)))' + \
+ _extended_cat + r')/(' + pkg_re + r'(-' + _vr + ')?))' + \
+ '|(?P<star>=((' + _extended_cat + r')/(' + pkg_re + r'))-(?P<version>\*\w+\*)))' + \
'(:(?P<slot>' + _slot_loose + r'))?(' +
- _repo_separator + r'(?P<repo>' + _repo_name + r'))?$')
+ _repo_separator + r'(?P<repo>' + _repo_name + r'))?$', re.UNICODE)
_atom_wildcard_re_cache[cache_key] = atom_re
return atom_re
@@ -259,7 +239,7 @@ def strip_empty(myarr):
('portage.dep.strip_empty',), DeprecationWarning, stacklevel=2)
return [x for x in myarr if x]
-def paren_reduce(mystr):
+def paren_reduce(mystr, _deprecation_warn=True):
"""
Take a string and convert all paren enclosed entities into sublists and
split the list elements by spaces. All redundant brackets are removed.
@@ -273,7 +253,7 @@ def paren_reduce(mystr):
@rtype: Array
@return: The reduced string in an array
"""
- if _internal_warnings:
+ if portage._internal_caller and _deprecation_warn:
warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
('portage.dep.paren_reduce',), DeprecationWarning, stacklevel=2)
mysplit = mystr.split()
@@ -365,7 +345,7 @@ class paren_normalize(list):
"""Take a dependency structure as returned by paren_reduce or use_reduce
and generate an equivalent structure that has no redundant lists."""
def __init__(self, src):
- if _internal_warnings:
+ if portage._internal_caller:
warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
('portage.dep.paren_normalize',), DeprecationWarning, stacklevel=2)
list.__init__(self)
@@ -461,7 +441,7 @@ def use_reduce(depstr, uselist=[], masklist=[], matchall=False, excludeall=[], i
@return: The use reduced depend array
"""
if isinstance(depstr, list):
- if _internal_warnings:
+ if portage._internal_caller:
warnings.warn(_("Passing paren_reduced dep arrays to %s is deprecated. " + \
"Pass the original dep string instead.") % \
('portage.dep.use_reduce',), DeprecationWarning, stacklevel=2)
@@ -762,7 +742,7 @@ def dep_opconvert(deplist):
@return:
The new list with the new ordering
"""
- if _internal_warnings:
+ if portage._internal_caller:
warnings.warn(_("%s is deprecated. Use %s with the opconvert parameter set to True instead.") % \
('portage.dep.dep_opconvert', 'portage.dep.use_reduce'), DeprecationWarning, stacklevel=2)
@@ -793,7 +773,7 @@ def flatten(mylist):
@rtype: List
@return: A single list containing only non-list elements.
"""
- if _internal_warnings:
+ if portage._internal_caller:
warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
('portage.dep.flatten',), DeprecationWarning, stacklevel=2)
@@ -1233,11 +1213,14 @@ class Atom(_unicode):
if allow_repo is None:
allow_repo = True
+ blocker_prefix = ""
if "!" == s[:1]:
blocker = self._blocker(forbid_overlap=("!" == s[1:2]))
if blocker.overlap.forbid:
+ blocker_prefix = s[:2]
s = s[2:]
else:
+ blocker_prefix = s[:1]
s = s[1:]
else:
blocker = False
@@ -1261,6 +1244,8 @@ class Atom(_unicode):
else:
op = None
cpv = cp = m.group('simple')
+ if m.group(atom_re.groupindex['simple'] + 3) is not None:
+ raise InvalidAtom(self)
if cpv.find("**") != -1:
raise InvalidAtom(self)
slot = m.group('slot')
@@ -1311,32 +1296,34 @@ class Atom(_unicode):
self.__dict__['repo'] = repo
if slot is None:
self.__dict__['slot'] = None
- self.__dict__['slot_abi'] = None
- self.__dict__['slot_abi_op'] = None
+ self.__dict__['sub_slot'] = None
+ self.__dict__['slot_operator'] = None
else:
slot_re = _get_slot_dep_re(eapi_attrs)
slot_match = slot_re.match(slot)
if slot_match is None:
raise InvalidAtom(self)
- if eapi_attrs.slot_abi:
+ if eapi_attrs.slot_operator:
self.__dict__['slot'] = slot_match.group(1)
- slot_abi = slot_match.group(2)
- if slot_abi is not None:
- slot_abi = slot_abi.lstrip("/")
- if slot_abi in ("*", "="):
- self.__dict__['slot_abi'] = None
- self.__dict__['slot_abi_op'] = slot_abi
+ sub_slot = slot_match.group(2)
+ if sub_slot is not None:
+ sub_slot = sub_slot.lstrip("/")
+ if sub_slot in ("*", "="):
+ self.__dict__['sub_slot'] = None
+ self.__dict__['slot_operator'] = sub_slot
else:
- slot_abi_op = None
- if slot_abi is not None and slot_abi[-1:] == "=":
- slot_abi_op = slot_abi[-1:]
- slot_abi = slot_abi[:-1]
- self.__dict__['slot_abi'] = slot_abi
- self.__dict__['slot_abi_op'] = slot_abi_op
+ slot_operator = None
+ if sub_slot is not None and sub_slot[-1:] == "=":
+ slot_operator = sub_slot[-1:]
+ sub_slot = sub_slot[:-1]
+ self.__dict__['sub_slot'] = sub_slot
+ self.__dict__['slot_operator'] = slot_operator
+ if self.slot is not None and self.slot_operator == "*":
+ raise InvalidAtom(self)
else:
self.__dict__['slot'] = slot
- self.__dict__['slot_abi'] = None
- self.__dict__['slot_abi_op'] = None
+ self.__dict__['sub_slot'] = None
+ self.__dict__['slot_operator'] = None
self.__dict__['operator'] = op
self.__dict__['extended_syntax'] = extended_syntax
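
As a rough sketch of the renamed parsing above, assuming a slot string already split off the atom (the real code goes through the cached _get_slot_dep_re pattern and the EAPI attributes, and validates more cases, e.g. rejecting a slot combined with the '*' operator):

    def parse_slot_dep(slot_str):
        # Return (slot, sub_slot, slot_operator) for strings like
        # '2', '2/3', '2/3=', '2=', '=' (from ':=') or '*' (from ':*').
        slot, sep, sub_slot = slot_str.partition('/')
        if slot in ('*', '='):
            # Bare operator, e.g. dev-libs/foo:= or dev-libs/foo:*
            return None, None, slot
        slot_operator = None
        if sep:
            if sub_slot.endswith('='):
                slot_operator = '='
                sub_slot = sub_slot[:-1]
        else:
            sub_slot = None
            if slot.endswith('='):
                slot_operator = '='
                slot = slot[:-1]
        return slot, sub_slot, slot_operator

    assert parse_slot_dep('2') == ('2', None, None)
    assert parse_slot_dep('2/3') == ('2', '3', None)
    assert parse_slot_dep('2/3=') == ('2', '3', '=')
    assert parse_slot_dep('2=') == ('2', None, '=')
    assert parse_slot_dep('=') == (None, None, '=')
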
@@ -1348,15 +1335,18 @@ class Atom(_unicode):
use = _use
else:
use = _use_dep(use_str[1:-1].split(","), eapi_attrs)
- without_use = Atom(m.group('without_use'), allow_repo=allow_repo)
+ without_use = Atom(blocker_prefix + m.group('without_use'),
+ allow_repo=allow_repo)
else:
use = None
if unevaluated_atom is not None and \
unevaluated_atom.use is not None:
# unevaluated_atom.use is used for IUSE checks when matching
# packages, so it must not propagate to without_use
- without_use = Atom(s, allow_wildcard=allow_wildcard,
- allow_repo=allow_repo)
+ without_use = Atom(_unicode(self),
+ allow_wildcard=allow_wildcard,
+ allow_repo=allow_repo,
+ eapi=eapi)
else:
without_use = self
@@ -1410,13 +1400,13 @@ class Atom(_unicode):
% (eapi, self), category='EAPI.incompatible')
@property
- def slot_abi_built(self):
+ def slot_operator_built(self):
"""
- Returns True if slot_abi_op == "=" and slot_abi is not None.
+ Returns True if slot_operator == "=" and sub_slot is not None.
NOTE: foo/bar:2= is unbuilt and returns False, whereas foo/bar:2/2=
is built and returns True.
"""
- return self.slot_abi_op == "=" and self.slot_abi is not None
+ return self.slot_operator == "=" and self.sub_slot is not None
@property
def without_repo(self):
@@ -1427,7 +1417,7 @@ class Atom(_unicode):
@property
def without_slot(self):
- if self.slot is None and self.slot_abi_op is None:
+ if self.slot is None and self.slot_operator is None:
return self
atom = remove_slot(self)
if self.repo is not None:
@@ -1439,14 +1429,14 @@ class Atom(_unicode):
def with_repo(self, repo):
atom = remove_slot(self)
- if self.slot is not None or self.slot_abi_op is not None:
+ if self.slot is not None or self.slot_operator is not None:
atom += _slot_separator
if self.slot is not None:
atom += self.slot
- if self.slot_abi is not None:
- atom += "/%s" % self.slot_abi
- if self.slot_abi_op is not None:
- atom += self.slot_abi_op
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
atom += _repo_separator + repo
if self.use is not None:
atom += _unicode(self.use)
@@ -1506,14 +1496,14 @@ class Atom(_unicode):
if not (self.use and self.use.conditional):
return self
atom = remove_slot(self)
- if self.slot is not None or self.slot_abi_op is not None:
+ if self.slot is not None or self.slot_operator is not None:
atom += _slot_separator
if self.slot is not None:
atom += self.slot
- if self.slot_abi is not None:
- atom += "/%s" % self.slot_abi
- if self.slot_abi_op is not None:
- atom += self.slot_abi_op
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
use_dep = self.use.evaluate_conditionals(use)
atom += _unicode(use_dep)
return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
@@ -1534,14 +1524,14 @@ class Atom(_unicode):
if not self.use:
return self
atom = remove_slot(self)
- if self.slot is not None or self.slot_abi_op is not None:
+ if self.slot is not None or self.slot_operator is not None:
atom += _slot_separator
if self.slot is not None:
atom += self.slot
- if self.slot_abi is not None:
- atom += "/%s" % self.slot_abi
- if self.slot_abi_op is not None:
- atom += self.slot_abi_op
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
use_dep = self.use.violated_conditionals(other_use, is_valid_flag, parent_use)
atom += _unicode(use_dep)
return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
@@ -1550,14 +1540,14 @@ class Atom(_unicode):
if not (self.use and self.use.conditional):
return self
atom = remove_slot(self)
- if self.slot is not None or self.slot_abi_op is not None:
+ if self.slot is not None or self.slot_operator is not None:
atom += _slot_separator
if self.slot is not None:
atom += self.slot
- if self.slot_abi is not None:
- atom += "/%s" % self.slot_abi
- if self.slot_abi_op is not None:
- atom += self.slot_abi_op
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
use_dep = self.use._eval_qa_conditionals(use_mask, use_force)
atom += _unicode(use_dep)
return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
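
The slot-part reconstruction above is repeated verbatim in with_repo, evaluate_conditionals, violated_conditionals and _eval_qa_conditionals; as a sketch, each occurrence is equivalent to a helper along these lines (the helper name is illustrative, not part of the module):

    def append_slot_part(atom, slot, sub_slot, slot_operator):
        # Re-append ':slot/sub_slot=' to an atom string from which
        # remove_slot() stripped the slot dependency.
        if slot is not None or slot_operator is not None:
            atom += ':'
            if slot is not None:
                atom += slot
            if sub_slot is not None:
                atom += "/%s" % sub_slot
            if slot_operator is not None:
                atom += slot_operator
        return atom

    assert append_slot_part("dev-libs/foo", "2", "3", "=") == "dev-libs/foo:2/3="
    assert append_slot_part("dev-libs/foo", None, None, "=") == "dev-libs/foo:="
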
@@ -1583,7 +1573,7 @@ def extended_cp_match(extended_cp, other_cp):
extended_cp_re = _extended_cp_re_cache.get(extended_cp)
if extended_cp_re is None:
extended_cp_re = re.compile("^" + re.escape(extended_cp).replace(
- r'\*', '[^/]*') + "$")
+ r'\*', '[^/]*') + "$", re.UNICODE)
_extended_cp_re_cache[extended_cp] = extended_cp_re
return extended_cp_re.match(other_cp) is not None
@@ -2132,7 +2122,7 @@ def match_from_list(mydep, candidate_list):
candidate_list = mylist
mylist = []
- # Currently, only \*\d+\* is supported.
+ # Currently, only \*\w+\* is supported.
ver = mydep.version[1:-1]
for x in candidate_list:
@@ -2172,19 +2162,40 @@ def match_from_list(mydep, candidate_list):
# XXX: Nasty special casing for leading zeros
# Required as =* is a literal prefix match, so can't
# use vercmp
- mysplit = catpkgsplit(mycpv)
- myver = mysplit[2].lstrip("0")
+ myver = mycpv_cps[2].lstrip("0")
if not myver or not myver[0].isdigit():
myver = "0"+myver
- mycpv_cmp = mysplit[0]+"/"+mysplit[1]+"-"+myver
+ if myver == mycpv_cps[2]:
+ mycpv_cmp = mycpv
+ else:
+ # Use replace to preserve the revision part if it exists
+			# (mycpv_cps[3] can't be trusted because it contains r0
+ # even when the input has no revision part).
+ mycpv_cmp = mycpv.replace(
+ mydep.cp + "-" + mycpv_cps[2],
+ mydep.cp + "-" + myver, 1)
for x in candidate_list:
- xs = getattr(x, "cpv_split", None)
- if xs is None:
- xs = catpkgsplit(remove_slot(x))
+ try:
+ x.cp
+ except AttributeError:
+ try:
+ pkg = _pkg_str(remove_slot(x))
+ except InvalidData:
+ continue
+ else:
+ pkg = x
+
+ xs = pkg.cpv_split
myver = xs[2].lstrip("0")
if not myver or not myver[0].isdigit():
myver = "0"+myver
- xcpv = xs[0]+"/"+xs[1]+"-"+myver
+ if myver == xs[2]:
+ xcpv = pkg.cpv
+ else:
+ # Use replace to preserve the revision part if it exists.
+ xcpv = pkg.cpv.replace(
+ pkg.cp + "-" + xs[2],
+ pkg.cp + "-" + myver, 1)
if xcpv.startswith(mycpv_cmp):
mylist.append(x)
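
A compressed sketch of the leading-zero normalization applied to both sides of the =* prefix comparison above (pure string handling, no portage imports):

    def normalize_leading_zeros(version):
        # Reduce a leading run of zeros so that e.g. '08.2' and '8.2'
        # compare equal under literal prefix matching.
        myver = version.lstrip("0")
        if not myver or not myver[0].isdigit():
            myver = "0" + myver
        return myver

    assert normalize_leading_zeros("08.2") == "8.2"
    assert normalize_leading_zeros("0.5") == "0.5"  # '.5' gets its '0' back
    assert normalize_leading_zeros("000") == "0"
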
@@ -2277,9 +2288,11 @@ def match_from_list(mydep, candidate_list):
continue
if mydep.use:
-
- missing_enabled = mydep.use.missing_enabled.difference(x.iuse.all)
- missing_disabled = mydep.use.missing_disabled.difference(x.iuse.all)
+ is_valid_flag = x.iuse.is_valid_flag
+ missing_enabled = frozenset(flag for flag in
+ mydep.use.missing_enabled if not is_valid_flag(flag))
+ missing_disabled = frozenset(flag for flag in
+ mydep.use.missing_disabled if not is_valid_flag(flag))
if mydep.use.enabled:
if any(f in mydep.use.enabled for f in missing_disabled):
@@ -2314,9 +2327,9 @@ def match_from_list(mydep, candidate_list):
return mylist
def human_readable_required_use(required_use):
- return required_use.replace("^^", "exactly-one-of").replace("||", "any-of")
+ return required_use.replace("^^", "exactly-one-of").replace("||", "any-of").replace("??", "at-most-one-of")
-def get_required_use_flags(required_use):
+def get_required_use_flags(required_use, eapi=None):
"""
Returns a set of use flags that are used in the given REQUIRED_USE string
@@ -2326,6 +2339,12 @@ def get_required_use_flags(required_use):
@return: Set of use flags that are used in the given REQUIRED_USE string
"""
+ eapi_attrs = _get_eapi_attrs(eapi)
+ if eapi_attrs.required_use_at_most_one_of:
+ valid_operators = ("||", "^^", "??")
+ else:
+ valid_operators = ("||", "^^")
+
mysplit = required_use.split()
level = 0
stack = [[]]
@@ -2354,7 +2373,7 @@ def get_required_use_flags(required_use):
l = stack.pop()
ignore = False
if stack[level]:
- if stack[level][-1] in ("||", "^^") or \
+ if stack[level][-1] in valid_operators or \
(not isinstance(stack[level][-1], bool) and \
stack[level][-1][-1] == "?"):
ignore = True
@@ -2366,15 +2385,14 @@ def get_required_use_flags(required_use):
else:
raise InvalidDependString(
_("malformed syntax: '%s'") % required_use)
- elif token in ("||", "^^"):
+ elif token in valid_operators:
if need_bracket:
raise InvalidDependString(
_("malformed syntax: '%s'") % required_use)
need_bracket = True
stack[level].append(token)
else:
- if need_bracket or "(" in token or ")" in token or \
- "|" in token or "^" in token:
+ if need_bracket:
raise InvalidDependString(
_("malformed syntax: '%s'") % required_use)
@@ -2429,7 +2447,7 @@ class _RequiredUseBranch(object):
complex_nesting = False
node = self
while node != None and not complex_nesting:
- if node._operator in ("||", "^^"):
+ if node._operator in ("||", "^^", "??"):
complex_nesting = True
else:
node = node._parent
@@ -2450,7 +2468,7 @@ class _RequiredUseBranch(object):
if sys.hexversion < 0x3000000:
__nonzero__ = __bool__
-def check_required_use(required_use, use, iuse_match):
+def check_required_use(required_use, use, iuse_match, eapi=None):
"""
Checks if the use flags listed in 'use' satisfy all
	constraints specified in 'required_use'.
@@ -2466,6 +2484,12 @@ def check_required_use(required_use, use, iuse_match):
@return: Indicates if REQUIRED_USE constraints are satisfied
"""
+ eapi_attrs = _get_eapi_attrs(eapi)
+ if eapi_attrs.required_use_at_most_one_of:
+ valid_operators = ("||", "^^", "??")
+ else:
+ valid_operators = ("||", "^^")
+
def is_active(token):
if token.startswith("!"):
flag = token[1:]
@@ -2475,6 +2499,11 @@ def check_required_use(required_use, use, iuse_match):
is_negated = False
if not flag or not iuse_match(flag):
+ if not eapi_attrs.required_use_at_most_one_of and flag == "?":
+ msg = _("Operator '??' is not supported with EAPI '%s'") \
+ % (eapi,)
+ e = InvalidData(msg, category='EAPI.incompatible')
+ raise InvalidDependString(msg, errors=(e,))
msg = _("USE flag '%s' is not in IUSE") \
% (flag,)
e = InvalidData(msg, category='IUSE.missing')
@@ -2492,6 +2521,8 @@ def check_required_use(required_use, use, iuse_match):
return (True in argument)
elif operator == "^^":
return (argument.count(True) == 1)
+ elif operator == "??":
+ return (argument.count(True) <= 1)
elif operator[-1] == "?":
return (False not in argument)
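
A standalone sketch of the operator semantics implemented by is_satisfied above, over the list of boolean child results (the real function also treats an empty argument list as satisfied, since a USE conditional may disable every child):

    def is_satisfied(operator, argument):
        # argument is a list of booleans, one per child node.
        if operator == "||":
            return True in argument           # any-of
        elif operator == "^^":
            return argument.count(True) == 1  # exactly-one-of
        elif operator == "??":
            return argument.count(True) <= 1  # at-most-one-of (new here)
        elif operator[-1] == "?":
            return False not in argument      # use? ( ... ) requires all
        raise AssertionError("unknown operator: %r" % (operator,))

    assert is_satisfied("??", [False, False])
    assert is_satisfied("??", [True, False])
    assert not is_satisfied("??", [True, True])
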
@@ -2521,7 +2552,7 @@ def check_required_use(required_use, use, iuse_match):
l = stack.pop()
op = None
if stack[level]:
- if stack[level][-1] in ("||", "^^"):
+ if stack[level][-1] in valid_operators:
op = stack[level].pop()
satisfied = is_satisfied(op, l)
stack[level].append(satisfied)
@@ -2550,7 +2581,7 @@ def check_required_use(required_use, use, iuse_match):
stack[level].append(satisfied)
if len(node._children) <= 1 or \
- node._parent._operator not in ("||", "^^"):
+ node._parent._operator not in valid_operators:
last_node = node._parent._children.pop()
if last_node is not node:
raise AssertionError(
@@ -2566,7 +2597,7 @@ def check_required_use(required_use, use, iuse_match):
raise AssertionError(
"node is not last child of parent")
- elif len(node._children) == 1 and op in ("||", "^^"):
+ elif len(node._children) == 1 and op in valid_operators:
last_node = node._parent._children.pop()
if last_node is not node:
raise AssertionError(
@@ -2576,7 +2607,7 @@ def check_required_use(required_use, use, iuse_match):
node._children[0]._parent = node._parent
node = node._children[0]
if node._operator is None and \
- node._parent._operator not in ("||", "^^"):
+ node._parent._operator not in valid_operators:
last_node = node._parent._children.pop()
if last_node is not node:
raise AssertionError(
@@ -2590,7 +2621,7 @@ def check_required_use(required_use, use, iuse_match):
else:
raise InvalidDependString(
_("malformed syntax: '%s'") % required_use)
- elif token in ("||", "^^"):
+ elif token in valid_operators:
if need_bracket:
raise InvalidDependString(
_("malformed syntax: '%s'") % required_use)
@@ -2600,8 +2631,7 @@ def check_required_use(required_use, use, iuse_match):
node._children.append(child)
node = child
else:
- if need_bracket or "(" in token or ")" in token or \
- "|" in token or "^" in token:
+ if need_bracket:
raise InvalidDependString(
_("malformed syntax: '%s'") % required_use)
@@ -2629,16 +2659,16 @@ def extract_affecting_use(mystr, atom, eapi=None):
that decide if the given atom is in effect.
Example usage:
- >>> extract_use_cond('sasl? ( dev-libs/cyrus-sasl ) \
+ >>> extract_affecting_use('sasl? ( dev-libs/cyrus-sasl ) \
!minimal? ( cxx? ( dev-libs/cyrus-sasl ) )', 'dev-libs/cyrus-sasl')
- (['sasl', 'minimal', 'cxx'])
+ {'cxx', 'minimal', 'sasl'}
- @param dep: The dependency string
+ @param mystr: The dependency string
@type mystr: String
@param atom: The atom to get into effect
@type atom: String
- @rtype: Tuple of two lists of strings
- @return: List of use flags that need to be enabled, List of use flag that need to be disabled
+ @rtype: Set of strings
+ @return: Set of use flags affecting given atom
"""
useflag_re = _get_useflag_re(eapi)
mysplit = mystr.split()
@@ -2744,3 +2774,48 @@ def extract_affecting_use(mystr, atom, eapi=None):
_("malformed syntax: '%s'") % mystr)
return affecting_use
+
+def extract_unpack_dependencies(src_uri, unpackers):
+ """
+ Return unpack dependencies string for given SRC_URI string.
+
+ @param src_uri: SRC_URI string
+ @type src_uri: String
+ @param unpackers: Dictionary mapping archive suffixes to dependency strings
+ @type unpackers: Dictionary
+ @rtype: String
+ @return: Dependency string specifying packages required to unpack archives.
+ """
+ src_uri = src_uri.split()
+
+ depend = []
+ for i in range(len(src_uri)):
+ if src_uri[i][-1] == "?" or src_uri[i] in ("(", ")"):
+ depend.append(src_uri[i])
+ elif (i+1 < len(src_uri) and src_uri[i+1] == "->") or src_uri[i] == "->":
+ continue
+ else:
+ for suffix in sorted(unpackers, key=lambda x: len(x), reverse=True):
+ suffix = suffix.lower()
+ if src_uri[i].lower().endswith(suffix):
+ depend.append(unpackers[suffix])
+ break
+
+ while True:
+ cleaned_depend = depend[:]
+ for i in range(len(cleaned_depend)):
+ if cleaned_depend[i] is None:
+ continue
+ elif cleaned_depend[i] == "(" and cleaned_depend[i+1] == ")":
+ cleaned_depend[i] = None
+ cleaned_depend[i+1] = None
+ elif cleaned_depend[i][-1] == "?" and cleaned_depend[i+1] == "(" and cleaned_depend[i+2] == ")":
+ cleaned_depend[i] = None
+ cleaned_depend[i+1] = None
+ cleaned_depend[i+2] = None
+ if depend == cleaned_depend:
+ break
+ else:
+ depend = [x for x in cleaned_depend if x is not None]
+
+ return " ".join(depend)
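
A hypothetical invocation of the new function (the unpackers mapping here is illustrative; real mappings come from the repository's unpack-dependencies configuration):

    >>> unpackers = {".tar.bz2": "app-arch/bzip2", ".zip": "app-arch/unzip"}
    >>> extract_unpack_dependencies(
    ...     "mirror://foo/pkg-1.0.tar.bz2 doc? ( mirror://foo/pkg-doc.zip )",
    ...     unpackers)
    'app-arch/bzip2 doc? ( app-arch/unzip )'

The trailing cleanup loop exists for the opposite case: if no archive inside a 'use? ( ... )' group matched a known suffix, the now-empty group is pruned so the result stays a valid dependency string.
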
diff --git a/pym/portage/dep/_slot_abi.py b/pym/portage/dep/_slot_operator.py
index 7c36e52dc..7b6444403 100644
--- a/pym/portage/dep/_slot_abi.py
+++ b/pym/portage/dep/_slot_operator.py
@@ -1,59 +1,64 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
from portage.dep import Atom, paren_enclose, use_reduce
+from portage.eapi import _get_eapi_attrs
from portage.exception import InvalidData
+from _emerge.Package import Package
-_dep_keys = ('DEPEND', 'PDEPEND', 'RDEPEND')
-_runtime_keys = ('PDEPEND', 'RDEPEND')
-
-def find_built_slot_abi_atoms(pkg):
+def find_built_slot_operator_atoms(pkg):
atoms = {}
- for k in _dep_keys:
- atom_list = list(_find_built_slot_abi_op(use_reduce(pkg.metadata[k],
- uselist=pkg.use.enabled, eapi=pkg.metadata['EAPI'],
+ for k in Package._dep_keys:
+ atom_list = list(_find_built_slot_operator(use_reduce(pkg._metadata[k],
+ uselist=pkg.use.enabled, eapi=pkg.eapi,
token_class=Atom)))
if atom_list:
atoms[k] = atom_list
return atoms
-def _find_built_slot_abi_op(dep_struct):
+def _find_built_slot_operator(dep_struct):
for x in dep_struct:
if isinstance(x, list):
- for atom in _find_built_slot_abi_op(x):
+ for atom in _find_built_slot_operator(x):
yield atom
- elif isinstance(x, Atom) and x.slot_abi_built:
+ elif isinstance(x, Atom) and x.slot_operator_built:
yield x
-def ignore_built_slot_abi_deps(dep_struct):
+def ignore_built_slot_operator_deps(dep_struct):
for i, x in enumerate(dep_struct):
if isinstance(x, list):
- ignore_built_slot_abi_deps(x)
- elif isinstance(x, Atom) and x.slot_abi_built:
+ ignore_built_slot_operator_deps(x)
+ elif isinstance(x, Atom) and x.slot_operator_built:
# There's no way of knowing here whether the SLOT
- # part of the SLOT/ABI pair should be kept, so we
+ # part of the slot/sub-slot pair should be kept, so we
# ignore both parts.
dep_struct[i] = x.without_slot
-def evaluate_slot_abi_equal_deps(settings, use, trees):
+def evaluate_slot_operator_equal_deps(settings, use, trees):
metadata = settings.configdict['pkg']
eapi = metadata['EAPI']
+ eapi_attrs = _get_eapi_attrs(eapi)
running_vardb = trees[trees._running_eroot]["vartree"].dbapi
target_vardb = trees[trees._target_eroot]["vartree"].dbapi
vardbs = [target_vardb]
deps = {}
- for k in _dep_keys:
+ for k in Package._dep_keys:
deps[k] = use_reduce(metadata[k],
uselist=use, eapi=eapi, token_class=Atom)
- for k in _runtime_keys:
+ for k in Package._runtime_keys:
_eval_deps(deps[k], vardbs)
- if running_vardb is not target_vardb:
- vardbs.append(running_vardb)
-
- _eval_deps(deps["DEPEND"], vardbs)
+ if eapi_attrs.hdepend:
+ _eval_deps(deps["HDEPEND"], [running_vardb])
+ _eval_deps(deps["DEPEND"], [target_vardb])
+ else:
+ if running_vardb is not target_vardb:
+ vardbs.append(running_vardb)
+ _eval_deps(deps["DEPEND"], vardbs)
result = {}
for k, v in deps.items():
@@ -65,7 +70,7 @@ def _eval_deps(dep_struct, vardbs):
for i, x in enumerate(dep_struct):
if isinstance(x, list):
_eval_deps(x, vardbs)
- elif isinstance(x, Atom) and x.slot_abi_op == "=":
+ elif isinstance(x, Atom) and x.slot_operator == "=":
for vardb in vardbs:
best_version = vardb.match(x)
if best_version:
@@ -77,7 +82,7 @@ def _eval_deps(dep_struct, vardbs):
pass
else:
slot_part = "%s/%s=" % \
- (best_version.slot, best_version.slot_abi)
+ (best_version.slot, best_version.sub_slot)
x = x.with_slot(slot_part)
dep_struct[i] = x
break
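
Conceptually, _eval_deps pins each ':=' dependency to the slot/sub-slot of the best installed match. A toy sketch of just the string rewrite (the real code uses Atom.with_slot and handles USE deps, version operators and multiple vardbs):

    def pin_slot_operator(atom_str, slot, sub_slot):
        # Rewrite 'cat/pkg:=' as 'cat/pkg:SLOT/SUB_SLOT=' to record the
        # slot/sub-slot the package was built against.
        assert atom_str.endswith(":=")
        return "%s%s/%s=" % (atom_str[:-1], slot, sub_slot)

    assert pin_slot_operator("dev-libs/icu:=", "0", "52") == "dev-libs/icu:0/52="
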
diff --git a/pym/portage/dep/dep_check.py b/pym/portage/dep/dep_check.py
index d575ab3bc..b5ace3d39 100644
--- a/pym/portage/dep/dep_check.py
+++ b/pym/portage/dep/dep_check.py
@@ -1,16 +1,19 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['dep_check', 'dep_eval', 'dep_wordreduce', 'dep_zapdeps']
import logging
+import operator
import portage
-from portage import _unicode_decode
from portage.dep import Atom, match_from_list, use_reduce
from portage.exception import InvalidDependString, ParseError
from portage.localization import _
from portage.util import writemsg, writemsg_level
+from portage.util.SlotObject import SlotObject
from portage.versions import vercmp, _pkg_str
def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
@@ -160,7 +163,7 @@ def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
# According to GLEP 37, RDEPEND is the only dependency
# type that is valid for new-style virtuals. Repoman
# should enforce this.
- depstring = pkg.metadata['RDEPEND']
+ depstring = pkg._metadata['RDEPEND']
pkg_kwargs = kwargs.copy()
pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
if edebug:
@@ -183,7 +186,7 @@ def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
del mytrees["virt_parent"]
if not mycheck[0]:
- raise ParseError(_unicode_decode("%s: %s '%s'") % \
+ raise ParseError("%s: %s '%s'" % \
(pkg, mycheck[1], depstring))
# pull in the new-style virtual
@@ -254,6 +257,10 @@ def dep_eval(deplist):
return 0
return 1
+class _dep_choice(SlotObject):
+ __slots__ = ('atoms', 'slot_map', 'cp_map', 'all_available',
+ 'all_installed_slots')
+
def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
"""
Takes an unreduced and reduced deplist and removes satisfied dependencies.
@@ -316,6 +323,7 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
priority = trees[myroot].get("priority")
graph_db = trees[myroot].get("graph_db")
graph = trees[myroot].get("graph")
+ want_update_pkg = trees[myroot].get("want_update_pkg")
vardb = None
if "vartree" in trees[myroot]:
vardb = trees[myroot]["vartree"].dbapi
@@ -324,6 +332,13 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
else:
mydbapi = trees[myroot]["porttree"].dbapi
+ try:
+ mydbapi_match_pkgs = mydbapi.match_pkgs
+ except AttributeError:
+ def mydbapi_match_pkgs(atom):
+ return [mydbapi._pkg_str(cpv, atom.repo)
+ for cpv in mydbapi.match(atom)]
+
# Sort the deps into installed, not installed but already
# in the graph and other, not installed and not in the graph
	# and other, with values of [[required_atom], availability]
@@ -347,24 +362,17 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
continue
# Ignore USE dependencies here since we don't want USE
# settings to adversely affect || preference evaluation.
- avail_pkg = mydbapi.match(atom.without_use)
+ avail_pkg = mydbapi_match_pkgs(atom.without_use)
if avail_pkg:
avail_pkg = avail_pkg[-1] # highest (ascending order)
- try:
- slot = avail_pkg.slot
- except AttributeError:
- eapi, slot, repo = mydbapi.aux_get(avail_pkg,
- ["EAPI", "SLOT", "repository"])
- avail_pkg = _pkg_str(avail_pkg, eapi=eapi,
- slot=slot, repo=repo)
- avail_slot = Atom("%s:%s" % (atom.cp, slot))
+ avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
if not avail_pkg:
all_available = False
all_use_satisfied = False
break
if atom.use:
- avail_pkg_use = mydbapi.match(atom)
+ avail_pkg_use = mydbapi_match_pkgs(atom)
if not avail_pkg_use:
all_use_satisfied = False
else:
@@ -372,13 +380,7 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
avail_pkg_use = avail_pkg_use[-1]
if avail_pkg_use != avail_pkg:
avail_pkg = avail_pkg_use
- try:
- slot = avail_pkg.slot
- except AttributeError:
- eapi, slot, repo = mydbapi.aux_get(avail_pkg,
- ["EAPI", "SLOT", "repository"])
- avail_pkg = _pkg_str(avail_pkg,
- eapi=eapi, slot=slot, repo=repo)
+ avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
slot_map[avail_slot] = avail_pkg
highest_cpv = cp_map.get(avail_pkg.cp)
@@ -386,7 +388,9 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
vercmp(avail_pkg.version, highest_cpv.version) > 0:
cp_map[avail_pkg.cp] = avail_pkg
- this_choice = (atoms, slot_map, cp_map, all_available)
+ this_choice = _dep_choice(atoms=atoms, slot_map=slot_map,
+ cp_map=cp_map, all_available=all_available,
+ all_installed_slots=False)
if all_available:
# The "all installed" criterion is not version or slot specific.
# If any version of a package is already in the graph then we
@@ -407,6 +411,7 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
not slot_atom.startswith("virtual/"):
all_installed_slots = False
break
+ this_choice.all_installed_slots = all_installed_slots
if graph_db is None:
if all_use_satisfied:
if all_installed:
@@ -468,8 +473,27 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
elif all_installed:
if all_installed_slots:
preferred_installed.append(this_choice)
- else:
+ elif parent is None or want_update_pkg is None:
preferred_any_slot.append(this_choice)
+ else:
+ # When appropriate, prefer a slot that is not
+ # installed yet for bug #478188.
+ want_update = True
+ for slot_atom, avail_pkg in slot_map.items():
+ if avail_pkg in graph:
+ continue
+ # New-style virtuals have zero cost to install.
+ if slot_atom.startswith("virtual/") or \
+ vardb.match(slot_atom):
+ continue
+ if not want_update_pkg(parent, avail_pkg):
+ want_update = False
+ break
+
+ if want_update:
+ preferred_installed.append(this_choice)
+ else:
+ preferred_any_slot.append(this_choice)
else:
preferred_non_installed.append(this_choice)
else:
@@ -490,6 +514,7 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
all_installed = False
if all_installed:
+ this_choice.all_installed_slots = True
other_installed.append(this_choice)
elif some_installed:
other_installed_some.append(this_choice)
@@ -506,22 +531,23 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
for choices in choice_bins:
if len(choices) < 2:
continue
+ # Prefer choices with all_installed_slots for bug #480736.
+ choices.sort(key=operator.attrgetter('all_installed_slots'),
+ reverse=True)
for choice_1 in choices[1:]:
- atoms_1, slot_map_1, cp_map_1, all_available_1 = choice_1
- cps = set(cp_map_1)
+ cps = set(choice_1.cp_map)
for choice_2 in choices:
if choice_1 is choice_2:
# choice_1 will not be promoted, so move on
break
- atoms_2, slot_map_2, cp_map_2, all_available_2 = choice_2
- intersecting_cps = cps.intersection(cp_map_2)
+ intersecting_cps = cps.intersection(choice_2.cp_map)
if not intersecting_cps:
continue
has_upgrade = False
has_downgrade = False
for cp in intersecting_cps:
- version_1 = cp_map_1[cp]
- version_2 = cp_map_2[cp]
+ version_1 = choice_1.cp_map[cp]
+ version_2 = choice_2.cp_map[cp]
difference = vercmp(version_1.version, version_2.version)
if difference != 0:
if difference > 0:
@@ -538,9 +564,9 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
for allow_masked in (False, True):
for choices in choice_bins:
- for atoms, slot_map, cp_map, all_available in choices:
- if all_available or allow_masked:
- return atoms
+ for choice in choices:
+ if choice.all_available or allow_masked:
+ return choice.atoms
assert(False) # This point should not be reachable
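
The sort added above leans on Python's stable sort: within each preference bin, choices with all_installed_slots float to the front while the earlier relative order is otherwise preserved. A minimal demonstration:

    import operator

    class Choice(object):
        def __init__(self, name, all_installed_slots):
            self.name = name
            self.all_installed_slots = all_installed_slots

    choices = [Choice("a", False), Choice("b", True), Choice("c", False)]
    choices.sort(key=operator.attrgetter('all_installed_slots'), reverse=True)
    # "b" is promoted; "a" still precedes "c" (stable sort).
    assert [c.name for c in choices] == ["b", "a", "c"]
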
@@ -575,18 +601,15 @@ def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
mymasks = set()
useforce = set()
- useforce.add(mysettings["ARCH"])
if use == "all":
- # This masking/forcing is only for repoman. In other cases, relevant
- # masking/forcing should have already been applied via
- # config.regenerate(). Also, binary or installed packages may have
- # been built with flags that are now masked, and it would be
- # inconsistent to mask them now. Additionally, myuse may consist of
- # flags from a parent package that is being merged to a $ROOT that is
- # different from the one that mysettings represents.
+ # This is only for repoman, in order to constrain the use_reduce
+ # matchall behavior to account for profile use.mask/force. The
+ # ARCH/archlist code here may be redundant, since the profile
+ # really should be handling ARCH masking/forcing itself.
mymasks.update(mysettings.usemask)
mymasks.update(mysettings.archlist())
mymasks.discard(mysettings["ARCH"])
+ useforce.add(mysettings["ARCH"])
useforce.update(mysettings.useforce)
useforce.difference_update(mymasks)
@@ -609,7 +632,7 @@ def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
# dependencies so that things like --depclean work as well as possible
# in spite of partial invalidity.
if not current_parent.installed:
- eapi = current_parent.metadata['EAPI']
+ eapi = current_parent.eapi
if isinstance(depstring, list):
mysplit = depstring
@@ -619,7 +642,7 @@ def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
masklist=mymasks, matchall=(use=="all"), excludeall=useforce,
opconvert=True, token_class=Atom, eapi=eapi)
except InvalidDependString as e:
- return [0, _unicode_decode("%s") % (e,)]
+ return [0, "%s" % (e,)]
if mysplit == []:
#dependencies were reduced to nothing
@@ -633,10 +656,10 @@ def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
use_force=useforce, use_mask=mymasks, use_cache=use_cache,
use_binaries=use_binaries, myroot=myroot, trees=trees)
except ParseError as e:
- return [0, _unicode_decode("%s") % (e,)]
+ return [0, "%s" % (e,)]
- mysplit2=mysplit[:]
- mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
+ mysplit2 = dep_wordreduce(mysplit,
+ mysettings, mydbapi, mode, use_cache=use_cache)
if mysplit2 is None:
return [0, _("Invalid token")]
diff --git a/pym/portage/dispatch_conf.py b/pym/portage/dispatch_conf.py
index 4c68dfc7b..f975ccd59 100644
--- a/pym/portage/dispatch_conf.py
+++ b/pym/portage/dispatch_conf.py
@@ -1,5 +1,5 @@
# dispatch_conf.py -- functionality common to archive-conf and dispatch-conf
-# Copyright 2003-2012 Gentoo Foundation
+# Copyright 2003-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
@@ -24,175 +24,187 @@ RCS_MERGE = "rcsmerge -p -r" + RCS_BRANCH + " '%s' > '%s'"
DIFF3_MERGE = "diff3 -mE '%s' '%s' '%s' > '%s'"
def diffstatusoutput(cmd, file1, file2):
- """
- Execute the string cmd in a shell with getstatusoutput() and return a
- 2-tuple (status, output).
- """
- # Use Popen to emulate getstatusoutput(), since getstatusoutput() may
- # raise a UnicodeDecodeError which makes the output inaccessible.
- args = shlex_split(cmd % (file1, file2))
- if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000:
- # Python 3.1 does not support bytes in Popen args.
- args = [portage._unicode_encode(x, errors='strict') for x in args]
- proc = subprocess.Popen(args,
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- output = portage._unicode_decode(proc.communicate()[0])
- if output and output[-1] == "\n":
- # getstatusoutput strips one newline
- output = output[:-1]
- return (proc.wait(), output)
+ """
+ Execute the string cmd in a shell with getstatusoutput() and return a
+ 2-tuple (status, output).
+ """
+ # Use Popen to emulate getstatusoutput(), since getstatusoutput() may
+ # raise a UnicodeDecodeError which makes the output inaccessible.
+ args = shlex_split(cmd % (file1, file2))
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(args[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ args = [portage._unicode_encode(x, errors='strict') for x in args]
+ proc = subprocess.Popen(args,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ output = portage._unicode_decode(proc.communicate()[0])
+ if output and output[-1] == "\n":
+ # getstatusoutput strips one newline
+ output = output[:-1]
+ return (proc.wait(), output)
def read_config(mandatory_opts):
- eprefix = portage.const.EPREFIX
- config_path = os.path.join(eprefix or os.sep, "etc/dispatch-conf.conf")
- loader = KeyValuePairFileLoader(config_path, None)
- opts, errors = loader.load()
- if not opts:
- print(_('dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'), file=sys.stderr)
- sys.exit(1)
+ eprefix = portage.settings["EPREFIX"]
+ if portage._not_installed:
+ config_path = os.path.join(portage.PORTAGE_BASE_PATH, "cnf", "dispatch-conf.conf")
+ else:
+ config_path = os.path.join(eprefix or os.sep, "etc/dispatch-conf.conf")
+ loader = KeyValuePairFileLoader(config_path, None)
+ opts, _errors = loader.load()
+ if not opts:
+ print(_('dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'), file=sys.stderr)
+ sys.exit(1)
# Handle quote removal here, since KeyValuePairFileLoader doesn't do that.
- quotes = "\"'"
- for k, v in opts.items():
- if v[:1] in quotes and v[:1] == v[-1:]:
- opts[k] = v[1:-1]
-
- for key in mandatory_opts:
- if key not in opts:
- if key == "merge":
- opts["merge"] = "sdiff --suppress-common-lines --output='%s' '%s' '%s'"
- else:
- print(_('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal') % (key,), file=sys.stderr)
-
- # archive-dir supports ${EPREFIX} expansion, in order to avoid hardcoding
- variables = {"EPREFIX": eprefix}
- opts['archive-dir'] = varexpand(opts['archive-dir'], mydict=variables)
-
- if not os.path.exists(opts['archive-dir']):
- os.mkdir(opts['archive-dir'])
- # Use restrictive permissions by default, in order to protect
- # against vulnerabilities (like bug #315603 involving rcs).
- os.chmod(opts['archive-dir'], 0o700)
- elif not os.path.isdir(opts['archive-dir']):
- print(_('dispatch-conf: Config archive dir [%s] must exist; fatal') % (opts['archive-dir'],), file=sys.stderr)
- sys.exit(1)
-
- return opts
+ quotes = "\"'"
+ for k, v in opts.items():
+ if v[:1] in quotes and v[:1] == v[-1:]:
+ opts[k] = v[1:-1]
+
+ for key in mandatory_opts:
+ if key not in opts:
+ if key == "merge":
+ opts["merge"] = "sdiff --suppress-common-lines --output='%s' '%s' '%s'"
+ else:
+ print(_('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal') % (key,), file=sys.stderr)
+
+ # archive-dir supports ${EPREFIX} expansion, in order to avoid hardcoding
+ variables = {"EPREFIX": eprefix}
+ opts['archive-dir'] = varexpand(opts['archive-dir'], mydict=variables)
+
+ if not os.path.exists(opts['archive-dir']):
+ os.mkdir(opts['archive-dir'])
+ # Use restrictive permissions by default, in order to protect
+ # against vulnerabilities (like bug #315603 involving rcs).
+ os.chmod(opts['archive-dir'], 0o700)
+ elif not os.path.isdir(opts['archive-dir']):
+ print(_('dispatch-conf: Config archive dir [%s] must exist; fatal') % (opts['archive-dir'],), file=sys.stderr)
+ sys.exit(1)
+
+ return opts
def rcs_archive(archive, curconf, newconf, mrgconf):
- """Archive existing config in rcs (on trunk). Then, if mrgconf is
- specified and an old branch version exists, merge the user's changes
- and the distributed changes and put the result into mrgconf. Lastly,
- if newconf was specified, leave it in the archive dir with a .dist.new
- suffix along with the last 1.1.1 branch version with a .dist suffix."""
-
- try:
- os.makedirs(os.path.dirname(archive))
- except OSError:
- pass
-
- if os.path.isfile(curconf):
- try:
- shutil.copy2(curconf, archive)
- except(IOError, os.error) as why:
- print(_('dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal') % \
- {"curconf": curconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
-
- if os.path.exists(archive + ',v'):
- os.system(RCS_LOCK + ' ' + archive)
- os.system(RCS_PUT + ' ' + archive)
-
- ret = 0
- if newconf != '':
- os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
- has_branch = os.path.exists(archive)
- if has_branch:
- os.rename(archive, archive + '.dist')
-
- try:
- shutil.copy2(newconf, archive)
- except(IOError, os.error) as why:
- print(_('dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal') % \
- {"newconf": newconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
-
- if has_branch:
- if mrgconf != '':
- # This puts the results of the merge into mrgconf.
- ret = os.system(RCS_MERGE % (archive, mrgconf))
- mystat = os.lstat(newconf)
- os.chmod(mrgconf, mystat.st_mode)
- os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
- os.rename(archive, archive + '.dist.new')
- return ret
+ """Archive existing config in rcs (on trunk). Then, if mrgconf is
+ specified and an old branch version exists, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, leave it in the archive dir with a .dist.new
+ suffix along with the last 1.1.1 branch version with a .dist suffix."""
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except OSError:
+ pass
+
+ if os.path.isfile(curconf):
+ try:
+ shutil.copy2(curconf, archive)
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"curconf": curconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+ if os.path.exists(archive + ',v'):
+ os.system(RCS_LOCK + ' ' + archive)
+ os.system(RCS_PUT + ' ' + archive)
+
+ ret = 0
+ if newconf != '':
+ os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
+ has_branch = os.path.exists(archive)
+ if has_branch:
+ os.rename(archive, archive + '.dist')
+
+ try:
+ shutil.copy2(newconf, archive)
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"newconf": newconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+ if has_branch:
+ if mrgconf != '':
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(RCS_MERGE % (archive, mrgconf))
+ mystat = os.lstat(newconf)
+ os.chmod(mrgconf, mystat.st_mode)
+ os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
+ os.rename(archive, archive + '.dist.new')
+
+ return ret
def file_archive(archive, curconf, newconf, mrgconf):
- """Archive existing config to the archive-dir, bumping old versions
- out of the way into .# versions (log-rotate style). Then, if mrgconf
- was specified and there is a .dist version, merge the user's changes
- and the distributed changes and put the result into mrgconf. Lastly,
- if newconf was specified, archive it as a .dist.new version (which
- gets moved to the .dist version at the end of the processing)."""
-
- try:
- os.makedirs(os.path.dirname(archive))
- except OSError:
- pass
-
- # Archive the current config file if it isn't already saved
- if os.path.exists(archive) \
- and len(diffstatusoutput("diff -aq '%s' '%s'", curconf, archive)[1]) != 0:
- suf = 1
- while suf < 9 and os.path.exists(archive + '.' + str(suf)):
- suf += 1
-
- while suf > 1:
- os.rename(archive + '.' + str(suf-1), archive + '.' + str(suf))
- suf -= 1
-
- os.rename(archive, archive + '.1')
-
- if os.path.isfile(curconf):
- try:
- shutil.copy2(curconf, archive)
- except(IOError, os.error) as why:
- print(_('dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal') % \
- {"curconf": curconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
-
- if newconf != '':
- # Save off new config file in the archive dir with .dist.new suffix
- try:
- shutil.copy2(newconf, archive + '.dist.new')
- except(IOError, os.error) as why:
- print(_('dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal') % \
- {"newconf": newconf, "archive": archive + '.dist.new', "reason": str(why)}, file=sys.stderr)
-
- ret = 0
- if mrgconf != '' and os.path.exists(archive + '.dist'):
- # This puts the results of the merge into mrgconf.
- ret = os.system(DIFF3_MERGE % (curconf, archive + '.dist', newconf, mrgconf))
- mystat = os.lstat(newconf)
- os.chmod(mrgconf, mystat.st_mode)
- os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
-
- return ret
+ """Archive existing config to the archive-dir, bumping old versions
+ out of the way into .# versions (log-rotate style). Then, if mrgconf
+ was specified and there is a .dist version, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, archive it as a .dist.new version (which
+ gets moved to the .dist version at the end of the processing)."""
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except OSError:
+ pass
+
+ # Archive the current config file if it isn't already saved
+ if (os.path.exists(archive) and
+ len(diffstatusoutput("diff -aq '%s' '%s'", curconf, archive)[1]) != 0):
+ suf = 1
+ while suf < 9 and os.path.exists(archive + '.' + str(suf)):
+ suf += 1
+
+ while suf > 1:
+ os.rename(archive + '.' + str(suf-1), archive + '.' + str(suf))
+ suf -= 1
+
+ os.rename(archive, archive + '.1')
+
+ if os.path.isfile(curconf):
+ try:
+ shutil.copy2(curconf, archive)
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"curconf": curconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+ if newconf != '':
+ # Save off new config file in the archive dir with .dist.new suffix
+ try:
+ shutil.copy2(newconf, archive + '.dist.new')
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"newconf": newconf, "archive": archive + '.dist.new', "reason": str(why)}, file=sys.stderr)
+
+ ret = 0
+ if mrgconf != '' and os.path.exists(archive + '.dist'):
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(DIFF3_MERGE % (curconf, archive + '.dist', newconf, mrgconf))
+ mystat = os.lstat(newconf)
+ os.chmod(mrgconf, mystat.st_mode)
+ os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
+
+ return ret
def rcs_archive_post_process(archive):
- """Check in the archive file with the .dist.new suffix on the branch
- and remove the one with the .dist suffix."""
- os.rename(archive + '.dist.new', archive)
- if os.path.exists(archive + '.dist'):
- # Commit the last-distributed version onto the branch.
- os.system(RCS_LOCK + RCS_BRANCH + ' ' + archive)
- os.system(RCS_PUT + ' -r' + RCS_BRANCH + ' ' + archive)
- os.unlink(archive + '.dist')
- else:
- # Forcefully commit the last-distributed version onto the branch.
- os.system(RCS_PUT + ' -f -r' + RCS_BRANCH + ' ' + archive)
+ """Check in the archive file with the .dist.new suffix on the branch
+ and remove the one with the .dist suffix."""
+ os.rename(archive + '.dist.new', archive)
+ if os.path.exists(archive + '.dist'):
+ # Commit the last-distributed version onto the branch.
+ os.system(RCS_LOCK + RCS_BRANCH + ' ' + archive)
+ os.system(RCS_PUT + ' -r' + RCS_BRANCH + ' ' + archive)
+ os.unlink(archive + '.dist')
+ else:
+ # Forcefully commit the last-distributed version onto the branch.
+ os.system(RCS_PUT + ' -f -r' + RCS_BRANCH + ' ' + archive)
def file_archive_post_process(archive):
- """Rename the archive file with the .dist.new suffix to a .dist suffix"""
- os.rename(archive + '.dist.new', archive + '.dist')
+ """Rename the archive file with the .dist.new suffix to a .dist suffix"""
+ os.rename(archive + '.dist.new', archive + '.dist')
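
The .1 through .9 bumping in file_archive works like classic log rotation; a compressed sketch of just that step (the caller, as above, only rotates when the current archive exists and differs from curconf):

    import os

    def rotate(archive, limit=9):
        # Shift archive -> archive.1 -> ... -> archive.<limit>; once all
        # slots are taken, the oldest (.<limit>) is overwritten.
        suf = 1
        while suf < limit and os.path.exists("%s.%d" % (archive, suf)):
            suf += 1
        while suf > 1:
            os.rename("%s.%d" % (archive, suf - 1), "%s.%d" % (archive, suf))
            suf -= 1
        os.rename(archive, archive + ".1")
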
diff --git a/pym/portage/eapi.py b/pym/portage/eapi.py
index 8b03f830e..4f77910c5 100644
--- a/pym/portage/eapi.py
+++ b/pym/portage/eapi.py
@@ -3,14 +3,19 @@
import collections
+from portage import eapi_is_supported
+
def eapi_has_iuse_defaults(eapi):
return eapi != "0"
+def eapi_has_iuse_effective(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
def eapi_has_slot_deps(eapi):
return eapi != "0"
-def eapi_has_slot_abi(eapi):
- return eapi in ("4-slot-abi",)
+def eapi_has_slot_operator(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python")
def eapi_has_src_uri_arrows(eapi):
return eapi not in ("0", "1")
@@ -39,8 +44,11 @@ def eapi_exports_merge_type(eapi):
def eapi_exports_replace_vars(eapi):
return eapi not in ("0", "1", "2", "3")
+def eapi_exports_EBUILD_PHASE_FUNC(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
def eapi_exports_REPOSITORY(eapi):
- return eapi in ("4-python",)
+ return eapi in ("4-python", "5-progress")
def eapi_has_pkg_pretend(eapi):
return eapi not in ("0", "1", "2", "3")
@@ -54,21 +62,44 @@ def eapi_has_dosed_dohard(eapi):
def eapi_has_required_use(eapi):
return eapi not in ("0", "1", "2", "3")
+def eapi_has_required_use_at_most_one_of(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
def eapi_has_use_dep_defaults(eapi):
return eapi not in ("0", "1", "2", "3")
def eapi_has_repo_deps(eapi):
- return eapi in ("4-python",)
+ return eapi in ("4-python", "5-progress")
def eapi_allows_dots_in_PN(eapi):
- return eapi in ("4-python",)
+ return eapi in ("4-python", "5-progress")
def eapi_allows_dots_in_use_flags(eapi):
- return eapi in ("4-python",)
+ return eapi in ("4-python", "5-progress")
+
+def eapi_supports_stable_use_forcing_and_masking(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
+def eapi_allows_directories_on_profile_level_and_repository_level(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_has_use_aliases(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_has_automatic_unpack_dependencies(eapi):
+ return eapi in ("5-progress",)
+
+def eapi_has_hdepend(eapi):
+ return eapi in ("5-hdepend",)
+
+def eapi_has_targetroot(eapi):
+ return eapi in ("5-hdepend",)
_eapi_attrs = collections.namedtuple('_eapi_attrs',
- 'dots_in_PN dots_in_use_flags iuse_defaults '
- 'repo_deps required_use slot_abi slot_deps '
+ 'dots_in_PN dots_in_use_flags exports_EBUILD_PHASE_FUNC '
+ 'feature_flag_test feature_flag_targetroot '
+ 'hdepend iuse_defaults iuse_effective '
+ 'repo_deps required_use required_use_at_most_one_of slot_operator slot_deps '
'src_uri_arrows strong_blocks use_deps use_dep_defaults')
_eapi_attrs_cache = {}
@@ -77,24 +108,37 @@ def _get_eapi_attrs(eapi):
"""
When eapi is None then validation is not as strict, since we want the
same to work for multiple EAPIs that may have slightly different rules.
+ An unsupported eapi is handled the same as when eapi is None, which may
+ be helpful for handling of corrupt EAPI metadata in essential functions
+ such as pkgsplit.
"""
eapi_attrs = _eapi_attrs_cache.get(eapi)
if eapi_attrs is not None:
return eapi_attrs
+ orig_eapi = eapi
+ if eapi is not None and not eapi_is_supported(eapi):
+ eapi = None
+
eapi_attrs = _eapi_attrs(
dots_in_PN = (eapi is None or eapi_allows_dots_in_PN(eapi)),
dots_in_use_flags = (eapi is None or eapi_allows_dots_in_use_flags(eapi)),
+ exports_EBUILD_PHASE_FUNC = (eapi is None or eapi_exports_EBUILD_PHASE_FUNC(eapi)),
+ feature_flag_test = True,
+ feature_flag_targetroot = (eapi is not None and eapi_has_targetroot(eapi)),
+ hdepend = (eapi is not None and eapi_has_hdepend(eapi)),
iuse_defaults = (eapi is None or eapi_has_iuse_defaults(eapi)),
+ iuse_effective = (eapi is not None and eapi_has_iuse_effective(eapi)),
repo_deps = (eapi is None or eapi_has_repo_deps(eapi)),
required_use = (eapi is None or eapi_has_required_use(eapi)),
+ required_use_at_most_one_of = (eapi is None or eapi_has_required_use_at_most_one_of(eapi)),
slot_deps = (eapi is None or eapi_has_slot_deps(eapi)),
- slot_abi = (eapi is None or eapi_has_slot_abi(eapi)),
+ slot_operator = (eapi is None or eapi_has_slot_operator(eapi)),
src_uri_arrows = (eapi is None or eapi_has_src_uri_arrows(eapi)),
strong_blocks = (eapi is None or eapi_has_strong_blocks(eapi)),
use_deps = (eapi is None or eapi_has_use_deps(eapi)),
use_dep_defaults = (eapi is None or eapi_has_use_dep_defaults(eapi))
)
- _eapi_attrs_cache[eapi] = eapi_attrs
+ _eapi_attrs_cache[orig_eapi] = eapi_attrs
return eapi_attrs
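
The caching pattern above in isolation: an unsupported EAPI is treated like None when computing attributes, but the result is cached under the original string so the supported-check runs only once per distinct EAPI. A sketch with a fake attribute set (the supported list and the single attribute are illustrative):

    _attrs_cache = {}
    _SUPPORTED = frozenset(["0", "1", "2", "3", "4", "5"])  # illustrative

    def get_attrs(eapi):
        attrs = _attrs_cache.get(eapi)
        if attrs is not None:
            return attrs
        orig_eapi = eapi
        if eapi is not None and eapi not in _SUPPORTED:
            eapi = None  # relax validation for corrupt/unknown EAPIs
        attrs = {"slot_operator": eapi is None or eapi == "5"}
        _attrs_cache[orig_eapi] = attrs  # key on the original value
        return attrs

    assert get_attrs("bogus")["slot_operator"]  # treated like eapi=None
    assert "bogus" in _attrs_cache              # but cached just once
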
diff --git a/pym/portage/eclass_cache.py b/pym/portage/eclass_cache.py
index cb2cf8a98..2988d25d6 100644
--- a/pym/portage/eclass_cache.py
+++ b/pym/portage/eclass_cache.py
@@ -1,19 +1,24 @@
-# Copyright 2005-2011 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Author(s): Nicholas Carpaski (carpaski@gentoo.org), Brian Harring (ferringb@gentoo.org)
+from __future__ import unicode_literals
+
__all__ = ["cache"]
import stat
import sys
import operator
+import warnings
from portage.util import normalize_path
import errno
from portage.exception import FileNotFound, PermissionDenied
from portage import os
from portage import checksum
+from portage import _shell_quote
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
@@ -56,17 +61,20 @@ class cache(object):
"""
	Maintains the cache information about eclasses used in ebuilds.
"""
- def __init__(self, porttree_root, overlays=[]):
+ def __init__(self, porttree_root, overlays=None):
+ if overlays is not None:
+ warnings.warn("overlays parameter of portage.eclass_cache.cache constructor is deprecated and no longer used",
+ DeprecationWarning, stacklevel=2)
self.eclasses = {} # {"Name": hashed_path}
self._eclass_locations = {}
+ self._eclass_locations_str = None
# screw with the porttree ordering, w/out having bash inherit match it, and I'll hurt you.
# ~harring
if porttree_root:
self.porttree_root = porttree_root
- self.porttrees = [self.porttree_root] + overlays
- self.porttrees = tuple(map(normalize_path, self.porttrees))
+ self.porttrees = (normalize_path(self.porttree_root),)
self._master_eclass_root = os.path.join(self.porttrees[0], "eclass")
self.update_eclasses()
else:
@@ -98,6 +106,7 @@ class cache(object):
self.porttrees = self.porttrees + other.porttrees
self.eclasses.update(other.eclasses)
self._eclass_locations.update(other._eclass_locations)
+ self._eclass_locations_str = None
def update_eclasses(self):
self.eclasses = {}
@@ -124,7 +133,7 @@ class cache(object):
mtime = obj.mtime
except FileNotFound:
continue
- ys=y[:-eclass_len]
+ ys = y[:-eclass_len]
if x == self._master_eclass_root:
master_eclasses[ys] = mtime
self.eclasses[ys] = obj
@@ -169,3 +178,10 @@ class cache(object):
ec_dict[x] = self.eclasses[x]
return ec_dict
+
+ @property
+ def eclass_locations_string(self):
+ if self._eclass_locations_str is None:
+ self._eclass_locations_str = " ".join(_shell_quote(x)
+ for x in reversed(self.porttrees))
+ return self._eclass_locations_str
diff --git a/pym/portage/elog/__init__.py b/pym/portage/elog/__init__.py
index 33dac178d..cc086123f 100644
--- a/pym/portage/elog/__init__.py
+++ b/pym/portage/elog/__init__.py
@@ -1,9 +1,10 @@
# elog/__init__.py - elog core functions
-# Copyright 2006-2011 Gentoo Foundation
+# Copyright 2006-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
import portage
diff --git a/pym/portage/elog/mod_echo.py b/pym/portage/elog/mod_echo.py
index 59117beb3..f9cc53788 100644
--- a/pym/portage/elog/mod_echo.py
+++ b/pym/portage/elog/mod_echo.py
@@ -1,5 +1,5 @@
# elog/mod_echo.py - elog dispatch module
-# Copyright 2007 Gentoo Foundation
+# Copyright 2007-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -10,6 +10,7 @@ from portage.const import EBUILD_PHASES
from portage.localization import _
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
_items = []
diff --git a/pym/portage/elog/mod_save.py b/pym/portage/elog/mod_save.py
index c69f4a3cf..7b1cd46a8 100644
--- a/pym/portage/elog/mod_save.py
+++ b/pym/portage/elog/mod_save.py
@@ -1,7 +1,8 @@
# elog/mod_save.py - elog dispatch module
-# Copyright 2006-2011 Gentoo Foundation
+# Copyright 2006-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import errno
import io
import time
import portage
@@ -47,11 +48,22 @@ def process(mysettings, key, logentries, fulltext):
elogfilename = os.path.join(log_subdir, cat + ':' + elogfilename)
_ensure_log_subdirs(logdir, log_subdir)
- elogfile = io.open(_unicode_encode(elogfilename,
- encoding=_encodings['fs'], errors='strict'),
- mode='w', encoding=_encodings['content'], errors='backslashreplace')
- elogfile.write(_unicode_decode(fulltext))
- elogfile.close()
+ try:
+ with io.open(_unicode_encode(elogfilename,
+ encoding=_encodings['fs'], errors='strict'), mode='w',
+ encoding=_encodings['content'],
+ errors='backslashreplace') as elogfile:
+ elogfile.write(_unicode_decode(fulltext))
+ except IOError as e:
+ func_call = "open('%s', 'w')" % elogfilename
+ if e.errno == errno.EACCES:
+ raise portage.exception.PermissionDenied(func_call)
+ elif e.errno == errno.EPERM:
+ raise portage.exception.OperationNotPermitted(func_call)
+ elif e.errno == errno.EROFS:
+ raise portage.exception.ReadOnlyFileSystem(func_call)
+ else:
+ raise
# Copy group permission bits from parent directory.
elogdir_st = os.stat(log_subdir)
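
Both elog writers now translate low-level write failures into portage's typed exceptions; the pattern in isolation (the exception classes here are stand-ins for the portage.exception types):

    import errno
    import io

    class PermissionDenied(Exception): pass
    class OperationNotPermitted(Exception): pass
    class ReadOnlyFileSystem(Exception): pass

    def open_log(path, mode='w'):
        # Open a log file, mapping errno onto typed exceptions so
        # callers can react to the failure class instead of parsing
        # error strings.
        try:
            return io.open(path, mode=mode, encoding='utf_8',
                errors='backslashreplace')
        except IOError as e:
            func_call = "open('%s', '%s')" % (path, mode)
            if e.errno == errno.EACCES:
                raise PermissionDenied(func_call)
            elif e.errno == errno.EPERM:
                raise OperationNotPermitted(func_call)
            elif e.errno == errno.EROFS:
                raise ReadOnlyFileSystem(func_call)
            raise
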
diff --git a/pym/portage/elog/mod_save_summary.py b/pym/portage/elog/mod_save_summary.py
index 347f66e6e..786f89454 100644
--- a/pym/portage/elog/mod_save_summary.py
+++ b/pym/portage/elog/mod_save_summary.py
@@ -1,8 +1,12 @@
# elog/mod_save_summary.py - elog dispatch module
-# Copyright 2006-2011 Gentoo Foundation
+# Copyright 2006-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
+import errno
import io
+import sys
import time
import portage
from portage import os
@@ -37,9 +41,21 @@ def process(mysettings, key, logentries, fulltext):
# TODO: Locking
elogfilename = elogdir+"/summary.log"
- elogfile = io.open(_unicode_encode(elogfilename,
- encoding=_encodings['fs'], errors='strict'),
- mode='a', encoding=_encodings['content'], errors='backslashreplace')
+ try:
+ elogfile = io.open(_unicode_encode(elogfilename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a', encoding=_encodings['content'],
+ errors='backslashreplace')
+ except IOError as e:
+ func_call = "open('%s', 'a')" % elogfilename
+ if e.errno == errno.EACCES:
+ raise portage.exception.PermissionDenied(func_call)
+ elif e.errno == errno.EPERM:
+ raise portage.exception.OperationNotPermitted(func_call)
+ elif e.errno == errno.EROFS:
+ raise portage.exception.ReadOnlyFileSystem(func_call)
+ else:
+ raise
# Copy group permission bits from parent directory.
elogdir_st = os.stat(elogdir)
@@ -58,17 +74,19 @@ def process(mysettings, key, logentries, fulltext):
apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
mode=elogdir_grp_mode, mask=0)
- time_str = time.strftime("%Y-%m-%d %H:%M:%S %Z",
- time.localtime(time.time()))
- # Avoid potential UnicodeDecodeError later.
+ time_fmt = "%Y-%m-%d %H:%M:%S %Z"
+ if sys.hexversion < 0x3000000:
+ time_fmt = _unicode_encode(time_fmt)
+ time_str = time.strftime(time_fmt, time.localtime(time.time()))
+ # Avoid potential UnicodeDecodeError in Python 2, since strftime
+ # returns bytes in Python 2, and %Z may contain non-ascii chars.
time_str = _unicode_decode(time_str,
encoding=_encodings['content'], errors='replace')
- elogfile.write(_unicode_decode(
- _(">>> Messages generated by process " +
+ elogfile.write(_(">>> Messages generated by process "
"%(pid)d on %(time)s for package %(pkg)s:\n\n") %
- {"pid": os.getpid(), "time": time_str, "pkg": key}))
+ {"pid": os.getpid(), "time": time_str, "pkg": key})
elogfile.write(_unicode_decode(fulltext))
- elogfile.write(_unicode_decode("\n"))
+ elogfile.write("\n")
elogfile.close()
return elogfilename
diff --git a/pym/portage/elog/mod_syslog.py b/pym/portage/elog/mod_syslog.py
index c8bf44172..8b26ffa1e 100644
--- a/pym/portage/elog/mod_syslog.py
+++ b/pym/portage/elog/mod_syslog.py
@@ -1,5 +1,5 @@
# elog/mod_syslog.py - elog dispatch module
-# Copyright 2006-2011 Gentoo Foundation
+# Copyright 2006-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -8,12 +8,13 @@ from portage.const import EBUILD_PHASES
from portage import _encodings
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
_pri = {
- "INFO" : syslog.LOG_INFO,
- "WARN" : syslog.LOG_WARNING,
- "ERROR" : syslog.LOG_ERR,
+ "INFO" : syslog.LOG_INFO,
+ "WARN" : syslog.LOG_WARNING,
+ "ERROR" : syslog.LOG_ERR,
"LOG" : syslog.LOG_NOTICE,
"QA" : syslog.LOG_WARNING
}
@@ -23,14 +24,14 @@ def process(mysettings, key, logentries, fulltext):
for phase in EBUILD_PHASES:
if not phase in logentries:
continue
- for msgtype,msgcontent in logentries[phase]:
+ for msgtype, msgcontent in logentries[phase]:
if isinstance(msgcontent, basestring):
msgcontent = [msgcontent]
for line in msgcontent:
line = "%s: %s: %s" % (key, phase, line)
if sys.hexversion < 0x3000000 and not isinstance(line, bytes):
# Avoid TypeError from syslog.syslog()
- line = line.encode(_encodings['content'],
+ line = line.encode(_encodings['content'],
'backslashreplace')
syslog.syslog(_pri[msgtype], line.rstrip("\n"))
syslog.closelog()
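
On the syslog side, the dispatcher reduces to mapping elog message types onto syslog priorities. A minimal standalone version: the _pri table matches the patch, while send_elog_line() and the openlog() arguments are illustrative assumptions:

    import syslog

    _pri = {
        "INFO":  syslog.LOG_INFO,
        "WARN":  syslog.LOG_WARNING,
        "ERROR": syslog.LOG_ERR,
        "LOG":   syslog.LOG_NOTICE,
        "QA":    syslog.LOG_WARNING,
    }

    def send_elog_line(msgtype, line):
        # Forward one elog line to syslog at the mapped priority.
        syslog.openlog("portage-elog", syslog.LOG_PID, syslog.LOG_LOCAL5)
        syslog.syslog(_pri[msgtype], line.rstrip("\n"))
        syslog.closelog()
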
diff --git a/pym/portage/emaint/__init__.py b/pym/portage/emaint/__init__.py
index 5e0ae700a..48bc6e2ae 100644
--- a/pym/portage/emaint/__init__.py
+++ b/pym/portage/emaint/__init__.py
@@ -1,7 +1,5 @@
# Copyright 2005-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'The emaint program provides checks and maintenance
-on a gentoo system.
+"""System health checks and maintenance utilities.
"""
-
diff --git a/pym/portage/emaint/defaults.py b/pym/portage/emaint/defaults.py
index d9d83ffbb..30f36af50 100644
--- a/pym/portage/emaint/defaults.py
+++ b/pym/portage/emaint/defaults.py
@@ -1,18 +1,25 @@
-# Copyright 2005-2012 Gentoo Foundation
+# Copyright 2005-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# parser option data
CHECK = {"short": "-c", "long": "--check",
"help": "Check for problems (a default option for most modules)",
'status': "Checking %s for problems",
+ 'action': 'store_true',
'func': 'check'
}
FIX = {"short": "-f", "long": "--fix",
"help": "Attempt to fix problems (a default option for most modules)",
'status': "Attempting to fix %s",
+ 'action': 'store_true',
'func': 'fix'
}
+VERSION = {"long": "--version",
+ "help": "show program's version number and exit",
+ 'action': 'store_true',
+ }
+
# parser options
-DEFAULT_OPTIONS = {'check': CHECK, 'fix': FIX}
+DEFAULT_OPTIONS = {'check': CHECK, 'fix': FIX, 'version': VERSION}
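
These option dictionaries are consumed by emaint's parser setup in main.py (next file below). A minimal sketch of how one such dict becomes an argparse flag; build_parser() is illustrative, not part of the patch:

    from argparse import ArgumentParser

    CHECK = {"short": "-c", "long": "--check",
        "help": "Check for problems", "action": "store_true"}

    def build_parser(option_dicts):
        parser = ArgumentParser()
        for opt in option_dicts:
            # Pass only the keywords a dict actually sets, since argparse
            # rejects unexpected keyword arguments for some actions.
            kwargs = {k: opt[k] for k in ("help", "action") if k in opt}
            parser.add_argument(opt["short"], opt["long"], **kwargs)
        return parser

    options = build_parser([CHECK]).parse_args(["--check"])
    assert options.check is True
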
diff --git a/pym/portage/emaint/main.py b/pym/portage/emaint/main.py
index dbc5f18cc..6a17027b5 100644
--- a/pym/portage/emaint/main.py
+++ b/pym/portage/emaint/main.py
@@ -1,4 +1,4 @@
-# Copyright 2005-2012 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -6,61 +6,59 @@ from __future__ import print_function
import sys
import textwrap
-from optparse import OptionParser, OptionValueError
-
import portage
from portage import os
from portage.emaint.module import Modules
from portage.emaint.progress import ProgressBar
from portage.emaint.defaults import DEFAULT_OPTIONS
+from portage.util._argparse import ArgumentParser
class OptionItem(object):
- """class to hold module OptionParser options data
+ """class to hold module ArgumentParser options data
"""
- def __init__(self, opt, parser):
+ def __init__(self, opt):
"""
@type opt: dictionary
@param opt: options parser options
"""
- self.parser = parser
- self.short = opt['short']
- self.long = opt['long']
- self.help = opt['help']
- self.status = opt['status']
- self.func = opt['func']
- self.action = opt.get('action', "callback")
- self.type = opt.get('type', None)
- self.dest = opt.get('dest', None)
- self.callback = opt.get('callback', self._exclusive)
- self.callback_kwargs = opt.get('callback_kwargs', {"var":"action"})
-
-
- def _exclusive(self, option, *args, **kw):
- """Generic check for the 2 default options
- """
- var = kw.get("var", None)
- if var is None:
- raise ValueError("var not specified to exclusive()")
- if getattr(self.parser, var, ""):
- raise OptionValueError("%s and %s are exclusive options"
- % (getattr(self.parser, var), option))
- setattr(self.parser, var, str(option))
-
- def check_action(self, action):
- """Checks if 'action' is the same as this option
-
- @type action: string
- @param action: the action to compare
- @rtype: boolean
- """
- if action == self.action:
- return True
- elif action == '/'.join([self.short, self.long]):
- return True
- return False
-
+ self.short = opt.get('short')
+ self.long = opt.get('long')
+		# '-' characters are not allowed in python identifiers
+ # so store the sanitized target variable name
+ self.target = self.long[2:].replace('-','_')
+ self.help = opt.get('help')
+ self.status = opt.get('status')
+ self.func = opt.get('func')
+ self.action = opt.get('action')
+ self.type = opt.get('type')
+ self.dest = opt.get('dest')
+
+ @property
+ def pargs(self):
+ pargs = []
+ if self.short is not None:
+ pargs.append(self.short)
+ if self.long is not None:
+ pargs.append(self.long)
+ return pargs
+
+ @property
+ def kwargs(self):
+ # Support for keyword arguments varies depending on the action,
+ # so only pass in the keywords that are needed, in order
+ # to avoid a TypeError.
+ kwargs = {}
+ if self.help is not None:
+ kwargs['help'] = self.help
+ if self.action is not None:
+ kwargs['action'] = self.action
+ if self.type is not None:
+ kwargs['type'] = self.type
+ if self.dest is not None:
+ kwargs['dest'] = self.dest
+ return kwargs
def usage(module_controller):
_usage = "usage: emaint [options] COMMAND"
@@ -91,15 +89,14 @@ def module_opts(module_controller, module):
opts = DEFAULT_OPTIONS
for opt in sorted(opts):
optd = opts[opt]
- opto = " %s, %s" %(optd['short'], optd['long'])
- _usage += '%s %s\n' % (opto.ljust(15),optd['help'])
+ opto = " %s, %s" % (optd['short'], optd['long'])
+ _usage += '%s %s\n' % (opto.ljust(15), optd['help'])
_usage += '\n'
return _usage
class TaskHandler(object):
- """Handles the running of the tasks it is given
- """
+ """Handles the running of the tasks it is given"""
def __init__(self, show_progress_bar=True, verbose=True, callback=None):
self.show_progress_bar = show_progress_bar
@@ -108,14 +105,13 @@ class TaskHandler(object):
self.isatty = os.environ.get('TERM') != 'dumb' and sys.stdout.isatty()
self.progress_bar = ProgressBar(self.isatty, title="Emaint", max_desc_length=27)
-
def run_tasks(self, tasks, func, status=None, verbose=True, options=None):
"""Runs the module tasks"""
if tasks is None or func is None:
return
for task in tasks:
inst = task()
- show_progress = self.show_progress_bar
+ show_progress = self.show_progress_bar and self.isatty
# check if the function is capable of progressbar
# and possibly override it off
if show_progress and hasattr(inst, 'can_progressbar'):
@@ -133,7 +129,7 @@ class TaskHandler(object):
'options': options.copy()
}
result = getattr(inst, func)(**kwargs)
- if self.isatty and show_progress:
+ if show_progress:
# make sure the final progress is displayed
self.progress_bar.display()
print()
@@ -160,59 +156,68 @@ def emaint_main(myargv):
module_names.insert(0, "all")
- parser = OptionParser(usage=usage(module_controller), version=portage.VERSION)
+ parser = ArgumentParser(usage=usage(module_controller))
# add default options
parser_options = []
for opt in DEFAULT_OPTIONS:
- parser_options.append(OptionItem(DEFAULT_OPTIONS[opt], parser))
+ parser_options.append(OptionItem(DEFAULT_OPTIONS[opt]))
for mod in module_names[1:]:
desc = module_controller.get_func_descriptions(mod)
if desc:
for opt in desc:
- parser_options.append(OptionItem(desc[opt], parser))
+ parser_options.append(OptionItem(desc[opt]))
for opt in parser_options:
- parser.add_option(opt.short, opt.long, help=opt.help, action=opt.action,
- type=opt.type, dest=opt.dest,
- callback=opt.callback, callback_kwargs=opt.callback_kwargs)
+ parser.add_argument(*opt.pargs, **opt.kwargs)
- parser.action = None
+ options, args = parser.parse_known_args(args=myargv)
+
+ if options.version:
+ print(portage.VERSION)
+ return os.EX_OK
- (options, args) = parser.parse_args(args=myargv)
- #print('options', options, '\nargs', args, '\naction', parser.action)
if len(args) != 1:
parser.error("Incorrect number of arguments")
if args[0] not in module_names:
parser.error("%s target is not a known target" % args[0])
- if parser.action:
- action = parser.action
- else:
- action = "-c/--check"
- long_action = action.split('/')[1].lstrip('-')
- #print("DEBUG: action = ", action, long_action)
+ check_opt = None
+ func = status = long_action = None
+ for opt in parser_options:
+ if opt.long == '--check':
+ # Default action
+ check_opt = opt
+ if opt.status and getattr(options, opt.target, False):
+ if long_action is not None:
+ parser.error("--%s and %s are exclusive options" %
+ (long_action, opt.long))
+ status = opt.status
+ func = opt.func
+ long_action = opt.long.lstrip('-')
+
+ if long_action is None:
+ #print("DEBUG: long_action is None: setting to 'check'")
+ long_action = 'check'
+ func = check_opt.func
+ status = check_opt.status
if args[0] == "all":
tasks = []
for m in module_names[1:]:
- #print("DEBUG: module: %s, functions: " %(m, str(module_controller.get_functions(m))))
- if long_action in module_controller.get_functions(m):
+ #print("DEBUG: module: %s, functions: " % (m, str(module_controller.get_functions(m))))
+ if func in module_controller.get_functions(m):
tasks.append(module_controller.get_class(m))
- elif long_action in module_controller.get_functions(args[0]):
+ elif func in module_controller.get_functions(args[0]):
tasks = [module_controller.get_class(args[0] )]
else:
- print("\nERROR: module '%s' does not have option '%s'\n" %(args[0], action))
- print(module_opts(module_controller, args[0]))
+ portage.util.writemsg(
+ "\nERROR: module '%s' does not have option '--%s'\n\n" %
+ (args[0], long_action), noiselevel=-1)
+ portage.util.writemsg(module_opts(module_controller, args[0]),
+ noiselevel=-1)
sys.exit(1)
- func = status = None
- for opt in parser_options:
- if opt.check_action(action):
- status = opt.status
- func = opt.func
- break
# need to pass the parser options dict to the modules
# so they are available if needed.
task_opts = options.__dict__
taskmaster = TaskHandler(callback=print_results)
taskmaster.run_tasks(tasks, func, status, options=task_opts)
-
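
With optparse's callback-based exclusivity checks gone, the patch resolves the requested action by scanning the parsed namespace. The loop reads naturally as a small function; resolve_action() is an illustrative restatement of the patch's inline code, not a name that exists in portage:

    def resolve_action(options, parser_options, parser):
        """Pick the single requested action, defaulting to --check."""
        check_opt = None
        func = status = long_action = None
        for opt in parser_options:
            if opt.long == '--check':
                check_opt = opt          # remembered as the default action
            if opt.status and getattr(options, opt.target, False):
                if long_action is not None:
                    parser.error("--%s and %s are exclusive options"
                        % (long_action, opt.long))
                status = opt.status
                func = opt.func
                long_action = opt.long.lstrip('-')
        if long_action is None:
            long_action = 'check'
            func = check_opt.func
            status = check_opt.status
        return func, status, long_action
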
diff --git a/pym/portage/emaint/module.py b/pym/portage/emaint/module.py
index 64b0c64b5..bf7d25fc5 100644
--- a/pym/portage/emaint/module.py
+++ b/pym/portage/emaint/module.py
@@ -1,4 +1,4 @@
-# Copyright 2005-2012 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
@@ -37,10 +37,10 @@ class Module(object):
self.valid = False
try:
mod_name = ".".join([self._namepath, self.name])
- self._module = __import__(mod_name, [],[], ["not empty"])
+ self._module = __import__(mod_name, [], [], ["not empty"])
self.valid = True
except ImportError as e:
- print("MODULE; failed import", mod_name, " error was:",e)
+ print("MODULE; failed import", mod_name, " error was:", e)
return False
self.module_spec = self._module.module_spec
for submodule in self.module_spec['provides']:
@@ -61,7 +61,7 @@ class Module(object):
module = kid['instance']
else:
try:
- module = __import__(kid['module_name'], [],[], ["not empty"])
+ module = __import__(kid['module_name'], [], [], ["not empty"])
kid['instance'] = module
kid['is_imported'] = True
except ImportError:
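
Both import sites above lean on a detail of __import__: with an empty fromlist, __import__("a.b.c") returns the top-level package "a", while a non-empty fromlist returns the leaf module itself. In isolation:

    def import_leaf(mod_name):
        # The non-empty fromlist makes __import__ return the named
        # submodule rather than its top-level package.
        return __import__(mod_name, [], [], ["not empty"])

    # e.g. logs = import_leaf("portage.emaint.modules.logs.logs")
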
diff --git a/pym/portage/emaint/modules/__init__.py b/pym/portage/emaint/modules/__init__.py
index 35674e342..f67197d9f 100644
--- a/pym/portage/emaint/modules/__init__.py
+++ b/pym/portage/emaint/modules/__init__.py
@@ -1,7 +1,5 @@
# Copyright 2005-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'The emaint program plug-in module provides an automatic method
-of adding/removing modules to perform checks and maintenance
-on a gentoo system.
+"""Plug-in modules for system health checks and maintenance.
"""
diff --git a/pym/portage/emaint/modules/binhost/__init__.py b/pym/portage/emaint/modules/binhost/__init__.py
index 1a61af42b..c60e8bcb4 100644
--- a/pym/portage/emaint/modules/binhost/__init__.py
+++ b/pym/portage/emaint/modules/binhost/__init__.py
@@ -1,20 +1,18 @@
# Copyright 2005-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'The emaint program module provides checks and maintenancefor:
- Scanning, checking and fixing problems in the world file.
+"""Scan and generate metadata indexes for binary packages.
"""
module_spec = {
'name': 'binhost',
- 'description': "Provides functions to scan, check and " + \
- "Generate a metadata index for binary packages",
+ 'description': __doc__,
'provides':{
'module1': {
'name': "binhost",
'class': "BinhostHandler",
- 'description': "Generate a metadata index for binary packages",
+ 'description': __doc__,
'functions': ['check', 'fix'],
'func_desc': {}
}
diff --git a/pym/portage/emaint/modules/binhost/binhost.py b/pym/portage/emaint/modules/binhost/binhost.py
index b540d7686..1138a8c7e 100644
--- a/pym/portage/emaint/modules/binhost/binhost.py
+++ b/pym/portage/emaint/modules/binhost/binhost.py
@@ -1,4 +1,4 @@
-# Copyright 2005-2012 Gentoo Foundation
+# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -9,7 +9,9 @@ from portage import os
from portage.util import writemsg
import sys
+
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
class BinhostHandler(object):
@@ -151,12 +153,8 @@ class BinhostHandler(object):
del pkgindex.packages[:]
pkgindex.packages.extend(metadata.values())
- from portage.util import atomic_ofstream
- f = atomic_ofstream(self._pkgindex_file)
- try:
- self._pkgindex.write(f)
- finally:
- f.close()
+ bintree._pkgindex_write(self._pkgindex)
+
finally:
locks.unlockfile(pkgindex_lock)
diff --git a/pym/portage/emaint/modules/config/__init__.py b/pym/portage/emaint/modules/config/__init__.py
index 22abb07b1..f0585b39a 100644
--- a/pym/portage/emaint/modules/config/__init__.py
+++ b/pym/portage/emaint/modules/config/__init__.py
@@ -1,20 +1,18 @@
# Copyright 2005-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'This emaint module provides checks and maintenance for:
-Cleaning the emerge config tracker list
+"""Check and clean the config tracker list for uninstalled packages.
"""
module_spec = {
'name': 'config',
- 'description': "Provides functions to scan, check for and fix no " +\
- "longer installed config files in emerge's tracker file",
+ 'description': __doc__,
'provides':{
'module1': {
'name': "cleanconfmem",
'class': "CleanConfig",
- 'description': "Discard no longer installed config tracker entries",
+ 'description': __doc__,
'functions': ['check', 'fix'],
'func_desc': {}
}
diff --git a/pym/portage/emaint/modules/config/config.py b/pym/portage/emaint/modules/config/config.py
index a80d87d29..dad024b21 100644
--- a/pym/portage/emaint/modules/config/config.py
+++ b/pym/portage/emaint/modules/config/config.py
@@ -4,14 +4,14 @@
import portage
from portage import os
from portage.const import PRIVATE_PATH
-from portage.checksum import perform_md5
-
+from portage.util import grabdict, writedict
class CleanConfig(object):
	short_desc = "Discard any no-longer-installed configs from emerge's tracker list"
def __init__(self):
+ self._root = portage.settings["ROOT"]
self.target = os.path.join(portage.settings["EROOT"], PRIVATE_PATH, 'config')
def name():
@@ -19,70 +19,55 @@ class CleanConfig(object):
name = staticmethod(name)
def load_configlist(self):
-
- configs = {}
- with open(self.target, 'r') as configfile:
- lines = configfile.readlines()
- for line in lines:
- ls = line.split()
- configs[ls[0]] = ls[1]
- return configs
+ return grabdict(self.target)
def check(self, **kwargs):
onProgress = kwargs.get('onProgress', None)
configs = self.load_configlist()
messages = []
- chksums = []
maxval = len(configs)
if onProgress:
onProgress(maxval, 0)
i = 0
keys = sorted(configs)
for config in keys:
- if os.path.exists(config):
- md5sumactual = perform_md5(config)
- if md5sumactual != configs[config]:
- chksums.append(" %s" % config)
- else:
+ if not os.path.exists(config):
messages.append(" %s" % config)
if onProgress:
onProgress(maxval, i+1)
i += 1
- return self._format_output(messages, chksums)
+ return self._format_output(messages)
def fix(self, **kwargs):
onProgress = kwargs.get('onProgress', None)
configs = self.load_configlist()
messages = []
- chksums = []
maxval = len(configs)
if onProgress:
onProgress(maxval, 0)
i = 0
- keys = sorted(configs)
- for config in keys:
- if os.path.exists(config):
- md5sumactual = perform_md5(config)
- if md5sumactual != configs[config]:
- chksums.append(" %s" % config)
- configs.pop(config)
+
+ root = self._root
+ if root == "/":
+ root = None
+ modified = False
+ for config in sorted(configs):
+ if root is None:
+ full_path = config
else:
- configs.pop(config)
- messages.append(" %s" % config)
+ full_path = os.path.join(root, config.lstrip(os.sep))
+ if not os.path.exists(full_path):
+ modified = True
+ configs.pop(config)
+ messages.append(" %s" % config)
if onProgress:
onProgress(maxval, i+1)
i += 1
- lines = []
- keys = sorted(configs)
- for key in keys:
- line = ' '.join([key, configs[key]])
- lines.append(line)
- lines.append('')
- with open(self.target, 'w') as configfile:
- configfile.write('\n'.join(lines))
- return self._format_output(messages, chksums, True)
+ if modified:
+ writedict(configs, self.target)
+ return self._format_output(messages, True)
- def _format_output(self, messages=[], chksums=[], cleaned=False):
+ def _format_output(self, messages=[], cleaned=False):
output = []
if messages:
output.append('Not Installed:')
@@ -91,11 +76,4 @@ class CleanConfig(object):
if cleaned:
tot += ' ...Cleaned'
output.append(tot % len(messages))
- if chksums:
- output.append('\nChecksums did not match:')
- output += chksums
- tot = '------------------------------------\n Total %i Checksums did not match'
- if cleaned:
- tot += ' ...Cleaned'
- output.append(tot % len(chksums))
return output
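
grabdict() and writedict() replace the hand-rolled parsing that the hunk removes. The format they round-trip is one key per line followed by whitespace-separated values; a rough functional equivalent, simplified from the real portage.util versions, which handle more edge cases:

    def grabdict_sketch(path):
        data = {}
        with open(path) as f:
            for line in f:
                parts = line.split()
                if parts:
                    data[parts[0]] = parts[1:]
        return data

    def writedict_sketch(data, path):
        with open(path, 'w') as f:
            for key in sorted(data):
                f.write(' '.join([key] + list(data[key])) + '\n')
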
diff --git a/pym/portage/emaint/modules/logs/__init__.py b/pym/portage/emaint/modules/logs/__init__.py
index 005b608a6..0407efe2b 100644
--- a/pym/portage/emaint/modules/logs/__init__.py
+++ b/pym/portage/emaint/modules/logs/__init__.py
@@ -1,38 +1,34 @@
-# Copyright 2005-2012 Gentoo Foundation
+# Copyright 2005-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'This emaint module provides checks and maintenance for:
-Cleaning the PORT_LOGDIR logs
+"""Check and clean old logs in the PORT_LOGDIR.
"""
module_spec = {
'name': 'logs',
- 'description': "Provides functions to scan, check and clean old logs " +\
- "in the PORT_LOGDIR",
+ 'description': __doc__,
'provides':{
'module1': {
'name': "logs",
'class': "CleanLogs",
- 'description': "Clean out old logs from the PORT_LOGDIR",
+ 'description': __doc__,
'functions': ['check','clean'],
'func_desc': {
'clean': {
"short": "-C", "long": "--clean",
"help": "Cleans out logs more than 7 days old (cleanlogs only)" + \
- " modulke-options: -t, -p",
+ " module-options: -t, -p",
'status': "Cleaning %s",
- 'func': 'clean'
+ 'action': 'store_true',
+ 'func': 'clean',
},
'time': {
"short": "-t", "long": "--time",
"help": "(cleanlogs only): -t, --time Delete logs older than NUM of days",
'status': "",
- 'action': 'store',
- 'type': 'int',
+ 'type': int,
'dest': 'NUM',
- 'callback': None,
- 'callback_kwargs': None,
'func': 'clean'
},
'pretend': {
@@ -41,8 +37,6 @@ module_spec = {
'status': "",
'action': 'store_true',
'dest': 'pretend',
- 'callback': None,
- 'callback_kwargs': None,
'func': 'clean'
}
}
diff --git a/pym/portage/emaint/modules/logs/logs.py b/pym/portage/emaint/modules/logs/logs.py
index 32c8508f7..fe65cf587 100644
--- a/pym/portage/emaint/modules/logs/logs.py
+++ b/pym/portage/emaint/modules/logs/logs.py
@@ -39,11 +39,10 @@ class CleanLogs(object):
options: dict:
'NUM': int: number of days
'pretend': boolean
- 'eerror': defaults to None, optional output module to output errors.
- 'einfo': defaults to None, optional output module to output info msgs.
"""
messages = []
num_of_days = None
+ pretend = False
if kwargs:
			# convoluted, I know, but portage.settings does not exist in
# kwargs.get() when called from _emerge.main.clean_logs()
@@ -54,8 +53,6 @@ class CleanLogs(object):
if options:
num_of_days = options.get('NUM', None)
pretend = options.get('pretend', False)
- eerror = options.get('eerror', None)
- einfo = options.get('einfo', None)
clean_cmd = settings.get("PORT_LOGDIR_CLEAN")
if clean_cmd:
@@ -75,7 +72,7 @@ class CleanLogs(object):
if not clean_cmd:
return []
rval = self._clean_logs(clean_cmd, settings)
- messages += self._convert_errors(rval, eerror, einfo)
+ messages += self._convert_errors(rval)
return messages
@@ -96,19 +93,11 @@ class CleanLogs(object):
@staticmethod
- def _convert_errors(rval, eerror=None, einfo=None):
+ def _convert_errors(rval):
msg = []
if rval != os.EX_OK:
msg.append("PORT_LOGDIR_CLEAN command returned %s"
% ("%d" % rval if rval else "None"))
msg.append("See the make.conf(5) man page for "
"PORT_LOGDIR_CLEAN usage instructions.")
- if eerror:
- for m in msg:
- eerror(m)
- else:
- msg.append("PORT_LOGDIR_CLEAN command succeeded")
- if einfo:
- for m in msg:
- einfo(m)
return msg
diff --git a/pym/portage/emaint/modules/move/__init__.py b/pym/portage/emaint/modules/move/__init__.py
index 5399440ce..d31d7b346 100644
--- a/pym/portage/emaint/modules/move/__init__.py
+++ b/pym/portage/emaint/modules/move/__init__.py
@@ -1,21 +1,18 @@
# Copyright 2005-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'This emaint module provides checks and maintenance for:
- 1) "Performing package move updates for installed packages",
- 2)"Perform package move updates for binary packages"
+"""Perform package move updates for installed and binary packages.
"""
module_spec = {
'name': 'move',
- 'description': "Provides functions to check for and move packages " +\
- "either installed or binary packages stored on this system",
+ 'description': __doc__,
'provides':{
'module1': {
'name': "moveinst",
'class': "MoveInstalled",
- 'description': "Perform package move updates for installed packages",
+ 'description': __doc__,
'options': ['check', 'fix'],
'functions': ['check', 'fix'],
'func_desc': {
diff --git a/pym/portage/emaint/modules/move/move.py b/pym/portage/emaint/modules/move/move.py
index 018e6cac1..ef674d47a 100644
--- a/pym/portage/emaint/modules/move/move.py
+++ b/pym/portage/emaint/modules/move/move.py
@@ -3,14 +3,16 @@
import portage
from portage import os
-
+from portage.exception import InvalidData
+from _emerge.Package import Package
+from portage.versions import _pkg_str
class MoveHandler(object):
def __init__(self, tree, porttree):
self._tree = tree
self._portdb = porttree.dbapi
- self._update_keys = ["DEPEND", "RDEPEND", "PDEPEND", "PROVIDE"]
+ self._update_keys = Package._dep_keys + ("PROVIDE",)
self._master_repo = \
self._portdb.getRepositoryName(self._portdb.porttree_root)
@@ -48,6 +50,8 @@ class MoveHandler(object):
# progress bar is updated in indeterminate mode.
match = self._tree.dbapi.match
aux_get = self._tree.dbapi.aux_get
+ pkg_str = self._tree.dbapi._pkg_str
+ settings = self._tree.dbapi.settings
if onProgress:
onProgress(0, 0)
for repo, updates in allupdates.items():
@@ -65,13 +69,21 @@ class MoveHandler(object):
if update_cmd[0] == "move":
origcp, newcp = update_cmd[1:]
for cpv in match(origcp):
- if repo_match(aux_get(cpv, ["repository"])[0]):
+ try:
+ cpv = pkg_str(cpv, origcp.repo)
+ except (KeyError, InvalidData):
+ continue
+ if repo_match(cpv.repo):
errors.append("'%s' moved to '%s'" % (cpv, newcp))
elif update_cmd[0] == "slotmove":
pkg, origslot, newslot = update_cmd[1:]
- for cpv in match(pkg):
- slot, prepo = aux_get(cpv, ["SLOT", "repository"])
- if slot == origslot and repo_match(prepo):
+ atom = pkg.with_slot(origslot)
+ for cpv in match(atom):
+ try:
+ cpv = pkg_str(cpv, atom.repo)
+ except (KeyError, InvalidData):
+ continue
+ if repo_match(cpv.repo):
errors.append("'%s' slot moved from '%s' to '%s'" % \
(cpv, origslot, newslot))
if onProgress:
@@ -82,15 +94,21 @@ class MoveHandler(object):
cpv_all = self._tree.dbapi.cpv_all()
cpv_all.sort()
maxval = len(cpv_all)
- meta_keys = self._update_keys + ['repository', 'EAPI']
+ meta_keys = self._update_keys + self._portdb._pkg_str_aux_keys
if onProgress:
onProgress(maxval, 0)
for i, cpv in enumerate(cpv_all):
- metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
- eapi = metadata.pop('EAPI')
- repository = metadata.pop('repository')
try:
- updates = allupdates[repository]
+ metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+ except KeyError:
+ continue
+ try:
+ pkg = _pkg_str(cpv, metadata=metadata, settings=settings)
+ except InvalidData:
+ continue
+ metadata = dict((k, metadata[k]) for k in self._update_keys)
+ try:
+ updates = allupdates[pkg.repo]
except KeyError:
try:
updates = allupdates['DEFAULT']
@@ -99,7 +117,7 @@ class MoveHandler(object):
if not updates:
continue
metadata_updates = \
- portage.update_dbentries(updates, metadata, eapi=eapi)
+ portage.update_dbentries(updates, metadata, parent=pkg)
if metadata_updates:
errors.append("'%s' has outdated metadata" % cpv)
if onProgress:
diff --git a/pym/portage/emaint/modules/resume/__init__.py b/pym/portage/emaint/modules/resume/__init__.py
index 60cffe9db..965e8f945 100644
--- a/pym/portage/emaint/modules/resume/__init__.py
+++ b/pym/portage/emaint/modules/resume/__init__.py
@@ -1,15 +1,13 @@
# Copyright 2005-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'This emaint module provides checks and maintenance for:
-Cleaning the "emerge --resume" lists
+"""Check and fix problems in the resume and/or resume_backup files.
"""
module_spec = {
'name': 'resume',
- 'description': "Provides functions to scan, check and fix problems " +\
- "in the resume and/or resume_backup files",
+ 'description': __doc__,
'provides':{
'module1': {
'name': "cleanresume",
diff --git a/pym/portage/emaint/modules/world/__init__.py b/pym/portage/emaint/modules/world/__init__.py
index 103b5c5ba..3f62270ee 100644
--- a/pym/portage/emaint/modules/world/__init__.py
+++ b/pym/portage/emaint/modules/world/__init__.py
@@ -1,20 +1,18 @@
# Copyright 2005-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-"""'This emaint module provides checks and maintenance for:
-Fixing problems with the "world" file.
+"""Check and fix problems in the world file.
"""
module_spec = {
'name': 'world',
- 'description': "Provides functions to scan, " +
- "check and fix problems in the world file",
+ 'description': __doc__,
'provides':{
'module1':{
'name': "world",
'class': "WorldHandler",
- 'description': "Fix problems in the world file",
+ 'description': __doc__,
'functions': ['check', 'fix'],
'func_desc': {}
}
diff --git a/pym/portage/env/loaders.py b/pym/portage/env/loaders.py
index 372bc12fa..f86988471 100644
--- a/pym/portage/env/loaders.py
+++ b/pym/portage/env/loaders.py
@@ -1,10 +1,14 @@
# config.py -- Portage Config
-# Copyright 2007-2011 Gentoo Foundation
+# Copyright 2007-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
import io
import stat
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:writemsg',
+)
from portage import os
from portage import _encodings
from portage import _unicode_decode
@@ -149,17 +153,21 @@ class FileLoader(DataLoader):
func = self.lineParser
for fn in RecursiveFileLoader(self.fname):
try:
- f = io.open(_unicode_encode(fn,
+ with io.open(_unicode_encode(fn,
encoding=_encodings['fs'], errors='strict'), mode='r',
- encoding=_encodings['content'], errors='replace')
+ encoding=_encodings['content'], errors='replace') as f:
+ lines = f.readlines()
except EnvironmentError as e:
- if e.errno not in (errno.ENOENT, errno.ESTALE):
+ if e.errno == errno.EACCES:
+ writemsg(_("Permission denied: '%s'\n") % fn, noiselevel=-1)
+ del e
+ elif e.errno in (errno.ENOENT, errno.ESTALE):
+ del e
+ else:
raise
- del e
- continue
- for line_num, line in enumerate(f):
- func(line, line_num, data, errors)
- f.close()
+ else:
+ for line_num, line in enumerate(lines):
+ func(line, line_num, data, errors)
return (data, errors)
def lineParser(self, line, line_num, data, errors):
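
The loader change swaps a bare file handle for a with-block and triages errnos explicitly instead of lumping them together. The same shape in isolation; read_lines() is illustrative, and the real code reports the EACCES case through writemsg() with localization:

    import errno
    import io

    def read_lines(fn):
        """Return the file's lines, or None if missing, stale, or unreadable."""
        try:
            with io.open(fn, mode='r', encoding='utf_8',
                errors='replace') as f:
                return f.readlines()
        except EnvironmentError as e:
            if e.errno == errno.EACCES:
                print("Permission denied: '%s'" % fn)
                return None
            elif e.errno in (errno.ENOENT, errno.ESTALE):
                return None
            raise
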
diff --git a/pym/portage/exception.py b/pym/portage/exception.py
index 5ccd750ab..6fa5447a7 100644
--- a/pym/portage/exception.py
+++ b/pym/portage/exception.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import signal
@@ -7,30 +7,40 @@ from portage import _encodings, _unicode_encode, _unicode_decode
from portage.localization import _
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
class PortageException(Exception):
"""General superclass for portage exceptions"""
- def __init__(self,value):
- self.value = value[:]
- if isinstance(self.value, basestring):
- self.value = _unicode_decode(self.value,
- encoding=_encodings['content'], errors='replace')
+ if sys.hexversion >= 0x3000000:
+ def __init__(self, value):
+ self.value = value[:]
- def __str__(self):
- if isinstance(self.value, basestring):
- return self.value
- else:
- return _unicode_decode(repr(self.value),
- encoding=_encodings['content'], errors='replace')
-
- if sys.hexversion < 0x3000000:
-
- __unicode__ = __str__
+ def __str__(self):
+ if isinstance(self.value, str):
+ return self.value
+ else:
+ return repr(self.value)
+ else:
+ def __init__(self, value):
+ self.value = value[:]
+ if isinstance(self.value, basestring):
+ self.value = _unicode_decode(self.value,
+ encoding=_encodings['content'], errors='replace')
+
+ def __unicode__(self):
+ if isinstance(self.value, unicode):
+ return self.value
+ else:
+ return _unicode_decode(repr(self.value),
+ encoding=_encodings['content'], errors='replace')
def __str__(self):
- return _unicode_encode(self.__unicode__(),
- encoding=_encodings['content'], errors='backslashreplace')
+ if isinstance(self.value, unicode):
+ return _unicode_encode(self.value,
+ encoding=_encodings['content'], errors='backslashreplace')
+ else:
+ return repr(self.value)
class CorruptionError(PortageException):
"""Corruption indication"""
@@ -75,20 +85,20 @@ class DirectoryNotFound(InvalidLocation):
"""A directory was not found when it was expected to exist"""
class OperationNotPermitted(PortageException):
- from errno import EPERM as errno
"""An operation was not permitted operating system"""
+ from errno import EPERM as errno
class OperationNotSupported(PortageException):
- from errno import EOPNOTSUPP as errno
"""Operation not supported"""
+ from errno import EOPNOTSUPP as errno
class PermissionDenied(PortageException):
- from errno import EACCES as errno
"""Permission denied"""
+ from errno import EACCES as errno
class TryAgain(PortageException):
- from errno import EAGAIN as errno
"""Try again"""
+ from errno import EAGAIN as errno
class TimeoutException(PortageException):
"""Operation timed out"""
diff --git a/pym/portage/getbinpkg.py b/pym/portage/getbinpkg.py
index 212f78889..14dc149b1 100644
--- a/pym/portage/getbinpkg.py
+++ b/pym/portage/getbinpkg.py
@@ -1,7 +1,9 @@
# getbinpkg.py -- Portage binary-package helper functions
-# Copyright 2003-2012 Gentoo Foundation
+# Copyright 2003-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
from portage.output import colorize
from portage.cache.mappings import slot_dict_class
from portage.localization import _
@@ -18,6 +20,7 @@ import socket
import time
import tempfile
import base64
+import warnings
_all_errors = [NotImplementedError, ValueError, socket.error]
@@ -39,7 +42,7 @@ except ImportError:
try:
import ftplib
except ImportError as e:
- sys.stderr.write(colorize("BAD","!!! CANNOT IMPORT FTPLIB: ")+str(e)+"\n")
+ sys.stderr.write(colorize("BAD", "!!! CANNOT IMPORT FTPLIB: ") + str(e) + "\n")
else:
_all_errors.extend(ftplib.all_errors)
@@ -55,24 +58,28 @@ try:
from httplib import ResponseNotReady as http_client_ResponseNotReady
from httplib import error as http_client_error
except ImportError as e:
- sys.stderr.write(colorize("BAD","!!! CANNOT IMPORT HTTP.CLIENT: ")+str(e)+"\n")
+ sys.stderr.write(colorize("BAD", "!!! CANNOT IMPORT HTTP.CLIENT: ") + str(e) + "\n")
else:
_all_errors.append(http_client_error)
_all_errors = tuple(_all_errors)
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
def make_metadata_dict(data):
- myid,myglob = data
+
+ warnings.warn("portage.getbinpkg.make_metadata_dict() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ myid, _myglob = data
mydict = {}
for k_bytes in portage.xpak.getindex_mem(myid):
k = _unicode_decode(k_bytes,
encoding=_encodings['repo.content'], errors='replace')
- if k not in _all_metadata_keys and \
- k != "CATEGORY":
+ if k not in _all_metadata_keys and k != "CATEGORY":
continue
v = _unicode_decode(portage.xpak.getitem(data, k_bytes),
encoding=_encodings['repo.content'], errors='replace')
@@ -84,13 +91,17 @@ class ParseLinks(html_parser_HTMLParser):
"""Parser class that overrides HTMLParser to grab all anchors from an html
page and provide suffix and prefix limitors"""
def __init__(self):
+
+ warnings.warn("portage.getbinpkg.ParseLinks is deprecated",
+ DeprecationWarning, stacklevel=2)
+
self.PL_anchors = []
html_parser_HTMLParser.__init__(self)
def get_anchors(self):
return self.PL_anchors
- def get_anchors_by_prefix(self,prefix):
+ def get_anchors_by_prefix(self, prefix):
newlist = []
for x in self.PL_anchors:
if x.startswith(prefix):
@@ -98,7 +109,7 @@ class ParseLinks(html_parser_HTMLParser):
newlist.append(x[:])
return newlist
- def get_anchors_by_suffix(self,suffix):
+ def get_anchors_by_suffix(self, suffix):
newlist = []
for x in self.PL_anchors:
if x.endswith(suffix):
@@ -106,10 +117,10 @@ class ParseLinks(html_parser_HTMLParser):
newlist.append(x[:])
return newlist
- def handle_endtag(self,tag):
+ def handle_endtag(self, tag):
pass
- def handle_starttag(self,tag,attrs):
+ def handle_starttag(self, tag, attrs):
if tag == "a":
for x in attrs:
if x[0] == 'href':
@@ -117,16 +128,19 @@ class ParseLinks(html_parser_HTMLParser):
self.PL_anchors.append(urllib_parse_unquote(x[1]))
-def create_conn(baseurl,conn=None):
- """(baseurl,conn) --- Takes a protocol://site:port/address url, and an
+def create_conn(baseurl, conn=None):
+ """Takes a protocol://site:port/address url, and an
optional connection. If connection is already active, it is passed on.
baseurl is reduced to address and is returned in tuple (conn,address)"""
- parts = baseurl.split("://",1)
+ warnings.warn("portage.getbinpkg.create_conn() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ parts = baseurl.split("://", 1)
if len(parts) != 2:
raise ValueError(_("Provided URI does not "
"contain protocol identifier. '%s'") % baseurl)
- protocol,url_parts = parts
+ protocol, url_parts = parts
del parts
url_parts = url_parts.split("/")
@@ -137,7 +151,7 @@ def create_conn(baseurl,conn=None):
address = "/"+"/".join(url_parts[1:])
del url_parts
- userpass_host = host.split("@",1)
+ userpass_host = host.split("@", 1)
if len(userpass_host) == 1:
host = userpass_host[0]
userpass = ["anonymous"]
@@ -196,10 +210,10 @@ def create_conn(baseurl,conn=None):
host = host[:-1]
conn = ftplib.FTP(host)
if password:
- conn.login(username,password)
+ conn.login(username, password)
else:
sys.stderr.write(colorize("WARN",
- _(" * No password provided for username"))+" '%s'" % \
+ _(" * No password provided for username")) + " '%s'" % \
(username,) + "\n\n")
conn.login(username)
conn.set_pasv(passive)
@@ -216,11 +230,15 @@ def create_conn(baseurl,conn=None):
else:
raise NotImplementedError(_("%s is not a supported protocol.") % protocol)
- return (conn,protocol,address, http_params, http_headers)
+ return (conn, protocol, address, http_params, http_headers)
def make_ftp_request(conn, address, rest=None, dest=None):
- """(conn,address,rest) --- uses the conn object to request the data
+ """Uses the |conn| object to request the data
from address and issuing a rest if it is passed."""
+
+ warnings.warn("portage.getbinpkg.make_ftp_request() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
try:
if dest:
@@ -235,9 +253,9 @@ def make_ftp_request(conn, address, rest=None, dest=None):
rest = 0
if rest != None:
- mysocket = conn.transfercmd("RETR "+str(address), rest)
+ mysocket = conn.transfercmd("RETR %s" % str(address), rest)
else:
- mysocket = conn.transfercmd("RETR "+str(address))
+ mysocket = conn.transfercmd("RETR %s" % str(address))
mydata = ""
while 1:
@@ -259,28 +277,31 @@ def make_ftp_request(conn, address, rest=None, dest=None):
conn.voidresp()
conn.voidcmd("TYPE A")
- return mydata,not (fsize==data_size),""
+ return mydata, (fsize != data_size), ""
except ValueError as e:
- return None,int(str(e)[:4]),str(e)
+ return None, int(str(e)[:4]), str(e)
-def make_http_request(conn, address, params={}, headers={}, dest=None):
- """(conn,address,params,headers) --- uses the conn object to request
+def make_http_request(conn, address, _params={}, headers={}, dest=None):
+ """Uses the |conn| object to request
the data from address, performing Location forwarding and using the
optional params and headers."""
+ warnings.warn("portage.getbinpkg.make_http_request() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
rc = 0
response = None
while (rc == 0) or (rc == 301) or (rc == 302):
try:
- if (rc != 0):
- conn,ignore,ignore,ignore,ignore = create_conn(address)
+ if rc != 0:
+ conn = create_conn(address)[0]
conn.request("GET", address, body=None, headers=headers)
except SystemExit as e:
raise
except Exception as e:
- return None,None,"Server request failed: "+str(e)
+ return None, None, "Server request failed: %s" % str(e)
response = conn.getresponse()
rc = response.status
@@ -289,7 +310,7 @@ def make_http_request(conn, address, params={}, headers={}, dest=None):
ignored_data = response.read()
del ignored_data
for x in str(response.msg).split("\n"):
- parts = x.split(": ",1)
+ parts = x.split(": ", 1)
if parts[0] == "Location":
if (rc == 301):
sys.stderr.write(colorize("BAD",
@@ -302,16 +323,20 @@ def make_http_request(conn, address, params={}, headers={}, dest=None):
break
if (rc != 200) and (rc != 206):
- return None,rc,"Server did not respond successfully ("+str(response.status)+": "+str(response.reason)+")"
+ return None, rc, "Server did not respond successfully (%s: %s)" % (str(response.status), str(response.reason))
if dest:
dest.write(response.read())
- return "",0,""
+ return "", 0, ""
- return response.read(),0,""
+ return response.read(), 0, ""
def match_in_array(array, prefix="", suffix="", match_both=1, allow_overlap=0):
+
+ warnings.warn("portage.getbinpkg.match_in_array() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
myarray = []
if not (prefix and suffix):
@@ -344,20 +369,22 @@ def match_in_array(array, prefix="", suffix="", match_both=1, allow_overlap=0):
continue # Doesn't match.
return myarray
-
-def dir_get_list(baseurl,conn=None):
- """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+def dir_get_list(baseurl, conn=None):
+ """Takes a base url to connect to and read from.
URI should be in the form <proto>://<site>[:port]<path>
Connection is used for persistent connection instances."""
+ warnings.warn("portage.getbinpkg.dir_get_list() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
if not conn:
keepconnection = 0
else:
keepconnection = 1
- conn,protocol,address,params,headers = create_conn(baseurl, conn)
+ conn, protocol, address, params, headers = create_conn(baseurl, conn)
listing = None
if protocol in ["http","https"]:
@@ -365,7 +392,7 @@ def dir_get_list(baseurl,conn=None):
# http servers can return a 400 error here
# if the address doesn't end with a slash.
address += "/"
- page,rc,msg = make_http_request(conn,address,params,headers)
+ page, rc, msg = make_http_request(conn, address, params, headers)
if page:
parser = ParseLinks()
@@ -395,23 +422,26 @@ def dir_get_list(baseurl,conn=None):
return listing
-def file_get_metadata(baseurl,conn=None, chunk_size=3000):
- """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+def file_get_metadata(baseurl, conn=None, chunk_size=3000):
+ """Takes a base url to connect to and read from.
URI should be in the form <proto>://<site>[:port]<path>
Connection is used for persistent connection instances."""
+ warnings.warn("portage.getbinpkg.file_get_metadata() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
if not conn:
keepconnection = 0
else:
keepconnection = 1
- conn,protocol,address,params,headers = create_conn(baseurl, conn)
+ conn, protocol, address, params, headers = create_conn(baseurl, conn)
if protocol in ["http","https"]:
- headers["Range"] = "bytes=-"+str(chunk_size)
- data,rc,msg = make_http_request(conn, address, params, headers)
+ headers["Range"] = "bytes=-%s" % str(chunk_size)
+ data, _x, _x = make_http_request(conn, address, params, headers)
elif protocol in ["ftp"]:
- data,rc,msg = make_ftp_request(conn, address, -chunk_size)
+ data, _x, _x = make_ftp_request(conn, address, -chunk_size)
elif protocol == "sftp":
f = conn.open(address)
try:
@@ -424,21 +454,21 @@ def file_get_metadata(baseurl,conn=None, chunk_size=3000):
if data:
xpaksize = portage.xpak.decodeint(data[-8:-4])
- if (xpaksize+8) > chunk_size:
- myid = file_get_metadata(baseurl, conn, (xpaksize+8))
+ if (xpaksize + 8) > chunk_size:
+ myid = file_get_metadata(baseurl, conn, xpaksize + 8)
if not keepconnection:
conn.close()
return myid
else:
- xpak_data = data[len(data)-(xpaksize+8):-8]
+ xpak_data = data[len(data) - (xpaksize + 8):-8]
del data
myid = portage.xpak.xsplit_mem(xpak_data)
if not myid:
- myid = None,None
+ myid = None, None
del xpak_data
else:
- myid = None,None
+ myid = None, None
if not keepconnection:
conn.close()
@@ -446,53 +476,79 @@ def file_get_metadata(baseurl,conn=None, chunk_size=3000):
return myid
-def file_get(baseurl,dest,conn=None,fcmd=None,filename=None):
- """(baseurl,dest,fcmd=) -- Takes a base url to connect to and read from.
+def file_get(baseurl=None, dest=None, conn=None, fcmd=None, filename=None,
+ fcmd_vars=None):
+ """Takes a base url to connect to and read from.
URI should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""
if not fcmd:
- return file_get_lib(baseurl,dest,conn)
- if not filename:
- filename = os.path.basename(baseurl)
-
- variables = {
- "DISTDIR": dest,
- "URI": baseurl,
- "FILE": filename
- }
+
+ warnings.warn("Use of portage.getbinpkg.file_get() without the fcmd "
+ "parameter is deprecated", DeprecationWarning, stacklevel=2)
+
+ return file_get_lib(baseurl, dest, conn)
+
+ variables = {}
+
+ if fcmd_vars is not None:
+ variables.update(fcmd_vars)
+
+ if "DISTDIR" not in variables:
+ if dest is None:
+ raise portage.exception.MissingParameter(
+ _("%s is missing required '%s' key") %
+ ("fcmd_vars", "DISTDIR"))
+ variables["DISTDIR"] = dest
+
+ if "URI" not in variables:
+ if baseurl is None:
+ raise portage.exception.MissingParameter(
+ _("%s is missing required '%s' key") %
+ ("fcmd_vars", "URI"))
+ variables["URI"] = baseurl
+
+ if "FILE" not in variables:
+ if filename is None:
+ filename = os.path.basename(variables["URI"])
+ variables["FILE"] = filename
from portage.util import varexpand
from portage.process import spawn
myfetch = portage.util.shlex_split(fcmd)
myfetch = [varexpand(x, mydict=variables) for x in myfetch]
- fd_pipes= {
- 0:sys.stdin.fileno(),
- 1:sys.stdout.fileno(),
- 2:sys.stdout.fileno()
+ fd_pipes = {
+ 0: portage._get_stdin().fileno(),
+ 1: sys.__stdout__.fileno(),
+ 2: sys.__stdout__.fileno()
}
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
retval = spawn(myfetch, env=os.environ.copy(), fd_pipes=fd_pipes)
if retval != os.EX_OK:
sys.stderr.write(_("Fetcher exited with a failure condition.\n"))
return 0
return 1
-def file_get_lib(baseurl,dest,conn=None):
- """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+def file_get_lib(baseurl, dest, conn=None):
+ """Takes a base url to connect to and read from.
URI should be in the form <proto>://<site>[:port]<path>
Connection is used for persistent connection instances."""
+ warnings.warn("portage.getbinpkg.file_get_lib() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
if not conn:
keepconnection = 0
else:
keepconnection = 1
- conn,protocol,address,params,headers = create_conn(baseurl, conn)
+ conn, protocol, address, params, headers = create_conn(baseurl, conn)
- sys.stderr.write("Fetching '"+str(os.path.basename(address)+"'\n"))
- if protocol in ["http","https"]:
- data,rc,msg = make_http_request(conn, address, params, headers, dest=dest)
+ sys.stderr.write("Fetching '" + str(os.path.basename(address)) + "'\n")
+ if protocol in ["http", "https"]:
+ data, rc, _msg = make_http_request(conn, address, params, headers, dest=dest)
elif protocol in ["ftp"]:
- data,rc,msg = make_ftp_request(conn, address, dest=dest)
+ data, rc, _msg = make_ftp_request(conn, address, dest=dest)
elif protocol == "sftp":
rc = 0
try:
@@ -522,8 +578,10 @@ def file_get_lib(baseurl,dest,conn=None):
def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=1, makepickle=None):
- """(baseurl,conn,chunk_size,verbose) --
- """
+
+ warnings.warn("portage.getbinpkg.dir_get_metadata() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
if not conn:
keepconnection = 0
else:
@@ -536,7 +594,7 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
makepickle = "/var/cache/edb/metadata.idx.most_recent"
try:
- conn, protocol, address, params, headers = create_conn(baseurl, conn)
+ conn = create_conn(baseurl, conn)[0]
except _all_errors as e:
# ftplib.FTP(host) can raise errors like this:
# socket.error: (111, 'Connection refused')
@@ -557,18 +615,20 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
out.write(_("Loaded metadata pickle.\n"))
out.flush()
metadatafile.close()
- except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError):
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception:
metadata = {}
if baseurl not in metadata:
- metadata[baseurl]={}
+ metadata[baseurl] = {}
if "indexname" not in metadata[baseurl]:
- metadata[baseurl]["indexname"]=""
+ metadata[baseurl]["indexname"] = ""
if "timestamp" not in metadata[baseurl]:
- metadata[baseurl]["timestamp"]=0
+ metadata[baseurl]["timestamp"] = 0
if "unmodified" not in metadata[baseurl]:
- metadata[baseurl]["unmodified"]=0
+ metadata[baseurl]["unmodified"] = 0
if "data" not in metadata[baseurl]:
- metadata[baseurl]["data"]={}
+ metadata[baseurl]["data"] = {}
if not os.access(cache_path, os.W_OK):
sys.stderr.write(_("!!! Unable to write binary metadata to disk!\n"))
@@ -594,36 +654,36 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
for mfile in metalist:
if usingcache and \
((metadata[baseurl]["indexname"] != mfile) or \
- (metadata[baseurl]["timestamp"] < int(time.time()-(60*60*24)))):
+ (metadata[baseurl]["timestamp"] < int(time.time() - (60 * 60 * 24)))):
# Try to download new cache until we succeed on one.
- data=""
- for trynum in [1,2,3]:
+ data = ""
+ for trynum in [1, 2, 3]:
mytempfile = tempfile.TemporaryFile()
try:
- file_get(baseurl+"/"+mfile, mytempfile, conn)
+ file_get(baseurl + "/" + mfile, mytempfile, conn)
if mytempfile.tell() > len(data):
mytempfile.seek(0)
data = mytempfile.read()
except ValueError as e:
- sys.stderr.write("--- "+str(e)+"\n")
+ sys.stderr.write("--- %s\n" % str(e))
if trynum < 3:
sys.stderr.write(_("Retrying...\n"))
sys.stderr.flush()
mytempfile.close()
continue
- if match_in_array([mfile],suffix=".gz"):
+ if match_in_array([mfile], suffix=".gz"):
out.write("gzip'd\n")
out.flush()
try:
import gzip
mytempfile.seek(0)
- gzindex = gzip.GzipFile(mfile[:-3],'rb',9,mytempfile)
+ gzindex = gzip.GzipFile(mfile[:-3], 'rb', 9, mytempfile)
data = gzindex.read()
except SystemExit as e:
raise
except Exception as e:
mytempfile.close()
- sys.stderr.write(_("!!! Failed to use gzip: ")+str(e)+"\n")
+ sys.stderr.write(_("!!! Failed to use gzip: ") + str(e) + "\n")
sys.stderr.flush()
mytempfile.close()
try:
@@ -638,8 +698,8 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
except SystemExit as e:
raise
except Exception as e:
- sys.stderr.write(_("!!! Failed to read data from index: ")+str(mfile)+"\n")
- sys.stderr.write("!!! "+str(e)+"\n")
+ sys.stderr.write(_("!!! Failed to read data from index: ") + str(mfile) + "\n")
+ sys.stderr.write("!!! %s" % str(e))
sys.stderr.flush()
try:
metadatafile = open(_unicode_encode(metadatafilename,
@@ -650,7 +710,7 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
raise
except Exception as e:
sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
- sys.stderr.write("!!! "+str(e)+"\n")
+ sys.stderr.write("!!! %s\n" % str(e))
sys.stderr.flush()
break
# We may have metadata... now we run through the tbz2 list and check.
@@ -670,8 +730,8 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
self.display()
def display(self):
self.out.write("\r"+colorize("WARN",
- _("cache miss: '")+str(self.misses)+"'") + \
- " --- "+colorize("GOOD", _("cache hit: '")+str(self.hits)+"'"))
+ _("cache miss: '") + str(self.misses) + "'") + \
+ " --- " + colorize("GOOD", _("cache hit: '") + str(self.hits) + "'"))
self.out.flush()
cache_stats = CacheStats(out)
@@ -688,7 +748,7 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
cache_stats.update()
metadata[baseurl]["modified"] = 1
myid = None
- for retry in range(3):
+ for _x in range(3):
try:
myid = file_get_metadata(
"/".join((baseurl.rstrip("/"), x.lstrip("/"))),
@@ -699,22 +759,20 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
# make_http_request(). The docstring for this error in
# httplib.py says "Presumably, the server closed the
# connection before sending a valid response".
- conn, protocol, address, params, headers = create_conn(
- baseurl)
+ conn = create_conn(baseurl)[0]
except http_client_ResponseNotReady:
# With some http servers this error is known to be thrown
# from conn.getresponse() in make_http_request() when the
# remote file does not have appropriate read permissions.
# Maybe it's possible to recover from this exception in
# cases though, so retry.
- conn, protocol, address, params, headers = create_conn(
- baseurl)
+ conn = create_conn(baseurl)[0]
if myid and myid[0]:
metadata[baseurl]["data"][x] = make_metadata_dict(myid)
elif verbose:
sys.stderr.write(colorize("BAD",
- _("!!! Failed to retrieve metadata on: "))+str(x)+"\n")
+ _("!!! Failed to retrieve metadata on: ")) + str(x) + "\n")
sys.stderr.flush()
else:
cache_stats.hits += 1
@@ -861,7 +919,6 @@ class PackageIndex(object):
for metadata in sorted(self.packages,
key=portage.util.cmp_sort_key(_cmp_cpv)):
metadata = metadata.copy()
- cpv = metadata["CPV"]
if self._inherited_keys:
for k in self._inherited_keys:
v = self.header.get(k)
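
Every legacy entry point in getbinpkg gains the same warnings.warn() preamble; stacklevel=2 makes Python attribute the warning to the caller's line rather than to getbinpkg itself. A minimal template — legacy_helper() is hypothetical:

    import warnings

    def legacy_helper():
        warnings.warn("legacy_helper() is deprecated",
            DeprecationWarning, stacklevel=2)
        # ...original behavior continues here...

    # DeprecationWarning is hidden by default in many Python versions;
    # enable it to see the message attributed to the calling line.
    warnings.simplefilter("always")
    legacy_helper()
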
diff --git a/pym/portage/glsa.py b/pym/portage/glsa.py
index 185769574..834572ac7 100644
--- a/pym/portage/glsa.py
+++ b/pym/portage/glsa.py
@@ -1,7 +1,7 @@
-# Copyright 2003-2012 Gentoo Foundation
+# Copyright 2003-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import absolute_import
+from __future__ import absolute_import, unicode_literals
import io
import sys
@@ -9,23 +9,27 @@ try:
from urllib.request import urlopen as urllib_request_urlopen
except ImportError:
from urllib import urlopen as urllib_request_urlopen
+import codecs
import re
+import operator
import xml.dom.minidom
+from io import StringIO
+from functools import reduce
import portage
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
-from portage.versions import pkgsplit, vercmp, best
+from portage.versions import pkgsplit, vercmp
from portage.util import grabfile
-from portage.const import CACHE_PATH
+from portage.const import PRIVATE_PATH
from portage.localization import _
from portage.dep import _slot_separator
# Note: the space for rgt and rlt is important !!
# FIXME: use slot deps instead, requires GLSA format versioning
-opMapping = {"le": "<=", "lt": "<", "eq": "=", "gt": ">", "ge": ">=",
+opMapping = {"le": "<=", "lt": "<", "eq": "=", "gt": ">", "ge": ">=",
"rge": ">=~", "rle": "<=~", "rgt": " >~", "rlt": " <~"}
NEWLINE_ESCAPE = "!;\\n" # some random string to mark newlines that should be preserved
SPACE_ESCAPE = "!;_" # some random string to mark spaces that should be preserved
@@ -39,22 +43,22 @@ def get_applied_glsas(settings):
@rtype: list
@return: list of glsa IDs
"""
- return grabfile(os.path.join(settings["EROOT"], CACHE_PATH, "glsa"))
+ return grabfile(os.path.join(settings["EROOT"], PRIVATE_PATH, "glsa_injected"))
# TODO: use the textwrap module instead
def wrap(text, width, caption=""):
"""
Wraps the given text at column I{width}, optionally indenting
- it so that no text is under I{caption}. It's possible to encode
+ it so that no text is under I{caption}. It's possible to encode
hard linebreaks in I{text} with L{NEWLINE_ESCAPE}.
-
+
@type text: String
@param text: the text to be wrapped
@type width: Integer
@param width: the column at which the text should be wrapped
@type caption: String
- @param caption: this string is inserted at the beginning of the
+ @param caption: this string is inserted at the beginning of the
return value and the paragraph is indented up to
C{len(caption)}.
@rtype: String
@@ -65,7 +69,7 @@ def wrap(text, width, caption=""):
text = text.replace(2*NEWLINE_ESCAPE, NEWLINE_ESCAPE+" "+NEWLINE_ESCAPE)
words = text.split()
indentLevel = len(caption)+1
-
+
for w in words:
if line != "" and line[-1] == "\n":
rValue += line
@@ -94,10 +98,10 @@ def get_glsa_list(myconfig):
Returns a list of all available GLSAs in the given repository
by comparing the filelist there with the pattern described in
the config.
-
+
@type myconfig: portage.config
@param myconfig: Portage settings instance
-
+
@rtype: List of Strings
@return: a list of GLSA IDs in this repository
"""
@@ -113,10 +117,10 @@ def get_glsa_list(myconfig):
dirlist = os.listdir(repository)
prefix = "glsa-"
suffix = ".xml"
-
+
for f in dirlist:
try:
- if f[:len(prefix)] == prefix:
+ if f[:len(prefix)] == prefix and f[-1*len(suffix):] == suffix:
rValue.append(f[len(prefix):-1*len(suffix)])
except IndexError:
pass
@@ -125,22 +129,20 @@ def get_glsa_list(myconfig):
def getListElements(listnode):
"""
Get all <li> elements for a given <ol> or <ul> node.
-
+
@type listnode: xml.dom.Node
@param listnode: <ul> or <ol> list to get the elements for
@rtype: List of Strings
@return: a list that contains the value of the <li> elements
"""
- rValue = []
if not listnode.nodeName in ["ul", "ol"]:
raise GlsaFormatException("Invalid function call: listnode is not <ul> or <ol>")
- for li in listnode.childNodes:
- if li.nodeType != xml.dom.Node.ELEMENT_NODE:
- continue
- rValue.append(getText(li, format="strip"))
+ rValue = [getText(li, format="strip") \
+ for li in listnode.childNodes \
+ if li.nodeType == xml.dom.Node.ELEMENT_NODE]
return rValue
-def getText(node, format):
+def getText(node, format, textfd = None):
"""
This is the main parser function. It takes a node and traverses
recursive over the subnodes, getting the text of each (and the
@@ -148,7 +150,7 @@ def getText(node, format):
parameter the text might be formatted by adding/removing newlines,
tabs and spaces. This function is only useful for the GLSA DTD,
it's not applicable for other DTDs.
-
+
@type node: xml.dom.Node
@param node: the root node to start with the parsing
@type format: String
@@ -158,45 +160,54 @@ def getText(node, format):
replaces multiple spaces with one space.
I{xml} does some more formatting, depending on the
type of the encountered nodes.
+ @type textfd: writable file-like object
+ @param textfd: the file-like object to write the output to
@rtype: String
@return: the (formatted) content of the node and its subnodes
+		or None if textfd was provided
"""
- rValue = ""
+ if not textfd:
+ textfd = StringIO()
+ returnNone = False
+ else:
+ returnNone = True
if format in ["strip", "keep"]:
if node.nodeName in ["uri", "mail"]:
- rValue += node.childNodes[0].data+": "+node.getAttribute("link")
+ textfd.write(node.childNodes[0].data+": "+node.getAttribute("link"))
else:
for subnode in node.childNodes:
if subnode.nodeName == "#text":
- rValue += subnode.data
+ textfd.write(subnode.data)
else:
- rValue += getText(subnode, format)
- else:
+ getText(subnode, format, textfd)
+ else: # format = "xml"
for subnode in node.childNodes:
if subnode.nodeName == "p":
for p_subnode in subnode.childNodes:
if p_subnode.nodeName == "#text":
- rValue += p_subnode.data.strip()
+ textfd.write(p_subnode.data.strip())
elif p_subnode.nodeName in ["uri", "mail"]:
- rValue += p_subnode.childNodes[0].data
- rValue += " ( "+p_subnode.getAttribute("link")+" )"
- rValue += NEWLINE_ESCAPE
+ textfd.write(p_subnode.childNodes[0].data)
+ textfd.write(" ( "+p_subnode.getAttribute("link")+" )")
+ textfd.write(NEWLINE_ESCAPE)
elif subnode.nodeName == "ul":
for li in getListElements(subnode):
- rValue += "-"+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" "
+ textfd.write("-"+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" ")
elif subnode.nodeName == "ol":
i = 0
for li in getListElements(subnode):
i = i+1
- rValue += str(i)+"."+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" "
+ textfd.write(str(i)+"."+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" ")
elif subnode.nodeName == "code":
- rValue += getText(subnode, format="keep").replace("\n", NEWLINE_ESCAPE)
- if rValue[-1*len(NEWLINE_ESCAPE):] != NEWLINE_ESCAPE:
- rValue += NEWLINE_ESCAPE
+ textfd.write(getText(subnode, format="keep").lstrip().replace("\n", NEWLINE_ESCAPE))
+ textfd.write(NEWLINE_ESCAPE)
elif subnode.nodeName == "#text":
- rValue += subnode.data
+ textfd.write(subnode.data)
else:
raise GlsaFormatException(_("Invalid Tag found: "), subnode.nodeName)
+ if returnNone:
+ return None
+ rValue = textfd.getvalue()
if format == "strip":
rValue = rValue.strip(" \n\t")
rValue = re.sub("[\s]{2,}", " ", rValue)
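# Editor's note (not part of the diff): a minimal sketch of the textfd
# rework above -- callers may pass any writable file-like object, and a
# StringIO buffer is used only when the caller wants the string back.
import sys
from io import StringIO

def emit(parts, textfd=None):
    if textfd is None:
        textfd, want_value = StringIO(), True
    else:
        want_value = False
    for part in parts:
        textfd.write(part)
    return textfd.getvalue() if want_value else None

print(emit(["a", "b"]))             # -> ab (buffered, then returned)
emit(["a", "b", "\n"], sys.stdout)  # written directly, returns None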
@@ -206,7 +217,7 @@ def getMultiTagsText(rootnode, tagname, format):
"""
Returns a list with the text of all subnodes of type I{tagname}
under I{rootnode} (which itself is not parsed) using the given I{format}.
-
+
@type rootnode: xml.dom.Node
@param rootnode: the node to search for I{tagname}
@type tagname: String
@@ -216,16 +227,15 @@ def getMultiTagsText(rootnode, tagname, format):
@rtype: List of Strings
@return: a list containing the text of all I{tagname} childnodes
"""
- rValue = []
- for e in rootnode.getElementsByTagName(tagname):
- rValue.append(getText(e, format))
+ rValue = [getText(e, format) \
+ for e in rootnode.getElementsByTagName(tagname)]
return rValue
def makeAtom(pkgname, versionNode):
"""
- creates from the given package name and information in the
+ creates from the given package name and information in the
	I{versionNode} a (syntactically) valid portage atom.
-
+
@type pkgname: String
@param pkgname: the name of the package for this atom
@type versionNode: xml.dom.Node
@@ -248,9 +258,9 @@ def makeAtom(pkgname, versionNode):
def makeVersion(versionNode):
"""
- creates from the information in the I{versionNode} a
+ creates from the information in the I{versionNode} a
version string (format <op><version>).
-
+
@type versionNode: xml.dom.Node
@param versionNode: a <vulnerable> or <unaffected> Node that
contains the version information for this atom
@@ -270,17 +280,17 @@ def makeVersion(versionNode):
def match(atom, dbapi, match_type="default"):
"""
- wrapper that calls revisionMatch() or portage.dbapi.dbapi.match() depending on
+ wrapper that calls revisionMatch() or portage.dbapi.dbapi.match() depending on
the given atom.
-
+
@type atom: string
@param atom: a <~ or >~ atom or a normal portage atom that contains the atom to match against
@type dbapi: portage.dbapi.dbapi
@param dbapi: one of the portage databases to use as information source
@type match_type: string
- @param match_type: if != "default" passed as first argument to dbapi.xmatch
+ @param match_type: if != "default" passed as first argument to dbapi.xmatch
to apply the wanted visibility filters
-
+
@rtype: list of strings
@return: a list with the matching versions
"""
@@ -296,15 +306,15 @@ def revisionMatch(revisionAtom, dbapi, match_type="default"):
handler for the special >~, >=~, <=~ and <~ atoms that are supposed to behave
	as > and < except that they are limited to the same version; the range only
applies to the revision part.
-
+
@type revisionAtom: string
@param revisionAtom: a <~ or >~ atom that contains the atom to match against
@type dbapi: portage.dbapi.dbapi
@param dbapi: one of the portage databases to use as information source
@type match_type: string
- @param match_type: if != "default" passed as first argument to portdb.xmatch
+ @param match_type: if != "default" passed as first argument to portdb.xmatch
to apply the wanted visibility filters
-
+
@rtype: list of strings
@return: a list with the matching versions
"""
@@ -325,18 +335,19 @@ def revisionMatch(revisionAtom, dbapi, match_type="default"):
if eval(r1+" "+revisionAtom[0:2]+" "+r2):
rValue.append(v)
return rValue
-
+
def getMinUpgrade(vulnerableList, unaffectedList, portdbapi, vardbapi, minimize=True):
"""
	Checks if the system state matches an atom in
I{vulnerableList} and returns string describing
- the lowest version for the package that matches an atom in
+ the lowest version for the package that matches an atom in
I{unaffectedList} and is greater than the currently installed
- version or None if the system is not affected. Both
- I{vulnerableList} and I{unaffectedList} should have the
+	version. It will return an empty list if the system is affected
+	but no upgrade is possible, or None if the system is not affected.
+ Both I{vulnerableList} and I{unaffectedList} should have the
same base package.
-
+
@type vulnerableList: List of Strings
@param vulnerableList: atoms matching vulnerable package versions
@type unaffectedList: List of Strings
@@ -347,46 +358,51 @@ def getMinUpgrade(vulnerableList, unaffectedList, portdbapi, vardbapi, minimize=
@param vardbapi: Installed package repository
@type minimize: Boolean
@param minimize: True for a least-change upgrade, False for emerge-like algorithm
-
+
-	@rtype: String | None
-	@return: the lowest unaffected version that is greater than
-		the installed version.
+	@rtype: List | None
+	@return: a list of [vulnerable atom, upgrade atom] pairs (with an
+		empty upgrade string if no upgrade is possible), or None
+		if the system is not affected
- """
- rValue = None
- v_installed = []
- u_installed = []
- for v in vulnerableList:
- v_installed += match(v, vardbapi)
+ """
+ rValue = ""
+ v_installed = reduce(operator.add, [match(v, vardbapi) for v in vulnerableList], [])
+ u_installed = reduce(operator.add, [match(u, vardbapi) for u in unaffectedList], [])
- for u in unaffectedList:
- u_installed += match(u, vardbapi)
-
- install_unaffected = True
- for i in v_installed:
- if i not in u_installed:
- install_unaffected = False
+ # remove all unaffected atoms from vulnerable list
+ v_installed = list(set(v_installed).difference(set(u_installed)))
- if install_unaffected:
- return rValue
-
+ if not v_installed:
+ return None
+
+	# this list holds pairs of [vulnerable atom, related upgrade atom]
+ vuln_update = []
+ avail_updates = set()
for u in unaffectedList:
- mylist = match(u, portdbapi, match_type="match-all")
- for c in mylist:
- i = best(v_installed)
- if vercmp(c.version, i.version) > 0 \
- and (rValue == None \
- or not match("="+rValue, portdbapi) \
- or (minimize ^ (vercmp(c.version, rValue.version) > 0)) \
- and match("="+c, portdbapi)) \
- and portdbapi.aux_get(c, ["SLOT"]) == vardbapi.aux_get(best(v_installed), ["SLOT"]):
- rValue = c
- return rValue
+		# TODO: This had match_type="match-all" before. I don't think it
+		# should, since we disregarded masked items later anyway
+		# (match("="+rValue, "porttree"))
+ avail_updates.update(match(u, portdbapi))
+ # if an atom is already installed, we should not consider it for upgrades
+ avail_updates.difference_update(u_installed)
+
+ for vuln in v_installed:
+ update = ""
+ for c in avail_updates:
+ c_pv = portage.catpkgsplit(c)
+ if vercmp(c.version, vuln.version) > 0 \
+ and (update == "" \
+ or (minimize ^ (vercmp(c.version, update.version) > 0))) \
+ and portdbapi._pkg_str(c, None).slot == vardbapi._pkg_str(vuln, None).slot:
+ update = c_pv[0]+"/"+c_pv[1]+"-"+c_pv[2]
+ if c_pv[3] != "r0": # we don't like -r0 for display
+ update += "-"+c_pv[3]
+ vuln_update.append([vuln, update])
+
+ return vuln_update
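# Editor's note (not part of the diff): the shape of the new return value,
# with hypothetical atoms -- compare the String|None contract it replaces.
#
#   getMinUpgrade(vuln_atoms, unaff_atoms, portdbapi, vardbapi)
#   -> None                                (system not affected)
#   -> [["cat/pkg-1.0", ""]]               (affected, no upgrade available)
#   -> [["cat/pkg-1.0", "cat/pkg-1.2"]]    ([vulnerable, upgrade] pairs)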
def format_date(datestr):
"""
Takes a date (announced, revised) date from a GLSA and formats
	it as readable text (e.g. "January 1, 2008").
-
+
	@type datestr: String
	@param datestr: the date string to reformat
@rtype: String
@@ -396,16 +412,16 @@ def format_date(datestr):
splitdate = datestr.split("-", 2)
if len(splitdate) != 3:
return datestr
-
+
# This cannot raise an error as we use () instead of []
splitdate = (int(x) for x in splitdate)
-
+
from datetime import date
try:
d = date(*splitdate)
except ValueError:
return datestr
-
+
# TODO We could format to local date format '%x' here?
return _unicode_decode(d.strftime("%B %d, %Y"),
encoding=_encodings['content'], errors='replace')
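# Editor's note (not part of the diff): a minimal standalone version of the
# date handling above, assuming the "YYYY-MM-DD" input format.
from datetime import date

def fmt(datestr):
    parts = datestr.split("-", 2)
    if len(parts) != 3:
        return datestr
    try:
        return date(*(int(x) for x in parts)).strftime("%B %d, %Y")
    except ValueError:
        return datestr

print(fmt("2008-01-01"))   # -> January 01, 2008
print(fmt("not-a-date"))   # returned unchanged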
@@ -417,7 +433,7 @@ class GlsaTypeException(Exception):
class GlsaFormatException(Exception):
pass
-
+
class GlsaArgumentException(Exception):
pass
@@ -429,9 +445,9 @@ class Glsa:
"""
def __init__(self, myid, myconfig, vardbapi, portdbapi):
"""
- Simple constructor to set the ID, store the config and gets the
+		Simple constructor to set the ID, store the config and get the
XML data by calling C{self.read()}.
-
+
@type myid: String
@param myid: String describing the id for the GLSA object (standard
GLSAs have an ID of the form YYYYMM-nn) or an existing
@@ -461,7 +477,7 @@ class Glsa:
"""
Here we build the filename from the config and the ID and pass
it to urllib to fetch it from the filesystem or a remote server.
-
+
@rtype: None
@return: None
"""
@@ -473,15 +489,21 @@ class Glsa:
myurl = "file://"+self.nr
else:
myurl = repository + "glsa-%s.xml" % str(self.nr)
- self.parse(urllib_request_urlopen(myurl))
+
+ f = urllib_request_urlopen(myurl)
+ try:
+ self.parse(f)
+ finally:
+ f.close()
+
return None
def parse(self, myfile):
"""
- This method parses the XML file and sets up the internal data
+ This method parses the XML file and sets up the internal data
structures by calling the different helper functions in this
module.
-
+
		@type myfile: File
		@param myfile: file object to grab the XML data from
@rtype: None
@@ -504,27 +526,27 @@ class Glsa:
self.title = getText(myroot.getElementsByTagName("title")[0], format="strip")
self.synopsis = getText(myroot.getElementsByTagName("synopsis")[0], format="strip")
self.announced = format_date(getText(myroot.getElementsByTagName("announced")[0], format="strip"))
-
- count = 1
+
# Support both formats of revised:
# <revised>December 30, 2007: 02</revised>
# <revised count="2">2007-12-30</revised>
revisedEl = myroot.getElementsByTagName("revised")[0]
self.revised = getText(revisedEl, format="strip")
- if ((sys.hexversion >= 0x3000000 and "count" in revisedEl.attributes) or
- (sys.hexversion < 0x3000000 and revisedEl.attributes.has_key("count"))):
- count = revisedEl.getAttribute("count")
- elif (self.revised.find(":") >= 0):
- (self.revised, count) = self.revised.split(":")
-
+ count = revisedEl.attributes.get("count")
+ if count is None:
+ if self.revised.find(":") >= 0:
+ (self.revised, count) = self.revised.split(":")
+ else:
+ count = 1
+
self.revised = format_date(self.revised)
-
+
try:
self.count = int(count)
except ValueError:
# TODO should this raise a GlsaFormatException?
self.count = 1
-
+
# now the optional and 0-n toplevel, #PCDATA tags and references
try:
self.access = getText(myroot.getElementsByTagName("access")[0], format="strip")
@@ -532,7 +554,7 @@ class Glsa:
self.access = ""
self.bugs = getMultiTagsText(myroot, "bug", format="strip")
self.references = getMultiTagsText(myroot.getElementsByTagName("references")[0], "uri", format="keep")
-
+
# and now the formatted text elements
self.description = getText(myroot.getElementsByTagName("description")[0], format="xml")
self.workaround = getText(myroot.getElementsByTagName("workaround")[0], format="xml")
@@ -542,7 +564,7 @@ class Glsa:
try:
self.background = getText(myroot.getElementsByTagName("background")[0], format="xml")
except IndexError:
- self.background = ""
+ self.background = ""
# finally the interesting tags (product, affected, package)
self.glsatype = myroot.getElementsByTagName("product")[0].getAttribute("type")
@@ -572,16 +594,18 @@ class Glsa:
self.services = self.affected.getElementsByTagName("service")
return None
- def dump(self, outstream=sys.stdout):
+ def dump(self, outstream=sys.stdout, encoding="utf-8"):
"""
- Dumps a plaintext representation of this GLSA to I{outfile} or
+		Dumps a plaintext representation of this GLSA to I{outstream} or
		B{stdout} if it is omitted. You can specify an alternate
- I{encoding} if needed (default is latin1).
-
+ I{encoding} if needed (default is utf-8).
+
@type outstream: File
		@param outstream: Stream that should be used for writing
(defaults to sys.stdout)
"""
+ outstream = getattr(outstream, "buffer", outstream)
+ outstream = codecs.getwriter(encoding)(outstream)
width = 76
outstream.write(("GLSA %s: \n%s" % (self.nr, self.title)).center(width)+"\n")
outstream.write((width*"=")+"\n")
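# Editor's note (not part of the diff): a standalone sketch of the two
# stream-wrapping lines added above; on Python 3, sys.stdout.buffer is the
# underlying byte stream, and codecs.getwriter() returns a stream wrapper
# that encodes on write.
import codecs
import sys

out = getattr(sys.stdout, "buffer", sys.stdout)  # bytes stream on py3
out = codecs.getwriter("utf-8")(out)
out.write(u"GLSA 200801-01: example title\n")    # unicode in, UTF-8 out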
@@ -606,30 +630,24 @@ class Glsa:
pass
if len(self.bugs) > 0:
outstream.write(_("\nRelated bugs: "))
- for i in range(0, len(self.bugs)):
- outstream.write(self.bugs[i])
- if i < len(self.bugs)-1:
- outstream.write(", ")
- else:
- outstream.write("\n")
+ outstream.write(", ".join(self.bugs))
+ outstream.write("\n")
if self.background:
outstream.write("\n"+wrap(self.background, width, caption=_("Background: ")))
outstream.write("\n"+wrap(self.description, width, caption=_("Description: ")))
outstream.write("\n"+wrap(self.impact_text, width, caption=_("Impact: ")))
outstream.write("\n"+wrap(self.workaround, width, caption=_("Workaround: ")))
outstream.write("\n"+wrap(self.resolution, width, caption=_("Resolution: ")))
- myreferences = ""
- for r in self.references:
- myreferences += (r.replace(" ", SPACE_ESCAPE)+NEWLINE_ESCAPE+" ")
+ myreferences = " ".join(r.replace(" ", SPACE_ESCAPE)+NEWLINE_ESCAPE for r in self.references)
outstream.write("\n"+wrap(myreferences, width, caption=_("References: ")))
outstream.write("\n")
-
+
def isVulnerable(self):
"""
Tests if the system is affected by this GLSA by checking if any
vulnerable package versions are installed. Also checks for affected
architectures.
-
+
@rtype: Boolean
@return: True if the system is affected, False if not
"""
@@ -641,56 +659,67 @@ class Glsa:
for v in path["vul_atoms"]:
rValue = rValue \
or (len(match(v, self.vardbapi)) > 0 \
- and getMinUpgrade(path["vul_atoms"], path["unaff_atoms"], \
+ and None != getMinUpgrade(path["vul_atoms"], path["unaff_atoms"], \
self.portdbapi, self.vardbapi))
return rValue
-
- def isApplied(self):
+
+ def isInjected(self):
"""
- Looks if the GLSA IDis in the GLSA checkfile to check if this
- GLSA was already applied.
-
+		Checks whether the GLSA ID is in the GLSA checkfile to see if
+		this GLSA should be marked as applied.
+
@rtype: Boolean
- @return: True if the GLSA was applied, False if not
+		@return: True if the GLSA is in the inject file, False if not
"""
+ if not os.access(os.path.join(self.config["EROOT"],
+ PRIVATE_PATH, "glsa_injected"), os.R_OK):
+ return False
return (self.nr in get_applied_glsas(self.config))
def inject(self):
"""
Puts the ID of this GLSA into the GLSA checkfile, so it won't
- show up on future checks. Should be called after a GLSA is
+ show up on future checks. Should be called after a GLSA is
applied or on explicit user request.
@rtype: None
@return: None
"""
- if not self.isApplied():
+ if not self.isInjected():
checkfile = io.open(
_unicode_encode(os.path.join(self.config["EROOT"],
- CACHE_PATH, "glsa"),
- encoding=_encodings['fs'], errors='strict'),
+ PRIVATE_PATH, "glsa_injected"),
+ encoding=_encodings['fs'], errors='strict'),
mode='a+', encoding=_encodings['content'], errors='strict')
checkfile.write(_unicode_decode(self.nr + "\n"))
checkfile.close()
return None
-
+
def getMergeList(self, least_change=True):
"""
Returns the list of package-versions that have to be merged to
- apply this GLSA properly. The versions are as low as possible
+ apply this GLSA properly. The versions are as low as possible
while avoiding downgrades (see L{getMinUpgrade}).
-
+
@type least_change: Boolean
@param least_change: True if the smallest possible upgrade should be selected,
False for an emerge-like algorithm
@rtype: List of Strings
@return: list of package-versions that have to be merged
"""
- rValue = []
- for pkg in self.packages:
+ return list(set(update for (vuln, update) in self.getAffectionTable(least_change) if update))
+
+ def getAffectionTable(self, least_change=True):
+ """
+		Returns a list of [vulnerable atom, minimal upgrade atom]
+		pairs for the atoms installed on the system that are
+		affected by this GLSA.
+ """
+ systemAffection = []
+ for pkg in self.packages.keys():
for path in self.packages[pkg]:
- update = getMinUpgrade(path["vul_atoms"], path["unaff_atoms"], \
+ update = getMinUpgrade(path["vul_atoms"], path["unaff_atoms"],
self.portdbapi, self.vardbapi, minimize=least_change)
if update:
- rValue.append(update)
- return rValue
+ systemAffection.extend(update)
+ return systemAffection
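# Editor's note (not part of the diff): a hypothetical consumer of the new
# API -- glsa-check-style reporting built on getAffectionTable().
#
#   for vuln, update in myglsa.getAffectionTable(least_change=True):
#       if update:
#           print("%s -> upgrade to %s" % (vuln, update))
#       else:
#           print("%s has no unaffected upgrade" % vuln)
#   merge_list = myglsa.getMergeList()  # de-duplicated update atoms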
diff --git a/pym/portage/localization.py b/pym/portage/localization.py
index d16c4b131..b54835a42 100644
--- a/pym/portage/localization.py
+++ b/pym/portage/localization.py
@@ -1,12 +1,18 @@
# localization.py -- Code to manage/help portage localization.
-# Copyright 2004 Gentoo Foundation
+# Copyright 2004-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from portage import _unicode_decode
# We define this to make the transition easier for us.
def _(mystr):
- return mystr
-
+ """
+ Always returns unicode, regardless of the input type. This is
+ helpful for avoiding UnicodeDecodeError from __str__() with
+ Python 2, by ensuring that string format operations invoke
+ __unicode__() instead of __str__().
+ """
+ return _unicode_decode(mystr)
def localization_example():
# Dict references allow translators to rearrange word order.
@@ -15,6 +21,7 @@ def localization_example():
a_value = "value.of.a"
b_value = 123
- c_value = [1,2,3,4]
- print(_("A: %(a)s -- B: %(b)s -- C: %(c)s") % {"a":a_value,"b":b_value,"c":c_value})
+ c_value = [1, 2, 3, 4]
+ print(_("A: %(a)s -- B: %(b)s -- C: %(c)s") %
+ {"a": a_value, "b": b_value, "c": c_value})
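# Editor's note (not part of the diff): why localization_example() uses
# dict references -- a translated template can reorder placeholders
# without touching the calling code. The second template is hypothetical.
values = {"a": "value.of.a", "b": 123, "c": [1, 2, 3, 4]}
print("A: %(a)s -- B: %(b)s -- C: %(c)s" % values)
print("C: %(c)s -- A: %(a)s -- B: %(b)s" % values)  # reordered "translation"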
diff --git a/pym/portage/locks.py b/pym/portage/locks.py
index 59fbc6ec0..0789f8941 100644
--- a/pym/portage/locks.py
+++ b/pym/portage/locks.py
@@ -1,5 +1,5 @@
# portage: Lock management code
-# Copyright 2004-2012 Gentoo Foundation
+# Copyright 2004-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ["lockdir", "unlockdir", "lockfile", "unlockfile", \
@@ -17,11 +17,11 @@ import portage
from portage import os, _encodings, _unicode_decode
from portage.exception import DirectoryNotFound, FileNotFound, \
InvalidData, TryAgain, OperationNotPermitted, PermissionDenied
-from portage.data import portage_gid
from portage.util import writemsg
from portage.localization import _
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
HARDLINK_FD = -2
@@ -64,6 +64,9 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
if not mypath:
raise InvalidData(_("Empty path given"))
+ # Since Python 3.4, chown requires int type (no proxies).
+ portage_gid = int(portage.data.portage_gid)
+
# Support for file object or integer file descriptor parameters is
# deprecated due to ambiguity in whether or not it's safe to close
# the file descriptor, making it prone to "Bad file descriptor" errors
@@ -148,7 +151,7 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
except IOError as e:
if not hasattr(e, "errno"):
raise
- if e.errno in (errno.EACCES, errno.EAGAIN):
+ if e.errno in (errno.EACCES, errno.EAGAIN, errno.ENOLCK):
			# resource temporarily unavailable; e.g., someone beat us to the lock.
if flags & os.O_NONBLOCK:
os.close(myfd)
@@ -163,19 +166,43 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
if isinstance(mypath, int):
waiting_msg = _("waiting for lock on fd %i") % myfd
else:
- waiting_msg = _("waiting for lock on %s\n") % lockfilename
+ waiting_msg = _("waiting for lock on %s") % lockfilename
if out is not None:
out.ebegin(waiting_msg)
# try for the exclusive lock now.
- try:
- locking_method(myfd, fcntl.LOCK_EX)
- except EnvironmentError as e:
- if out is not None:
- out.eend(1, str(e))
- raise
+ enolock_msg_shown = False
+ while True:
+ try:
+ locking_method(myfd, fcntl.LOCK_EX)
+ except EnvironmentError as e:
+ if e.errno == errno.ENOLCK:
+ # This is known to occur on Solaris NFS (see
+ # bug #462694). Assume that the error is due
+ # to temporary exhaustion of record locks,
+ # and loop until one becomes available.
+ if not enolock_msg_shown:
+ enolock_msg_shown = True
+ if isinstance(mypath, int):
+ context_desc = _("Error while waiting "
+ "to lock fd %i") % myfd
+ else:
+ context_desc = _("Error while waiting "
+ "to lock '%s'") % lockfilename
+ writemsg("\n!!! %s: %s\n" % (context_desc, e),
+ noiselevel=-1)
+
+ time.sleep(_HARDLINK_POLL_LATENCY)
+ continue
+
+ if out is not None:
+ out.eend(1, str(e))
+ raise
+ else:
+ break
+
if out is not None:
out.eend(os.EX_OK)
- elif e.errno in (errno.ENOSYS, errno.ENOLCK):
+ elif e.errno in (errno.ENOSYS,):
# We're not allowed to lock on this FS.
if not isinstance(lockfilename, int):
# If a file object was passed in, it's not safe
@@ -207,10 +234,21 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
waiting_msg=waiting_msg, flags=flags)
if myfd != HARDLINK_FD:
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(myfd, fcntl.F_SETFD,
+ fcntl.fcntl(myfd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
_open_fds.add(myfd)
- writemsg(str((lockfilename,myfd,unlinkfile))+"\n",1)
- return (lockfilename,myfd,unlinkfile,locking_method)
+ writemsg(str((lockfilename, myfd, unlinkfile)) + "\n", 1)
+ return (lockfilename, myfd, unlinkfile, locking_method)
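# Editor's note (not part of the diff): the close-on-exec guard above in
# isolation, with a hypothetical lock path; Python >= 3.4 sets FD_CLOEXEC
# on new descriptors by default, so the fcntl dance is only needed on
# older interpreters.
import fcntl
import os
import sys

fd = os.open("/tmp/example.lock", os.O_CREAT | os.O_WRONLY, 0o660)
if sys.hexversion < 0x3040000 and hasattr(fcntl, "FD_CLOEXEC"):
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)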
def _fstat_nlink(fd):
"""
@@ -232,10 +270,10 @@ def unlockfile(mytuple):
	#XXX: Compatibility hack.
if len(mytuple) == 3:
- lockfilename,myfd,unlinkfile = mytuple
+ lockfilename, myfd, unlinkfile = mytuple
locking_method = fcntl.flock
elif len(mytuple) == 4:
- lockfilename,myfd,unlinkfile,locking_method = mytuple
+ lockfilename, myfd, unlinkfile, locking_method = mytuple
else:
raise InvalidData
@@ -246,7 +284,7 @@ def unlockfile(mytuple):
# myfd may be None here due to myfd = mypath in lockfile()
if isinstance(lockfilename, basestring) and \
not os.path.exists(lockfilename):
- writemsg(_("lockfile does not exist '%s'\n") % lockfilename,1)
+ writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
if myfd is not None:
os.close(myfd)
_open_fds.remove(myfd)
@@ -254,9 +292,9 @@ def unlockfile(mytuple):
try:
if myfd is None:
- myfd = os.open(lockfilename, os.O_WRONLY,0o660)
+ myfd = os.open(lockfilename, os.O_WRONLY, 0o660)
unlinkfile = 1
- locking_method(myfd,fcntl.LOCK_UN)
+ locking_method(myfd, fcntl.LOCK_UN)
except OSError:
if isinstance(lockfilename, basestring):
os.close(myfd)
@@ -271,14 +309,14 @@ def unlockfile(mytuple):
# commenting until it is proved necessary.
#time.sleep(0.0001)
if unlinkfile:
- locking_method(myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
+ locking_method(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
# We won the lock, so there isn't competition for it.
# We can safely delete the file.
writemsg(_("Got the lockfile...\n"), 1)
if _fstat_nlink(myfd) == 1:
os.unlink(lockfilename)
writemsg(_("Unlinked lockfile...\n"), 1)
- locking_method(myfd,fcntl.LOCK_UN)
+ locking_method(myfd, fcntl.LOCK_UN)
else:
writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
os.close(myfd)
@@ -288,7 +326,7 @@ def unlockfile(mytuple):
raise
except Exception as e:
writemsg(_("Failed to get lock... someone took it.\n"), 1)
- writemsg(str(e)+"\n",1)
+ writemsg(str(e) + "\n", 1)
# why test lockfilename? because we may have been handed an
# fd originally, and the caller might not like having their
@@ -300,14 +338,12 @@ def unlockfile(mytuple):
return True
-
-
def hardlock_name(path):
base, tail = os.path.split(path)
return os.path.join(base, ".%s.hardlock-%s-%s" %
(tail, os.uname()[1], os.getpid()))
-def hardlink_is_mine(link,lock):
+def hardlink_is_mine(link, lock):
try:
lock_st = os.stat(lock)
if lock_st.st_nlink == 2:
@@ -339,6 +375,9 @@ def hardlink_lockfile(lockfilename, max_wait=DeprecationWarning,
preexisting = os.path.exists(lockfilename)
myhardlock = hardlock_name(lockfilename)
+ # Since Python 3.4, chown requires int type (no proxies).
+ portage_gid = int(portage.data.portage_gid)
+
# myhardlock must not exist prior to our link() call, and we can
# safely unlink it since its file name is unique to our PID
try:
@@ -456,7 +495,6 @@ def unhardlink_lockfile(lockfilename, unlinkfile=True):
pass
def hardlock_cleanup(path, remove_all_locks=False):
- mypid = str(os.getpid())
myhost = os.uname()[1]
mydl = os.listdir(path)
@@ -465,7 +503,7 @@ def hardlock_cleanup(path, remove_all_locks=False):
mylist = {}
for x in mydl:
- if os.path.isfile(path+"/"+x):
+ if os.path.isfile(path + "/" + x):
parts = x.split(".hardlock-")
if len(parts) == 2:
filename = parts[0][1:]
@@ -482,17 +520,17 @@ def hardlock_cleanup(path, remove_all_locks=False):
mycount += 1
- results.append(_("Found %(count)s locks") % {"count":mycount})
+ results.append(_("Found %(count)s locks") % {"count": mycount})
for x in mylist:
if myhost in mylist[x] or remove_all_locks:
- mylockname = hardlock_name(path+"/"+x)
- if hardlink_is_mine(mylockname, path+"/"+x) or \
- not os.path.exists(path+"/"+x) or \
+ mylockname = hardlock_name(path + "/" + x)
+ if hardlink_is_mine(mylockname, path + "/" + x) or \
+ not os.path.exists(path + "/" + x) or \
remove_all_locks:
for y in mylist[x]:
for z in mylist[x][y]:
- filename = path+"/."+x+".hardlock-"+y+"-"+z
+ filename = path + "/." + x + ".hardlock-" + y + "-" + z
if filename == mylockname:
continue
try:
@@ -502,8 +540,8 @@ def hardlock_cleanup(path, remove_all_locks=False):
except OSError:
pass
try:
- os.unlink(path+"/"+x)
- results.append(_("Unlinked: ") + path+"/"+x)
+ os.unlink(path + "/" + x)
+ results.append(_("Unlinked: ") + path + "/" + x)
os.unlink(mylockname)
results.append(_("Unlinked: ") + mylockname)
except OSError:
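# Editor's note (not part of the diff): the per-host, per-PID hardlock
# naming scheme that hardlock_cleanup() splits back apart; the lock path
# is hypothetical.
import os

def hardlock_name(path):
    base, tail = os.path.split(path)
    return os.path.join(base, ".%s.hardlock-%s-%s" %
        (tail, os.uname()[1], os.getpid()))

print(hardlock_name("/var/db/pkg/.lock"))
# e.g. /var/db/pkg/..lock.hardlock-myhost-1234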
diff --git a/pym/portage/mail.py b/pym/portage/mail.py
index 3fcadd27b..723da04b8 100644
--- a/pym/portage/mail.py
+++ b/pym/portage/mail.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Since python ebuilds remove the 'email' module when USE=build
@@ -21,6 +21,7 @@ from portage.localization import _
import portage
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
def _force_ascii_if_necessary(s):
@@ -117,13 +118,13 @@ def send_mail(mysettings, message):
if "@" in mymailuri:
myauthdata, myconndata = mymailuri.rsplit("@", 1)
try:
- mymailuser,mymailpasswd = myauthdata.split(":")
+ mymailuser, mymailpasswd = myauthdata.split(":")
except ValueError:
print(_("!!! invalid SMTP AUTH configuration, trying unauthenticated ..."))
else:
myconndata = mymailuri
if ":" in myconndata:
- mymailhost,mymailport = myconndata.split(":")
+ mymailhost, mymailport = myconndata.split(":")
else:
mymailhost = myconndata
else:
diff --git a/pym/portage/manifest.py b/pym/portage/manifest.py
index a04b71780..3936b9a1d 100644
--- a/pym/portage/manifest.py
+++ b/pym/portage/manifest.py
@@ -1,15 +1,19 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import errno
import io
import re
+import sys
import warnings
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.checksum:hashfunc_map,perform_multiple_checksums,' + \
- 'verify_all,_filter_unaccelarated_hashes',
+ 'verify_all,_apply_hash_filter,_filter_unaccelarated_hashes',
+ 'portage.repository.config:_find_invalid_path_char',
'portage.util:write_atomic',
)
@@ -24,8 +28,16 @@ from portage.const import (MANIFEST1_HASH_FUNCTIONS, MANIFEST2_HASH_DEFAULTS,
MANIFEST2_HASH_FUNCTIONS, MANIFEST2_IDENTIFIERS, MANIFEST2_REQUIRED_HASH)
from portage.localization import _
-# Characters prohibited by repoman's file.name check.
-_prohibited_filename_chars_re = re.compile(r'[^a-zA-Z0-9._\-+:]')
+_manifest_re = re.compile(
+ r'^(' + '|'.join(MANIFEST2_IDENTIFIERS) + r') (.*)( \d+( \S+ \S+)+)$',
+ re.UNICODE)
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ _unicode = str
+ basestring = str
+else:
+ _unicode = unicode
class FileNotInManifestException(PortageException):
pass
@@ -38,15 +50,10 @@ def manifest2AuxfileFilter(filename):
for x in mysplit:
if x[:1] == '.':
return False
- if _prohibited_filename_chars_re.search(x) is not None:
- return False
return not filename[:7] == 'digest-'
def manifest2MiscfileFilter(filename):
- filename = filename.strip(os.sep)
- if _prohibited_filename_chars_re.search(filename) is not None:
- return False
- return not (filename in ["CVS", ".svn", "files", "Manifest"] or filename.endswith(".ebuild"))
+ return not (filename == "Manifest" or filename.endswith(".ebuild"))
def guessManifestFileType(filename):
""" Perform a best effort guess of which type the given filename is, avoid using this if possible """
@@ -67,18 +74,17 @@ def guessThinManifestFileType(filename):
return None
return "DIST"
-def parseManifest2(mysplit):
+def parseManifest2(line):
+ if not isinstance(line, basestring):
+ line = ' '.join(line)
myentry = None
- if len(mysplit) > 4 and mysplit[0] in MANIFEST2_IDENTIFIERS:
- mytype = mysplit[0]
- myname = mysplit[1]
- try:
- mysize = int(mysplit[2])
- except ValueError:
- return None
- myhashes = dict(zip(mysplit[3::2], mysplit[4::2]))
- myhashes["size"] = mysize
- myentry = Manifest2Entry(type=mytype, name=myname, hashes=myhashes)
+ match = _manifest_re.match(line)
+ if match is not None:
+ tokens = match.group(3).split()
+ hashes = dict(zip(tokens[1::2], tokens[2::2]))
+ hashes["size"] = int(tokens[0])
+ myentry = Manifest2Entry(type=match.group(1),
+ name=match.group(2), hashes=hashes)
return myentry
class ManifestEntry(object):
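# Editor's note (not part of the diff): how the new _manifest_re drives
# parseManifest2(); the alternation below assumes MANIFEST2_IDENTIFIERS
# contains AUX, MISC, DIST and EBUILD, and the Manifest line is made up.
import re

_manifest_re = re.compile(
    r'^(AUX|MISC|DIST|EBUILD) (.*)( \d+( \S+ \S+)+)$', re.UNICODE)

line = "DIST foo-1.0.tar.gz 12345 SHA256 deadbeef WHIRLPOOL cafebabe"
m = _manifest_re.match(line)
tokens = m.group(3).split()
hashes = dict(zip(tokens[1::2], tokens[2::2]))
hashes["size"] = int(tokens[0])
print(m.group(1), m.group(2), hashes)
# -> DIST foo-1.0.tar.gz {'SHA256': 'deadbeef', 'WHIRLPOOL': 'cafebabe', 'size': 12345}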
@@ -108,11 +114,20 @@ class Manifest2Entry(ManifestEntry):
def __ne__(self, other):
return not self.__eq__(other)
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['repo.content'], errors='strict')
+
class Manifest(object):
parsers = (parseManifest2,)
- def __init__(self, pkgdir, distdir, fetchlist_dict=None,
+ def __init__(self, pkgdir, distdir=None, fetchlist_dict=None,
manifest1_compat=DeprecationWarning, from_scratch=False, thin=False,
- allow_missing=False, allow_create=True, hashes=None):
+ allow_missing=False, allow_create=True, hashes=None,
+ find_invalid_path_char=None):
""" Create new Manifest instance for package in pkgdir.
Do not parse Manifest file if from_scratch == True (only for internal use)
The fetchlist_dict parameter is required only for generation of
@@ -125,6 +140,9 @@ class Manifest(object):
"portage.manifest.Manifest constructor is deprecated.",
DeprecationWarning, stacklevel=2)
+ if find_invalid_path_char is None:
+ find_invalid_path_char = _find_invalid_path_char
+ self._find_invalid_path_char = find_invalid_path_char
self.pkgdir = _unicode_decode(pkgdir).rstrip(os.sep) + os.sep
self.fhashdict = {}
self.hashes = set()
@@ -173,13 +191,12 @@ class Manifest(object):
"""Parse a manifest. If myhashdict is given then data will be added too it.
Otherwise, a new dict will be created and returned."""
try:
- fd = io.open(_unicode_encode(file_path,
+ with io.open(_unicode_encode(file_path,
encoding=_encodings['fs'], errors='strict'), mode='r',
- encoding=_encodings['repo.content'], errors='replace')
- if myhashdict is None:
- myhashdict = {}
- self._parseDigests(fd, myhashdict=myhashdict, **kwargs)
- fd.close()
+ encoding=_encodings['repo.content'], errors='replace') as f:
+ if myhashdict is None:
+ myhashdict = {}
+ self._parseDigests(f, myhashdict=myhashdict, **kwargs)
return myhashdict
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
@@ -198,9 +215,8 @@ class Manifest(object):
"""Parse manifest lines and return a list of manifest entries."""
for myline in mylines:
myentry = None
- mysplit = myline.split()
for parser in self.parsers:
- myentry = parser(mysplit)
+ myentry = parser(myline)
if myentry is not None:
yield myentry
break # go to the next line
@@ -255,9 +271,12 @@ class Manifest(object):
(MANIFEST2_REQUIRED_HASH, t, f))
def write(self, sign=False, force=False):
- """ Write Manifest instance to disk, optionally signing it """
+ """ Write Manifest instance to disk, optionally signing it. Returns
+ True if the Manifest is actually written, and False if the write
+		is skipped because the existing Manifest is identical."""
+ rval = False
if not self.allow_create:
- return
+ return rval
self.checkIntegrity()
try:
myentries = list(self._createManifestEntries())
@@ -289,7 +308,8 @@ class Manifest(object):
# thin manifests with no DIST entries, myentries is
# non-empty for all currently known use cases.
write_atomic(self.getFullname(), "".join("%s\n" %
- str(myentry) for myentry in myentries))
+ _unicode(myentry) for myentry in myentries))
+ rval = True
else:
# With thin manifest, there's no need to have
# a Manifest file if there are no DIST entries.
@@ -298,6 +318,7 @@ class Manifest(object):
except OSError as e:
if e.errno != errno.ENOENT:
raise
+ rval = True
if sign:
self.sign()
@@ -305,6 +326,7 @@ class Manifest(object):
if e.errno == errno.EACCES:
raise PermissionDenied(str(e))
raise
+ return rval
def sign(self):
""" Sign the Manifest """
@@ -363,10 +385,11 @@ class Manifest(object):
distfilehashes = self.fhashdict["DIST"]
else:
distfilehashes = {}
- self.__init__(self.pkgdir, self.distdir,
+ self.__init__(self.pkgdir, distdir=self.distdir,
fetchlist_dict=self.fetchlist_dict, from_scratch=True,
thin=self.thin, allow_missing=self.allow_missing,
- allow_create=self.allow_create, hashes=self.hashes)
+ allow_create=self.allow_create, hashes=self.hashes,
+ find_invalid_path_char=self._find_invalid_path_char)
pn = os.path.basename(self.pkgdir.rstrip(os.path.sep))
cat = self._pkgdir_category()
@@ -461,7 +484,8 @@ class Manifest(object):
if pf is not None:
mytype = "EBUILD"
cpvlist.append(pf)
- elif manifest2MiscfileFilter(f):
+ elif self._find_invalid_path_char(f) == -1 and \
+ manifest2MiscfileFilter(f):
mytype = "MISC"
else:
continue
@@ -480,7 +504,8 @@ class Manifest(object):
full_path = os.path.join(parentdir, f)
recursive_files.append(full_path[cut_len:])
for f in recursive_files:
- if not manifest2AuxfileFilter(f):
+ if self._find_invalid_path_char(f) != -1 or \
+ not manifest2AuxfileFilter(f):
continue
self.fhashdict["AUX"][f] = perform_multiple_checksums(
os.path.join(self.pkgdir, "files", f.lstrip(os.sep)), self.hashes)
@@ -502,14 +527,17 @@ class Manifest(object):
for t in MANIFEST2_IDENTIFIERS:
self.checkTypeHashes(t, ignoreMissingFiles=ignoreMissingFiles)
- def checkTypeHashes(self, idtype, ignoreMissingFiles=False):
+ def checkTypeHashes(self, idtype, ignoreMissingFiles=False, hash_filter=None):
for f in self.fhashdict[idtype]:
- self.checkFileHashes(idtype, f, ignoreMissing=ignoreMissingFiles)
+ self.checkFileHashes(idtype, f, ignoreMissing=ignoreMissingFiles,
+ hash_filter=hash_filter)
- def checkFileHashes(self, ftype, fname, ignoreMissing=False):
+ def checkFileHashes(self, ftype, fname, ignoreMissing=False, hash_filter=None):
+ digests = _filter_unaccelarated_hashes(self.fhashdict[ftype][fname])
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
try:
- ok, reason = verify_all(self._getAbsname(ftype, fname),
- _filter_unaccelarated_hashes(self.fhashdict[ftype][fname]))
+ ok, reason = verify_all(self._getAbsname(ftype, fname), digests)
if not ok:
raise DigestException(tuple([self._getAbsname(ftype, fname)]+list(reason)))
return ok, reason
diff --git a/pym/portage/news.py b/pym/portage/news.py
index bbd93257a..408fb5c5f 100644
--- a/pym/portage/news.py
+++ b/pym/portage/news.py
@@ -1,8 +1,8 @@
# portage: news management code
-# Copyright 2006-2011 Gentoo Foundation
+# Copyright 2006-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
__all__ = ["NewsManager", "NewsItem", "DisplayRestriction",
"DisplayProfileRestriction", "DisplayKeywordRestriction",
@@ -13,6 +13,7 @@ import io
import logging
import os as _os
import re
+import portage
from portage import OrderedDict
from portage import os
from portage import _encodings
@@ -241,7 +242,8 @@ class NewsItem(object):
for values in self.restrictions.values():
any_match = False
for restriction in values:
- if restriction.checkRestriction(**kwargs):
+ if restriction.checkRestriction(
+ **portage._native_kwargs(kwargs)):
any_match = True
if not any_match:
all_match = False
@@ -388,7 +390,7 @@ def count_unread_news(portdb, vardb, repos=None, update=True):
# NOTE: The NewsManager typically handles permission errors by
# returning silently, so PermissionDenied won't necessarily be
# raised even if we do trigger a permission error above.
- msg = _unicode_decode("Permission denied: '%s'\n") % (e,)
+ msg = "Permission denied: '%s'\n" % (e,)
if msg in permission_msgs:
pass
else:
diff --git a/pym/portage/output.py b/pym/portage/output.py
index e44375ee3..cd660ac99 100644
--- a/pym/portage/output.py
+++ b/pym/portage/output.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__docformat__ = "epytext"
@@ -24,8 +24,8 @@ from portage.exception import CommandNotFound, FileNotFound, \
ParseError, PermissionDenied, PortageException
from portage.localization import _
-havecolor=1
-dotitles=1
+havecolor = 1
+dotitles = 1
_styles = {}
"""Maps style class to tuple of attribute names."""
@@ -164,15 +164,12 @@ def _parse_color_map(config_root='/', onerror=None):
token = token[1:-1]
return token
- f = None
try:
- f = io.open(_unicode_encode(myfile,
+ with io.open(_unicode_encode(myfile,
encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['content'], errors='replace')
- lineno = 0
- for line in f:
- lineno += 1
-
+ mode='r', encoding=_encodings['content'], errors='replace') as f:
+ lines = f.readlines()
+ for lineno, line in enumerate(lines):
commenter_pos = line.find("#")
line = line[:commenter_pos].strip()
@@ -230,9 +227,6 @@ def _parse_color_map(config_root='/', onerror=None):
elif e.errno == errno.EACCES:
raise PermissionDenied(myfile)
raise
- finally:
- if f is not None:
- f.close()
def nc_len(mystr):
	tmp = re.sub(esc_seq + "\[[^m]+m", "", mystr);
@@ -245,7 +239,7 @@ _max_xtermTitle_len = 253
def xtermTitle(mystr, raw=False):
global _disable_xtermTitle
if _disable_xtermTitle is None:
- _disable_xtermTitle = not (sys.stderr.isatty() and \
+ _disable_xtermTitle = not (sys.__stderr__.isatty() and \
'TERM' in os.environ and \
_legal_terms_re.match(os.environ['TERM']) is not None)
@@ -278,15 +272,18 @@ def xtermTitleReset():
if dotitles and \
'TERM' in os.environ and \
_legal_terms_re.match(os.environ['TERM']) is not None and \
- sys.stderr.isatty():
+ sys.__stderr__.isatty():
from portage.process import find_binary, spawn
shell = os.environ.get("SHELL")
if not shell or not os.access(shell, os.EX_OK):
shell = find_binary("sh")
if shell:
spawn([shell, "-c", prompt_command], env=os.environ,
- fd_pipes={0:sys.stdin.fileno(),1:sys.stderr.fileno(),
- 2:sys.stderr.fileno()})
+ fd_pipes={
+ 0: portage._get_stdin().fileno(),
+ 1: sys.__stderr__.fileno(),
+ 2: sys.__stderr__.fileno()
+ })
else:
os.system(prompt_command)
return
@@ -302,12 +299,12 @@ def xtermTitleReset():
def notitles():
"turn off title setting"
- dotitles=0
+ dotitles = 0
def nocolor():
"turn off colorization"
global havecolor
- havecolor=0
+ havecolor = 0
def resetColor():
return codes["reset"]
@@ -344,9 +341,11 @@ def colorize(color_key, text):
else:
return text
-compat_functions_colors = ["bold","white","teal","turquoise","darkteal",
- "fuchsia","purple","blue","darkblue","green","darkgreen","yellow",
- "brown","darkyellow","red","darkred"]
+compat_functions_colors = [
+ "bold", "white", "teal", "turquoise", "darkteal",
+ "fuchsia", "purple", "blue", "darkblue", "green", "darkgreen", "yellow",
+ "brown", "darkyellow", "red", "darkred",
+]
class create_color_func(object):
__slots__ = ("_color_key",)
diff --git a/pym/portage/package/ebuild/_config/KeywordsManager.py b/pym/portage/package/ebuild/_config/KeywordsManager.py
index 0c613ce04..af606f1eb 100644
--- a/pym/portage/package/ebuild/_config/KeywordsManager.py
+++ b/pym/portage/package/ebuild/_config/KeywordsManager.py
@@ -11,7 +11,7 @@ from portage.dep import ExtendedAtomDict, _repo_separator, _slot_separator
from portage.localization import _
from portage.package.ebuild._config.helper import ordered_by_atom_specificity
from portage.util import grabdict_package, stack_lists, writemsg
-from portage.versions import cpv_getkey, _pkg_str
+from portage.versions import _pkg_str
class KeywordsManager(object):
"""Manager class to handle keywords processing and validation"""
@@ -77,7 +77,9 @@ class KeywordsManager(object):
def getKeywords(self, cpv, slot, keywords, repo):
- if not hasattr(cpv, 'slot'):
+ try:
+ cpv.slot
+ except AttributeError:
pkg = _pkg_str(cpv, slot=slot, repo=repo)
else:
pkg = cpv
@@ -91,6 +93,47 @@ class KeywordsManager(object):
keywords.extend(pkg_keywords)
return stack_lists(keywords, incremental=True)
+ def isStable(self, pkg, global_accept_keywords, backuped_accept_keywords):
+ mygroups = self.getKeywords(pkg, None, pkg._metadata["KEYWORDS"], None)
+ pgroups = global_accept_keywords.split()
+
+ unmaskgroups = self.getPKeywords(pkg, None, None,
+ global_accept_keywords)
+ pgroups.extend(unmaskgroups)
+
+ egroups = backuped_accept_keywords.split()
+
+ if unmaskgroups or egroups:
+ pgroups = self._getEgroups(egroups, pgroups)
+ else:
+ pgroups = set(pgroups)
+
+ if self._getMissingKeywords(pkg, pgroups, mygroups):
+ return False
+
+ if pkg.cpv._settings.local_config:
+ # If replacing all keywords with unstable variants would mask the
+ # package, then it's considered stable.
+ unstable = []
+ for kw in mygroups:
+ if kw[:1] != "~":
+ kw = "~" + kw
+ unstable.append(kw)
+
+ return bool(self._getMissingKeywords(pkg, pgroups, set(unstable)))
+ else:
+ # For repoman, if the package has an effective stable keyword that
+ # intersects with the effective ACCEPT_KEYWORDS for the current
+ # profile, then consider it stable.
+ for kw in pgroups:
+ if kw[:1] != "~":
+ if kw in mygroups or '*' in mygroups:
+ return True
+ if kw == '*':
+ for x in mygroups:
+ if x[:1] != "~":
+ return True
+ return False
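# Editor's note (not part of the diff): the unstable-variant check above in
# isolation -- every stable keyword is prefixed with "~" to test whether
# the package would then become unkeyworded; the keyword list is made up.
mygroups = ["amd64", "~x86"]
unstable = [kw if kw[:1] == "~" else "~" + kw for kw in mygroups]
print(unstable)  # -> ['~amd64', '~x86']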
def getMissingKeywords(self,
cpv,
@@ -237,7 +280,7 @@ class KeywordsManager(object):
if not mygroups:
# If KEYWORDS is empty then we still have to return something
# in order to distinguish from the case of "none missing".
- mygroups.append("**")
+ mygroups = ["**"]
missing = mygroups
return missing
@@ -261,9 +304,11 @@ class KeywordsManager(object):
"""
pgroups = global_accept_keywords.split()
- if not hasattr(cpv, 'slot'):
+ try:
+ cpv.slot
+ except AttributeError:
cpv = _pkg_str(cpv, slot=slot, repo=repo)
- cp = cpv_getkey(cpv)
+ cp = cpv.cp
unmaskgroups = []
if self._p_accept_keywords:
@@ -288,4 +333,3 @@ class KeywordsManager(object):
for x in pkg_accept_keywords:
unmaskgroups.extend(x)
return unmaskgroups
-
diff --git a/pym/portage/package/ebuild/_config/LocationsManager.py b/pym/portage/package/ebuild/_config/LocationsManager.py
index f7a1177e7..4427f1d05 100644
--- a/pym/portage/package/ebuild/_config/LocationsManager.py
+++ b/pym/portage/package/ebuild/_config/LocationsManager.py
@@ -1,6 +1,8 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = (
'LocationsManager',
)
@@ -13,10 +15,12 @@ import portage
from portage import os, eapi_is_supported, _encodings, _unicode_encode
from portage.const import CUSTOM_PROFILE_PATH, GLOBAL_CONFIG_PATH, \
PROFILE_PATH, USER_CONFIG_PATH
+from portage.eapi import eapi_allows_directories_on_profile_level_and_repository_level
from portage.exception import DirectoryNotFound, ParseError
from portage.localization import _
from portage.util import ensure_dirs, grabfile, \
normalize_path, shlex_split, writemsg
+from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
from portage.repository.config import parse_layout_conf, \
_portage1_profiles_allow_directories
@@ -27,7 +31,7 @@ _PORTAGE1_DIRECTORIES = frozenset([
'use.mask', 'use.force'])
_profile_node = collections.namedtuple('_profile_node',
- 'location portage1_directories')
+ 'location portage1_directories user_config')
_allow_parent_colon = frozenset(
["portage-2"])
@@ -45,9 +49,13 @@ class LocationsManager(object):
if self.eprefix is None:
self.eprefix = portage.const.EPREFIX
+ elif self.eprefix:
+ self.eprefix = normalize_path(self.eprefix)
+ if self.eprefix == os.sep:
+ self.eprefix = ""
if self.config_root is None:
- self.config_root = self.eprefix + os.sep
+ self.config_root = portage.const.EPREFIX + os.sep
self.config_root = normalize_path(os.path.abspath(
self.config_root)).rstrip(os.path.sep) + os.path.sep
@@ -72,14 +80,26 @@ class LocationsManager(object):
known_repos = tuple(known_repos)
if self.config_profile_path is None:
+ deprecated_profile_path = os.path.join(
+ self.config_root, 'etc', 'make.profile')
self.config_profile_path = \
os.path.join(self.config_root, PROFILE_PATH)
- if os.path.isdir(self.config_profile_path):
+ if isdir_raise_eaccess(self.config_profile_path):
self.profile_path = self.config_profile_path
+ if isdir_raise_eaccess(deprecated_profile_path) and not \
+ os.path.samefile(self.profile_path,
+ deprecated_profile_path):
+ # Don't warn if they refer to the same path, since
+ # that can be used for backward compatibility with
+ # old software.
+ writemsg("!!! %s\n" %
+ _("Found 2 make.profile dirs: "
+ "using '%s', ignoring '%s'") %
+ (self.profile_path, deprecated_profile_path),
+ noiselevel=-1)
else:
- self.config_profile_path = \
- os.path.join(self.abs_user_config, 'make.profile')
- if os.path.isdir(self.config_profile_path):
+ self.config_profile_path = deprecated_profile_path
+ if isdir_raise_eaccess(self.config_profile_path):
self.profile_path = self.config_profile_path
else:
self.profile_path = None
@@ -99,9 +119,9 @@ class LocationsManager(object):
self._addProfile(os.path.realpath(self.profile_path),
repositories, known_repos)
except ParseError as e:
- writemsg(_("!!! Unable to parse profile: '%s'\n") % \
- self.profile_path, noiselevel=-1)
- writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
+ if not portage._sync_mode:
+ writemsg(_("!!! Unable to parse profile: '%s'\n") % self.profile_path, noiselevel=-1)
+ writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
self.profiles = []
self.profiles_complex = []
@@ -111,14 +131,15 @@ class LocationsManager(object):
if os.path.exists(custom_prof):
self.user_profile_dir = custom_prof
self.profiles.append(custom_prof)
- self.profiles_complex.append(_profile_node(custom_prof, True))
+ self.profiles_complex.append(
+ _profile_node(custom_prof, True, True))
del custom_prof
self.profiles = tuple(self.profiles)
self.profiles_complex = tuple(self.profiles_complex)
def _check_var_directory(self, varname, var):
- if not os.path.isdir(var):
+ if not isdir_raise_eaccess(var):
writemsg(_("!!! Error: %s='%s' is not a directory. "
"Please correct this.\n") % (varname, var),
noiselevel=-1)
@@ -130,33 +151,9 @@ class LocationsManager(object):
allow_parent_colon = True
repo_loc = None
compat_mode = False
- intersecting_repos = [x for x in known_repos if current_abs_path.startswith(x[0])]
- if intersecting_repos:
- # protect against nested repositories. Insane configuration, but the longest
- # path will be the correct one.
- repo_loc, layout_data = max(intersecting_repos, key=lambda x:len(x[0]))
- allow_directories = any(x in _portage1_profiles_allow_directories
- for x in layout_data['profile-formats'])
- compat_mode = layout_data['profile-formats'] == ('portage-1-compat',)
- allow_parent_colon = any(x in _allow_parent_colon
- for x in layout_data['profile-formats'])
- if compat_mode:
- offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
- offenders = sorted(x for x in offenders
- if os.path.isdir(os.path.join(currentPath, x)))
- if offenders:
- warnings.warn(_("Profile '%(profile_path)s' in repository "
- "'%(repo_name)s' is implicitly using 'portage-1' profile format, but "
- "the repository profiles are not marked as that format. This will break "
- "in the future. Please either convert the following paths "
- "to files, or add\nprofile-formats = portage-1\nto the "
- "repositories layout.conf. Files: '%(files)s'\n")
- % dict(profile_path=currentPath, repo_name=repo_loc,
- files=', '.join(offenders)))
-
- parentsFile = os.path.join(currentPath, "parent")
eapi_file = os.path.join(currentPath, "eapi")
+ eapi = "0"
f = None
try:
f = io.open(_unicode_encode(eapi_file,
@@ -174,7 +171,38 @@ class LocationsManager(object):
finally:
if f is not None:
f.close()
- if os.path.exists(parentsFile):
+
+ intersecting_repos = [x for x in known_repos if current_abs_path.startswith(x[0])]
+ if intersecting_repos:
+ # protect against nested repositories. Insane configuration, but the longest
+ # path will be the correct one.
+ repo_loc, layout_data = max(intersecting_repos, key=lambda x:len(x[0]))
+ allow_directories = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
+ any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
+ compat_mode = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
+ layout_data['profile-formats'] == ('portage-1-compat',)
+ allow_parent_colon = any(x in _allow_parent_colon
+ for x in layout_data['profile-formats'])
+
+ if compat_mode:
+ offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
+ offenders = sorted(x for x in offenders
+ if os.path.isdir(os.path.join(currentPath, x)))
+ if offenders:
+ warnings.warn(_(
+ "\nThe selected profile is implicitly using the 'portage-1' format:\n"
+ "\tprofile = %(profile_path)s\n"
+ "But this repository is not using that format:\n"
+ "\trepo = %(repo_name)s\n"
+ "This will break in the future. Please convert these dirs to files:\n"
+ "\t%(files)s\n"
+ "Or, add this line to the repository's layout.conf:\n"
+ "\tprofile-formats = portage-1")
+ % dict(profile_path=currentPath, repo_name=repo_loc,
+ files='\n\t'.join(offenders)))
+
+ parentsFile = os.path.join(currentPath, "parent")
+ if exists_raise_eaccess(parentsFile):
parents = grabfile(parentsFile)
if not parents:
raise ParseError(
@@ -196,7 +224,7 @@ class LocationsManager(object):
# of the current repo, so realpath it.
parentPath = os.path.realpath(parentPath)
- if os.path.exists(parentPath):
+ if exists_raise_eaccess(parentPath):
self._addProfile(parentPath, repositories, known_repos)
else:
raise ParseError(
@@ -205,7 +233,7 @@ class LocationsManager(object):
self.profiles.append(currentPath)
self.profiles_complex.append(
- _profile_node(currentPath, allow_directories))
+ _profile_node(currentPath, allow_directories, False))
def _expand_parent_colon(self, parentsFile, parentPath,
repo_loc, repositories):
@@ -253,29 +281,10 @@ class LocationsManager(object):
self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep
- # make.globals should not be relative to config_root
- # because it only contains constants. However, if EPREFIX
- # is set then there are two possible scenarios:
- # 1) If $ROOT == "/" then make.globals should be
- # relative to EPREFIX.
- # 2) If $ROOT != "/" then the correct location of
- # make.globals needs to be specified in the constructor
- # parameters, since it's a property of the host system
- # (and the current config represents the target system).
self.global_config_path = GLOBAL_CONFIG_PATH
- if self.eprefix:
- if self.target_root == "/":
- # case (1) above
- self.global_config_path = os.path.join(self.eprefix,
- GLOBAL_CONFIG_PATH.lstrip(os.sep))
- else:
- # case (2) above
- # For now, just assume make.globals is relative
- # to EPREFIX.
- # TODO: Pass in more info to the constructor,
- # so we know the host system configuration.
- self.global_config_path = os.path.join(self.eprefix,
- GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ if portage.const.EPREFIX:
+ self.global_config_path = os.path.join(portage.const.EPREFIX,
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
def set_port_dirs(self, portdir, portdir_overlay):
self.portdir = portdir
@@ -287,7 +296,7 @@ class LocationsManager(object):
for ov in shlex_split(self.portdir_overlay):
ov = normalize_path(ov)
profiles_dir = os.path.join(ov, "profiles")
- if os.path.isdir(profiles_dir):
+ if isdir_raise_eaccess(profiles_dir):
self.overlay_profiles.append(profiles_dir)
self.profile_locations = [os.path.join(portdir, "profiles")] + self.overlay_profiles
diff --git a/pym/portage/package/ebuild/_config/MaskManager.py b/pym/portage/package/ebuild/_config/MaskManager.py
index bce1152ee..0f060c96e 100644
--- a/pym/portage/package/ebuild/_config/MaskManager.py
+++ b/pym/portage/package/ebuild/_config/MaskManager.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = (
@@ -8,11 +8,10 @@ __all__ = (
import warnings
from portage import os
-from portage.dep import ExtendedAtomDict, match_from_list, _repo_separator, _slot_separator
+from portage.dep import ExtendedAtomDict, match_from_list
from portage.localization import _
from portage.util import append_repo, grabfile_package, stack_lists, writemsg
-from portage.versions import cpv_getkey
-from _emerge.Package import Package
+from portage.versions import _pkg_str
class MaskManager(object):
@@ -47,7 +46,7 @@ class MaskManager(object):
"the repository profiles are not marked as that format. This will break "
"in the future. Please either convert the following paths "
"to files, or add\nprofile-formats = portage-1\nto the "
- "repositories layout.conf.\n")
+ "repository's layout.conf.\n")
% dict(repo_name=repo_config.name))
return pmask_cache[loc]
@@ -185,12 +184,15 @@ class MaskManager(object):
@return: A matching atom string or None if one is not found.
"""
- cp = cpv_getkey(cpv)
- mask_atoms = self._pmaskdict.get(cp)
+ try:
+ cpv.slot
+ except AttributeError:
+ pkg = _pkg_str(cpv, slot=slot, repo=repo)
+ else:
+ pkg = cpv
+
+ mask_atoms = self._pmaskdict.get(pkg.cp)
if mask_atoms:
- pkg = "".join((cpv, _slot_separator, slot))
- if repo and repo != Package.UNKNOWN_REPO:
- pkg = "".join((pkg, _repo_separator, repo))
pkg_list = [pkg]
for x in mask_atoms:
if not match_from_list(x, pkg_list):
@@ -219,8 +221,15 @@ class MaskManager(object):
@return: A matching atom string or None if one is not found.
"""
- cp = cpv_getkey(cpv)
- return self._getMaskAtom(cpv, slot, repo, self._punmaskdict.get(cp))
+ try:
+ cpv.slot
+ except AttributeError:
+ pkg = _pkg_str(cpv, slot=slot, repo=repo)
+ else:
+ pkg = cpv
+
+ return self._getMaskAtom(pkg, slot, repo,
+ self._punmaskdict.get(pkg.cp))
def getRawMaskAtom(self, cpv, slot, repo):
diff --git a/pym/portage/package/ebuild/_config/UseManager.py b/pym/portage/package/ebuild/_config/UseManager.py
index e1ec7f4a0..1c8c60eae 100644
--- a/pym/portage/package/ebuild/_config/UseManager.py
+++ b/pym/portage/package/ebuild/_config/UseManager.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = (
@@ -7,36 +7,49 @@ __all__ = (
from _emerge.Package import Package
from portage import os
-from portage.dep import dep_getrepo, dep_getslot, ExtendedAtomDict, remove_slot, _get_useflag_re
+from portage.dep import Atom, dep_getrepo, dep_getslot, ExtendedAtomDict, remove_slot, _get_useflag_re, _repo_separator
+from portage.eapi import eapi_has_use_aliases, eapi_supports_stable_use_forcing_and_masking
+from portage.exception import InvalidAtom
from portage.localization import _
-from portage.util import grabfile, grabdict_package, read_corresponding_eapi_file, stack_lists, writemsg
-from portage.versions import cpv_getkey, _pkg_str
+from portage.util import grabfile, grabdict, grabdict_package, read_corresponding_eapi_file, stack_lists, writemsg
+from portage.versions import _pkg_str
from portage.package.ebuild._config.helper import ordered_by_atom_specificity
class UseManager(object):
- def __init__(self, repositories, profiles, abs_user_config, user_config=True):
+ def __init__(self, repositories, profiles, abs_user_config, is_stable,
+ user_config=True):
# file variable
#--------------------------------
# repositories
#--------------------------------
# use.mask _repo_usemask_dict
+ # use.stable.mask _repo_usestablemask_dict
# use.force _repo_useforce_dict
+ # use.stable.force _repo_usestableforce_dict
+ # use.aliases _repo_usealiases_dict
# package.use.mask _repo_pusemask_dict
+ # package.use.stable.mask _repo_pusestablemask_dict
# package.use.force _repo_puseforce_dict
+ # package.use.stable.force _repo_pusestableforce_dict
+ # package.use.aliases _repo_pusealiases_dict
#--------------------------------
# profiles
#--------------------------------
# use.mask _usemask_list
+ # use.stable.mask _usestablemask_list
# use.force _useforce_list
+ # use.stable.force _usestableforce_list
# package.use.mask _pusemask_list
+ # package.use.stable.mask _pusestablemask_list
# package.use _pkgprofileuse
# package.use.force _puseforce_list
+ # package.use.stable.force _pusestableforce_list
#--------------------------------
# user config
#--------------------------------
- # package.use _pusedict
+ # package.use _pusedict
# Dynamic variables tracked by the config class
#--------------------------------
@@ -49,26 +62,61 @@ class UseManager(object):
#--------------------------------
# puse
+ self._user_config = user_config
+ self._is_stable = is_stable
self._repo_usemask_dict = self._parse_repository_files_to_dict_of_tuples("use.mask", repositories)
+ self._repo_usestablemask_dict = \
+ self._parse_repository_files_to_dict_of_tuples("use.stable.mask",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._repo_useforce_dict = self._parse_repository_files_to_dict_of_tuples("use.force", repositories)
+ self._repo_usestableforce_dict = \
+ self._parse_repository_files_to_dict_of_tuples("use.stable.force",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._repo_pusemask_dict = self._parse_repository_files_to_dict_of_dicts("package.use.mask", repositories)
+ self._repo_pusestablemask_dict = \
+ self._parse_repository_files_to_dict_of_dicts("package.use.stable.mask",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._repo_puseforce_dict = self._parse_repository_files_to_dict_of_dicts("package.use.force", repositories)
+ self._repo_pusestableforce_dict = \
+ self._parse_repository_files_to_dict_of_dicts("package.use.stable.force",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._repo_puse_dict = self._parse_repository_files_to_dict_of_dicts("package.use", repositories)
self._usemask_list = self._parse_profile_files_to_tuple_of_tuples("use.mask", profiles)
+ self._usestablemask_list = \
+ self._parse_profile_files_to_tuple_of_tuples("use.stable.mask",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._useforce_list = self._parse_profile_files_to_tuple_of_tuples("use.force", profiles)
+ self._usestableforce_list = \
+ self._parse_profile_files_to_tuple_of_tuples("use.stable.force",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._pusemask_list = self._parse_profile_files_to_tuple_of_dicts("package.use.mask", profiles)
+ self._pusestablemask_list = \
+ self._parse_profile_files_to_tuple_of_dicts("package.use.stable.mask",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._pkgprofileuse = self._parse_profile_files_to_tuple_of_dicts("package.use", profiles, juststrings=True)
self._puseforce_list = self._parse_profile_files_to_tuple_of_dicts("package.use.force", profiles)
+ self._pusestableforce_list = \
+ self._parse_profile_files_to_tuple_of_dicts("package.use.stable.force",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
self._pusedict = self._parse_user_files_to_extatomdict("package.use", abs_user_config, user_config)
+ self._repo_usealiases_dict = self._parse_repository_usealiases(repositories)
+ self._repo_pusealiases_dict = self._parse_repository_packageusealiases(repositories)
+
self.repositories = repositories
-
- def _parse_file_to_tuple(self, file_name, recursive=True):
+
+ def _parse_file_to_tuple(self, file_name, recursive=True, eapi_filter=None):
ret = []
lines = grabfile(file_name, recursive=recursive)
eapi = read_corresponding_eapi_file(file_name)
+ if eapi_filter is not None and not eapi_filter(eapi):
+ if lines:
+ writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
+ (eapi, os.path.basename(file_name), file_name),
+ noiselevel=-1)
+ return ()
useflag_re = _get_useflag_re(eapi)
for prefixed_useflag in lines:
if prefixed_useflag[:1] == "-":
@@ -82,11 +130,26 @@ class UseManager(object):
ret.append(prefixed_useflag)
return tuple(ret)
- def _parse_file_to_dict(self, file_name, juststrings=False, recursive=True):
+ def _parse_file_to_dict(self, file_name, juststrings=False, recursive=True,
+ eapi_filter=None, user_config=False):
ret = {}
location_dict = {}
- file_dict = grabdict_package(file_name, recursive=recursive, verify_eapi=True)
- eapi = read_corresponding_eapi_file(file_name)
+ eapi = read_corresponding_eapi_file(file_name, default=None)
+ if eapi is None and not user_config:
+ eapi = "0"
+ if eapi is None:
+ ret = ExtendedAtomDict(dict)
+ else:
+ ret = {}
+ file_dict = grabdict_package(file_name, recursive=recursive,
+ allow_wildcard=(eapi is None), allow_repo=(eapi is None),
+ verify_eapi=(eapi is not None))
+ if eapi is not None and eapi_filter is not None and not eapi_filter(eapi):
+ if file_dict:
+ writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
+ (eapi, os.path.basename(file_name), file_name),
+ noiselevel=-1)
+ return ret
useflag_re = _get_useflag_re(eapi)
for k, v in file_dict.items():
useflags = []
@@ -119,31 +182,116 @@ class UseManager(object):
return ret
- def _parse_repository_files_to_dict_of_tuples(self, file_name, repositories):
+ def _parse_repository_files_to_dict_of_tuples(self, file_name, repositories, eapi_filter=None):
ret = {}
for repo in repositories.repos_with_profiles():
- ret[repo.name] = self._parse_file_to_tuple(os.path.join(repo.location, "profiles", file_name))
+ ret[repo.name] = self._parse_file_to_tuple(os.path.join(repo.location, "profiles", file_name), eapi_filter=eapi_filter)
return ret
- def _parse_repository_files_to_dict_of_dicts(self, file_name, repositories):
+ def _parse_repository_files_to_dict_of_dicts(self, file_name, repositories, eapi_filter=None):
ret = {}
for repo in repositories.repos_with_profiles():
- ret[repo.name] = self._parse_file_to_dict(os.path.join(repo.location, "profiles", file_name))
+ ret[repo.name] = self._parse_file_to_dict(os.path.join(repo.location, "profiles", file_name), eapi_filter=eapi_filter)
return ret
- def _parse_profile_files_to_tuple_of_tuples(self, file_name, locations):
+ def _parse_profile_files_to_tuple_of_tuples(self, file_name, locations,
+ eapi_filter=None):
return tuple(self._parse_file_to_tuple(
os.path.join(profile.location, file_name),
- recursive=profile.portage1_directories)
+ recursive=profile.portage1_directories, eapi_filter=eapi_filter)
for profile in locations)
- def _parse_profile_files_to_tuple_of_dicts(self, file_name, locations, juststrings=False):
+ def _parse_profile_files_to_tuple_of_dicts(self, file_name, locations,
+ juststrings=False, eapi_filter=None):
return tuple(self._parse_file_to_dict(
os.path.join(profile.location, file_name), juststrings,
- recursive=profile.portage1_directories)
+ recursive=profile.portage1_directories, eapi_filter=eapi_filter,
+ user_config=profile.user_config)
for profile in locations)
- def getUseMask(self, pkg=None):
+ def _parse_repository_usealiases(self, repositories):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ file_name = os.path.join(repo.location, "profiles", "use.aliases")
+ eapi = read_corresponding_eapi_file(file_name)
+ useflag_re = _get_useflag_re(eapi)
+ raw_file_dict = grabdict(file_name, recursive=True)
+ file_dict = {}
+ for real_flag, aliases in raw_file_dict.items():
+ if useflag_re.match(real_flag) is None:
+ writemsg(_("--- Invalid real USE flag in '%s': '%s'\n") % (file_name, real_flag), noiselevel=-1)
+ else:
+ for alias in aliases:
+ if useflag_re.match(alias) is None:
+ writemsg(_("--- Invalid USE flag alias for '%s' real USE flag in '%s': '%s'\n") %
+ (real_flag, file_name, alias), noiselevel=-1)
+ else:
+ if any(alias in v for k, v in file_dict.items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias in '%s': '%s'\n") %
+ (file_name, alias), noiselevel=-1)
+ else:
+ file_dict.setdefault(real_flag, []).append(alias)
+ ret[repo.name] = file_dict
+ return ret
+
+ def _parse_repository_packageusealiases(self, repositories):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ file_name = os.path.join(repo.location, "profiles", "package.use.aliases")
+ eapi = read_corresponding_eapi_file(file_name)
+ useflag_re = _get_useflag_re(eapi)
+ lines = grabfile(file_name, recursive=True)
+ file_dict = {}
+ for line in lines:
+ elements = line.split()
+ atom = elements[0]
+ try:
+ atom = Atom(atom, eapi=eapi)
+ except InvalidAtom:
+ writemsg(_("--- Invalid atom in '%s': '%s'\n") % (file_name, atom))
+ continue
+ if len(elements) == 1:
+ writemsg(_("--- Missing real USE flag for '%s' in '%s'\n") % (atom, file_name), noiselevel=-1)
+ continue
+ real_flag = elements[1]
+ if useflag_re.match(real_flag) is None:
+ writemsg(_("--- Invalid real USE flag for '%s' in '%s': '%s'\n") % (atom, file_name, real_flag), noiselevel=-1)
+ else:
+ for alias in elements[2:]:
+ if useflag_re.match(alias) is None:
+ writemsg(_("--- Invalid USE flag alias for '%s' real USE flag for '%s' in '%s': '%s'\n") %
+ (real_flag, atom, file_name, alias), noiselevel=-1)
+ else:
+ # Duplicated USE flag aliases in entries for different atoms
+ # matching the same package version are detected in getUseAliases().
+ if any(alias in v for k, v in file_dict.get(atom.cp, {}).get(atom, {}).items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias for '%s' in '%s': '%s'\n") %
+ (atom, file_name, alias), noiselevel=-1)
+ else:
+ file_dict.setdefault(atom.cp, {}).setdefault(atom, {}).setdefault(real_flag, []).append(alias)
+ ret[repo.name] = file_dict
+ return ret
+
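
Note: both alias parsers above consume simple whitespace-separated lines: use.aliases maps a real flag to its aliases, while package.use.aliases prefixes that with an atom. A sketch of the resulting shapes, with made-up flag and repository names:

    # Hypothetical inputs and the parsed shapes (illustration only):
    # profiles/use.aliases line:           "abi_x86_32 lib32"
    # -> _repo_usealiases_dict:            {"gentoo": {"abi_x86_32": ["lib32"]}}
    # profiles/package.use.aliases line:   "dev-lang/python:2.7 sqlite sqlite3"
    # -> _repo_pusealiases_dict["gentoo"]: {"dev-lang/python":
    #        {Atom("dev-lang/python:2.7"): {"sqlite": ["sqlite3"]}}}
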
+ def _isStable(self, pkg):
+ if self._user_config:
+ try:
+ return pkg.stable
+ except AttributeError:
+ # KEYWORDS is unavailable (prior to "depend" phase)
+ return False
+
+ try:
+ pkg._metadata
+ except AttributeError:
+ # KEYWORDS is unavailable (prior to "depend" phase)
+ return False
+
+ # Since repoman uses different config instances for
+ # different profiles, we have to be careful to do the
+ # stable check against the correct profile here.
+ return self._is_stable(pkg)
+
+ def getUseMask(self, pkg=None, stable=None):
if pkg is None:
return frozenset(stack_lists(
self._usemask_list, incremental=True))
@@ -155,7 +303,12 @@ class UseManager(object):
repo = dep_getrepo(pkg)
pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
cp = pkg.cp
+
+ if stable is None:
+ stable = self._isStable(pkg)
+
usemask = []
+
if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
repos = []
try:
@@ -166,30 +319,56 @@ class UseManager(object):
repos.append(pkg.repo)
for repo in repos:
usemask.append(self._repo_usemask_dict.get(repo, {}))
+ if stable:
+ usemask.append(self._repo_usestablemask_dict.get(repo, {}))
cpdict = self._repo_pusemask_dict.get(repo, {}).get(cp)
if cpdict:
pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
if pkg_usemask:
usemask.extend(pkg_usemask)
+ if stable:
+ cpdict = self._repo_pusestablemask_dict.get(repo, {}).get(cp)
+ if cpdict:
+ pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_usemask:
+ usemask.extend(pkg_usemask)
+
for i, pusemask_dict in enumerate(self._pusemask_list):
if self._usemask_list[i]:
usemask.append(self._usemask_list[i])
+ if stable and self._usestablemask_list[i]:
+ usemask.append(self._usestablemask_list[i])
cpdict = pusemask_dict.get(cp)
if cpdict:
pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
if pkg_usemask:
usemask.extend(pkg_usemask)
+ if stable:
+ cpdict = self._pusestablemask_list[i].get(cp)
+ if cpdict:
+ pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_usemask:
+ usemask.extend(pkg_usemask)
+
return frozenset(stack_lists(usemask, incremental=True))
- def getUseForce(self, pkg=None):
+ def getUseForce(self, pkg=None, stable=None):
if pkg is None:
return frozenset(stack_lists(
self._useforce_list, incremental=True))
cp = getattr(pkg, "cp", None)
if cp is None:
- cp = cpv_getkey(remove_slot(pkg))
+ slot = dep_getslot(pkg)
+ repo = dep_getrepo(pkg)
+ pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+ cp = pkg.cp
+
+ if stable is None:
+ stable = self._isStable(pkg)
+
useforce = []
+
if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
repos = []
try:
@@ -200,25 +379,90 @@ class UseManager(object):
repos.append(pkg.repo)
for repo in repos:
useforce.append(self._repo_useforce_dict.get(repo, {}))
+ if stable:
+ useforce.append(self._repo_usestableforce_dict.get(repo, {}))
cpdict = self._repo_puseforce_dict.get(repo, {}).get(cp)
if cpdict:
pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
if pkg_useforce:
useforce.extend(pkg_useforce)
+ if stable:
+ cpdict = self._repo_pusestableforce_dict.get(repo, {}).get(cp)
+ if cpdict:
+ pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_useforce:
+ useforce.extend(pkg_useforce)
+
for i, puseforce_dict in enumerate(self._puseforce_list):
if self._useforce_list[i]:
useforce.append(self._useforce_list[i])
+ if stable and self._usestableforce_list[i]:
+ useforce.append(self._usestableforce_list[i])
cpdict = puseforce_dict.get(cp)
if cpdict:
pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
if pkg_useforce:
useforce.extend(pkg_useforce)
+ if stable:
+ cpdict = self._pusestableforce_list[i].get(cp)
+ if cpdict:
+ pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_useforce:
+ useforce.extend(pkg_useforce)
+
return frozenset(stack_lists(useforce, incremental=True))
+ def getUseAliases(self, pkg):
+ if hasattr(pkg, "eapi") and not eapi_has_use_aliases(pkg.eapi):
+ return {}
+
+ cp = getattr(pkg, "cp", None)
+ if cp is None:
+ slot = dep_getslot(pkg)
+ repo = dep_getrepo(pkg)
+ pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+ cp = pkg.cp
+
+ usealiases = {}
+
+ if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
+ repos = []
+ try:
+ repos.extend(repo.name for repo in
+ self.repositories[pkg.repo].masters)
+ except KeyError:
+ pass
+ repos.append(pkg.repo)
+ for repo in repos:
+ usealiases_dict = self._repo_usealiases_dict.get(repo, {})
+ for real_flag, aliases in usealiases_dict.items():
+ for alias in aliases:
+ if any(alias in v for k, v in usealiases.items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias for '%s%s%s': '%s'\n") %
+ (pkg.cpv, _repo_separator, pkg.repo, alias), noiselevel=-1)
+ else:
+ usealiases.setdefault(real_flag, []).append(alias)
+ cp_usealiases_dict = self._repo_pusealiases_dict.get(repo, {}).get(cp)
+ if cp_usealiases_dict:
+ usealiases_dict_list = ordered_by_atom_specificity(cp_usealiases_dict, pkg)
+ for usealiases_dict in usealiases_dict_list:
+ for real_flag, aliases in usealiases_dict.items():
+ for alias in aliases:
+ if any(alias in v for k, v in usealiases.items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias for '%s%s%s': '%s'\n") %
+ (pkg.cpv, _repo_separator, pkg.repo, alias), noiselevel=-1)
+ else:
+ usealiases.setdefault(real_flag, []).append(alias)
+
+ return usealiases
+
def getPUSE(self, pkg):
cp = getattr(pkg, "cp", None)
if cp is None:
- cp = cpv_getkey(remove_slot(pkg))
+ slot = dep_getslot(pkg)
+ repo = dep_getrepo(pkg)
+ pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+ cp = pkg.cp
ret = ""
cpdict = self._pusedict.get(cp)
if cpdict:
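
Note: getUseMask() and getUseForce() above consult the *.stable.* tables only when _isStable() accepts the package, stacking repository entries (masters first) before profile entries. A condensed, hypothetical re-statement of that ordering, with the per-package atom dictionaries omitted; repos_for stands in for the masters lookup and is not a real helper:

    from portage.util import stack_lists

    def effective_usemask(mgr, pkg, repos_for):
        # mgr is a UseManager; repos_for(pkg) yields master repo names
        # followed by pkg.repo itself (hypothetical helper).
        stable = mgr._isStable(pkg)
        layers = []
        for repo in repos_for(pkg):
            layers.append(mgr._repo_usemask_dict.get(repo, {}))
            if stable:
                layers.append(mgr._repo_usestablemask_dict.get(repo, {}))
        for i in range(len(mgr._usemask_list)):
            layers.append(mgr._usemask_list[i])
            if stable:
                layers.append(mgr._usestablemask_list[i])
        return frozenset(stack_lists(layers, incremental=True))
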
diff --git a/pym/portage/package/ebuild/_config/special_env_vars.py b/pym/portage/package/ebuild/_config/special_env_vars.py
index 6ed6d0542..74fedd689 100644
--- a/pym/portage/package/ebuild/_config/special_env_vars.py
+++ b/pym/portage/package/ebuild/_config/special_env_vars.py
@@ -1,6 +1,8 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = (
'case_insensitive_vars', 'default_globals', 'env_blacklist', \
'environ_filter', 'environ_whitelist', 'environ_whitelist_re',
@@ -13,14 +15,17 @@ import re
# configuration files.
env_blacklist = frozenset((
"A", "AA", "CATEGORY", "DEPEND", "DESCRIPTION", "EAPI",
- "EBUILD_FORCE_TEST", "EBUILD_PHASE", "EBUILD_SKIP_MANIFEST",
+ "EBUILD_FORCE_TEST", "EBUILD_PHASE",
+ "EBUILD_PHASE_FUNC", "EBUILD_SKIP_MANIFEST",
"ED", "EMERGE_FROM", "EPREFIX", "EROOT",
- "GREP_OPTIONS", "HOMEPAGE", "INHERITED", "IUSE",
+ "GREP_OPTIONS", "HDEPEND", "HOMEPAGE",
+ "INHERITED", "IUSE", "IUSE_EFFECTIVE",
"KEYWORDS", "LICENSE", "MERGE_TYPE",
"PDEPEND", "PF", "PKGUSE", "PORTAGE_BACKGROUND",
- "PORTAGE_BACKGROUND_UNMERGE", "PORTAGE_BUILDIR_LOCKED",
- "PORTAGE_BUILT_USE", "PORTAGE_CONFIGROOT", "PORTAGE_IUSE",
- "PORTAGE_NONFATAL", "PORTAGE_REPO_NAME",
+ "PORTAGE_BACKGROUND_UNMERGE", "PORTAGE_BUILDDIR_LOCKED",
+ "PORTAGE_BUILT_USE", "PORTAGE_CONFIGROOT",
+ "PORTAGE_INTERNAL_CALLER", "PORTAGE_IUSE",
+ "PORTAGE_NONFATAL", "PORTAGE_PIPE_FD", "PORTAGE_REPO_NAME",
"PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "REPOSITORY",
"RESTRICT", "ROOT", "SLOT", "SRC_URI"
))
@@ -39,7 +44,7 @@ environ_whitelist += [
"ACCEPT_LICENSE", "BASH_ENV", "BUILD_PREFIX", "COLUMNS", "D",
"DISTDIR", "DOC_SYMLINKS_DIR", "EAPI", "EBUILD",
"EBUILD_FORCE_TEST",
- "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "ED",
+ "EBUILD_PHASE", "EBUILD_PHASE_FUNC", "ECLASSDIR", "ECLASS_DEPTH", "ED",
"EMERGE_FROM", "EPREFIX", "EROOT",
"FEATURES", "FILESDIR", "HOME", "MERGE_TYPE", "NOCOLOR", "PATH",
"PKGDIR",
@@ -49,7 +54,8 @@ environ_whitelist += [
"PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
"PORTAGE_BINPKG_TMPFILE",
"PORTAGE_BIN_PATH",
- "PORTAGE_BUILDDIR", "PORTAGE_BUNZIP2_COMMAND", "PORTAGE_BZIP2_COMMAND",
+ "PORTAGE_BUILDDIR", "PORTAGE_BUILD_GROUP", "PORTAGE_BUILD_USER",
+ "PORTAGE_BUNZIP2_COMMAND", "PORTAGE_BZIP2_COMMAND",
"PORTAGE_COLORMAP", "PORTAGE_COMPRESS",
"PORTAGE_COMPRESS_EXCLUDE_SUFFIXES",
"PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
@@ -58,14 +64,16 @@ environ_whitelist += [
"PORTAGE_DOHTML_WARN_ON_SKIPPED_FILES",
"PORTAGE_EBUILD_EXIT_FILE", "PORTAGE_FEATURES",
"PORTAGE_GID", "PORTAGE_GRPNAME",
+ "PORTAGE_INTERNAL_CALLER",
"PORTAGE_INST_GID", "PORTAGE_INST_UID",
- "PORTAGE_IPC_DAEMON", "PORTAGE_IUSE",
- "PORTAGE_LOG_FILE", "PORTAGE_OVERRIDE_EPREFIX",
- "PORTAGE_PYM_PATH", "PORTAGE_PYTHON", "PORTAGE_QUIET",
- "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
+ "PORTAGE_IPC_DAEMON", "PORTAGE_IUSE", "PORTAGE_ECLASS_LOCATIONS",
+ "PORTAGE_LOG_FILE", "PORTAGE_OVERRIDE_EPREFIX", "PORTAGE_PIPE_FD",
+ "PORTAGE_PYM_PATH", "PORTAGE_PYTHON",
+ "PORTAGE_PYTHONPATH", "PORTAGE_QUIET",
+ "PORTAGE_REPO_NAME", "PORTAGE_REPOSITORIES", "PORTAGE_RESTRICT",
"PORTAGE_SIGPIPE_STATUS",
"PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV", "PORTAGE_USERNAME",
- "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
+ "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE", "PORTAGE_XATTR_EXCLUDE",
"PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
"REPLACING_VERSIONS", "REPLACED_BY_VERSION",
"ROOT", "ROOTPATH", "T", "TMP", "TMPDIR",
@@ -137,9 +145,11 @@ environ_filter += [
# portage config variables and variables set directly by portage
environ_filter += [
- "ACCEPT_CHOSTS", "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES", "AUTOCLEAN",
+ "ACCEPT_CHOSTS", "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES",
+ "ACCEPT_RESTRICT", "AUTOCLEAN",
"CLEAN_DELAY", "COLLISION_IGNORE",
"CONFIG_PROTECT", "CONFIG_PROTECT_MASK",
+ "DCO_SIGNED_OFF_BY",
"EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS",
"EMERGE_LOG_DIR",
"EMERGE_WARNING_DELAY",
@@ -148,8 +158,9 @@ environ_filter += [
"FETCHCOMMAND_RSYNC", "FETCHCOMMAND_SFTP",
"GENTOO_MIRRORS", "NOCONFMEM", "O",
"PORTAGE_BACKGROUND", "PORTAGE_BACKGROUND_UNMERGE",
- "PORTAGE_BINHOST",
- "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_BUILDIR_LOCKED",
+ "PORTAGE_BINHOST", "PORTAGE_BINPKG_FORMAT",
+ "PORTAGE_BUILDDIR_LOCKED",
+ "PORTAGE_CHECKSUM_FILTER",
"PORTAGE_ELOG_CLASSES",
"PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
"PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
@@ -161,13 +172,20 @@ environ_filter += [
"PORTAGE_REPO_DUPLICATE_WARN",
"PORTAGE_RO_DISTDIRS",
"PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
- "PORTAGE_RSYNC_RETRIES", "PORTAGE_SYNC_STALE",
- "PORTAGE_USE", "PORT_LOGDIR", "PORT_LOGDIR_CLEAN",
+ "PORTAGE_RSYNC_RETRIES", "PORTAGE_SSH_OPTS", "PORTAGE_SYNC_STALE",
+ "PORTAGE_USE",
+ "PORT_LOGDIR", "PORT_LOGDIR_CLEAN",
"QUICKPKG_DEFAULT_OPTS", "REPOMAN_DEFAULT_OPTS",
"RESUMECOMMAND", "RESUMECOMMAND_FTP",
"RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTPS",
"RESUMECOMMAND_RSYNC", "RESUMECOMMAND_SFTP",
- "SYNC", "UNINSTALL_IGNORE", "USE_EXPAND_HIDDEN", "USE_ORDER",
+ "UNINSTALL_IGNORE", "USE_EXPAND_HIDDEN", "USE_ORDER",
+ "__PORTAGE_HELPER"
+]
+
+# No longer supported variables
+environ_filter += [
+ "SYNC"
]
environ_filter = frozenset(environ_filter)
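
Note: environ_filter is a frozenset consumed when Portage builds the environment for ebuild processes; variables listed in it are dropped rather than exported. A sketch of the idea, not the actual call site:

    import os
    from portage.package.ebuild._config.special_env_vars import environ_filter

    # Drop filtered variables before handing the environment to an
    # ebuild process (illustrative; the real call site differs).
    ebuild_env = {k: v for k, v in os.environ.items()
        if k not in environ_filter}
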
diff --git a/pym/portage/package/ebuild/_config/unpack_dependencies.py b/pym/portage/package/ebuild/_config/unpack_dependencies.py
new file mode 100644
index 000000000..137518949
--- /dev/null
+++ b/pym/portage/package/ebuild/_config/unpack_dependencies.py
@@ -0,0 +1,38 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os, _supported_eapis
+from portage.dep import use_reduce
+from portage.eapi import eapi_has_automatic_unpack_dependencies
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util import grabfile, writemsg
+
+def load_unpack_dependencies_configuration(repositories):
+ repo_dict = {}
+ for repo in repositories.repos_with_profiles():
+ for eapi in _supported_eapis:
+ if eapi_has_automatic_unpack_dependencies(eapi):
+ file_name = os.path.join(repo.location, "profiles", "unpack_dependencies", eapi)
+ lines = grabfile(file_name, recursive=True)
+ for line in lines:
+ elements = line.split()
+ suffix = elements[0].lower()
+ if len(elements) == 1:
+ writemsg(_("--- Missing unpack dependencies for '%s' suffix in '%s'\n") % (suffix, file_name))
+ depend = " ".join(elements[1:])
+ try:
+ use_reduce(depend, eapi=eapi)
+ except InvalidDependString as e:
+ writemsg(_("--- Invalid unpack dependencies for '%s' suffix in '%s': '%s'\n" % (suffix, file_name, e)))
+ else:
+ repo_dict.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend
+
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ for repo_name in [x.name for x in repo.masters] + [repo.name]:
+ for eapi in repo_dict.get(repo_name, {}):
+ for suffix, depend in repo_dict.get(repo_name, {}).get(eapi, {}).items():
+ ret.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend
+
+ return ret
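
Note: each profiles/unpack_dependencies/&lt;EAPI&gt; file maps an archive suffix to the dependencies needed to unpack it, and the second loop folds master repositories into each repository's result. A sketch with made-up file contents, assuming an EAPI for which eapi_has_automatic_unpack_dependencies() is true:

    # Hypothetical line in profiles/unpack_dependencies/<EAPI>:
    #   rar app-arch/unrar
    #
    # load_unpack_dependencies_configuration(repositories) then returns
    # a nested dict shaped like:
    #   {"gentoo": {"<EAPI>": {"rar": "app-arch/unrar"}}}
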
diff --git a/pym/portage/package/ebuild/_ipc/QueryCommand.py b/pym/portage/package/ebuild/_ipc/QueryCommand.py
index d200fe80d..351c95628 100644
--- a/pym/portage/package/ebuild/_ipc/QueryCommand.py
+++ b/pym/portage/package/ebuild/_ipc/QueryCommand.py
@@ -1,12 +1,13 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import io
import portage
from portage import os
-from portage import _unicode_decode
-from portage.dep import Atom
+from portage.dep import Atom, _repo_name_re
from portage.eapi import eapi_has_repo_deps
from portage.elog import messages as elog_messages
from portage.exception import InvalidAtom
@@ -36,40 +37,47 @@ class QueryCommand(IpcCommand):
@return: tuple of (stdout, stderr, returncode)
"""
- cmd, root, atom_str = argv
-
- eapi = self.settings.get('EAPI')
- allow_repo = eapi_has_repo_deps(eapi)
- try:
- atom = Atom(atom_str, allow_repo=allow_repo)
- except InvalidAtom:
- return ('', 'invalid atom: %s\n' % atom_str, 2)
+ # Python 3:
+ # cmd, root, *args = argv
+ cmd = argv[0]
+ root = argv[1]
+ args = argv[2:]
warnings = []
- try:
- atom = Atom(atom_str, allow_repo=allow_repo, eapi=eapi)
- except InvalidAtom as e:
- warnings.append(_unicode_decode("QA Notice: %s: %s") % (cmd, e))
-
- use = self.settings.get('PORTAGE_BUILT_USE')
- if use is None:
- use = self.settings['PORTAGE_USE']
-
- use = frozenset(use.split())
- atom = atom.evaluate_conditionals(use)
+ warnings_str = ''
db = self.get_db()
-
- warnings_str = ''
- if warnings:
- warnings_str = self._elog('eqawarn', warnings)
+ eapi = self.settings.get('EAPI')
root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
if root not in db:
- return ('', 'invalid ROOT: %s\n' % root, 2)
+ return ('', '%s: Invalid ROOT: %s\n' % (cmd, root), 3)
+ portdb = db[root]["porttree"].dbapi
vardb = db[root]["vartree"].dbapi
+ if cmd in ('best_version', 'has_version'):
+ allow_repo = eapi_has_repo_deps(eapi)
+ try:
+ atom = Atom(args[0], allow_repo=allow_repo)
+ except InvalidAtom:
+ return ('', '%s: Invalid atom: %s\n' % (cmd, args[0]), 2)
+
+ try:
+ atom = Atom(args[0], allow_repo=allow_repo, eapi=eapi)
+ except InvalidAtom as e:
+ warnings.append("QA Notice: %s: %s" % (cmd, e))
+
+ use = self.settings.get('PORTAGE_BUILT_USE')
+ if use is None:
+ use = self.settings['PORTAGE_USE']
+
+ use = frozenset(use.split())
+ atom = atom.evaluate_conditionals(use)
+
+ if warnings:
+ warnings_str = self._elog('eqawarn', warnings)
+
if cmd == 'has_version':
if vardb.match(atom):
returncode = 0
@@ -79,8 +87,35 @@ class QueryCommand(IpcCommand):
elif cmd == 'best_version':
m = best(vardb.match(atom))
return ('%s\n' % m, warnings_str, 0)
+ elif cmd in ('master_repositories', 'repository_path', 'available_eclasses', 'eclass_path', 'license_path'):
+ repo = _repo_name_re.match(args[0])
+ if repo is None:
+ return ('', '%s: Invalid repository: %s\n' % (cmd, args[0]), 2)
+ try:
+ repo = portdb.repositories[args[0]]
+ except KeyError:
+ return ('', warnings_str, 1)
+
+ if cmd == 'master_repositories':
+ return ('%s\n' % ' '.join(x.name for x in repo.masters), warnings_str, 0)
+ elif cmd == 'repository_path':
+ return ('%s\n' % repo.location, warnings_str, 0)
+ elif cmd == 'available_eclasses':
+ return ('%s\n' % ' '.join(sorted(repo.eclass_db.eclasses)), warnings_str, 0)
+ elif cmd == 'eclass_path':
+ try:
+ eclass = repo.eclass_db.eclasses[args[1]]
+ except KeyError:
+ return ('', warnings_str, 1)
+ return ('%s\n' % eclass.location, warnings_str, 0)
+ elif cmd == 'license_path':
+ paths = reversed([os.path.join(x.location, 'licenses', args[1]) for x in list(repo.masters) + [repo]])
+ for path in paths:
+ if os.path.exists(path):
+ return ('%s\n' % path, warnings_str, 0)
+ return ('', warnings_str, 1)
else:
- return ('', 'invalid command: %s\n' % cmd, 2)
+ return ('', 'Invalid command: %s\n' % cmd, 3)
def _elog(self, elog_funcname, lines):
"""
diff --git a/pym/portage/package/ebuild/_eapi_invalid.py b/pym/portage/package/ebuild/_metadata_invalid.py
index d23677d23..bcf1f7fcd 100644
--- a/pym/portage/package/ebuild/_eapi_invalid.py
+++ b/pym/portage/package/ebuild/_metadata_invalid.py
@@ -28,19 +28,6 @@ def eapi_invalid(self, cpv, repo_name, settings,
"assignment on line: %s") %
(eapi_var, eapi_lineno))
- if 'parse-eapi-ebuild-head' in settings.features:
- msg.extend(textwrap.wrap(("NOTE: This error will soon"
- " become unconditionally fatal in a future version of Portage,"
- " but at this time, it can by made non-fatal by setting"
- " FEATURES=-parse-eapi-ebuild-head in"
- " make.conf."), 70))
- else:
- msg.extend(textwrap.wrap(("NOTE: This error will soon"
- " become unconditionally fatal in a future version of Portage."
- " At the earliest opportunity, please enable"
- " FEATURES=parse-eapi-ebuild-head in make.conf in order to"
- " make this error fatal."), 70))
-
if portage.data.secpass >= 2:
# TODO: improve elog permission error handling (bug #416231)
for line in msg:
diff --git a/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py b/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py
new file mode 100644
index 000000000..44e257664
--- /dev/null
+++ b/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py
@@ -0,0 +1,43 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.exception import (FileNotFound,
+ PermissionDenied, PortagePackageException)
+from portage.localization import _
+from portage.util._async.ForkProcess import ForkProcess
+
+class ManifestProcess(ForkProcess):
+
+ __slots__ = ("cp", "distdir", "fetchlist_dict", "repo_config")
+
+ MODIFIED = 16
+
+ def _run(self):
+ mf = self.repo_config.load_manifest(
+ os.path.join(self.repo_config.location, self.cp),
+ self.distdir, fetchlist_dict=self.fetchlist_dict)
+
+ try:
+ mf.create(assumeDistHashesAlways=True)
+ except FileNotFound as e:
+ portage.writemsg(_("!!! File %s doesn't exist, can't update "
+ "Manifest\n") % e, noiselevel=-1)
+ return 1
+
+ except PortagePackageException as e:
+ portage.writemsg(("!!! %s\n") % (e,), noiselevel=-1)
+ return 1
+
+ try:
+ modified = mf.write(sign=False)
+ except PermissionDenied as e:
+ portage.writemsg("!!! %s: %s\n" % (_("Permission Denied"), e,),
+ noiselevel=-1)
+ return 1
+ else:
+ if modified:
+ return self.MODIFIED
+ else:
+ return os.EX_OK
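
Note: ManifestProcess communicates only through its exit status: os.EX_OK when the Manifest was already current, MODIFIED (16) after rewriting it, and 1 on failure. A hypothetical synchronous caller; the real consumer is ManifestTask, shown below:

    # cp, distdir, fetchlist_dict, repo_config and scheduler as in
    # ManifestTask._start() below (hypothetical standalone use).
    proc = ManifestProcess(cp=cp, distdir=distdir,
        fetchlist_dict=fetchlist_dict, repo_config=repo_config,
        scheduler=scheduler)
    proc.start()
    proc.wait()
    if proc.returncode == ManifestProcess.MODIFIED:
        pass  # Manifest rewritten; a signing step may be needed
    elif proc.returncode != os.EX_OK:
        pass  # creation failed; diagnostics already on stderr
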
diff --git a/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py b/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py
new file mode 100644
index 000000000..38ac4825e
--- /dev/null
+++ b/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py
@@ -0,0 +1,93 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.dep import _repo_separator
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util._async.AsyncScheduler import AsyncScheduler
+from .ManifestTask import ManifestTask
+
+class ManifestScheduler(AsyncScheduler):
+
+ def __init__(self, portdb, cp_iter=None,
+ gpg_cmd=None, gpg_vars=None, force_sign_key=None, **kwargs):
+
+ AsyncScheduler.__init__(self, **kwargs)
+
+ self._portdb = portdb
+
+ if cp_iter is None:
+ cp_iter = self._iter_every_cp()
+ self._cp_iter = cp_iter
+ self._gpg_cmd = gpg_cmd
+ self._gpg_vars = gpg_vars
+ self._force_sign_key = force_sign_key
+ self._task_iter = self._iter_tasks()
+
+ def _next_task(self):
+ return next(self._task_iter)
+
+ def _iter_every_cp(self):
+ # List categories individually, in order to start yielding more
+ # quickly and to reduce latency in case of a signal interrupt.
+ cp_all = self._portdb.cp_all
+ for category in sorted(self._portdb.categories):
+ for cp in cp_all(categories=(category,)):
+ yield cp
+
+ def _iter_tasks(self):
+ portdb = self._portdb
+ distdir = portdb.settings["DISTDIR"]
+ disabled_repos = set()
+
+ for cp in self._cp_iter:
+ if self._terminated.is_set():
+ break
+ # We iterate over portdb.porttrees, since it's common to
+ # tweak this attribute in order to adjust repo selection.
+ for mytree in portdb.porttrees:
+ if self._terminated.is_set():
+ break
+ repo_config = portdb.repositories.get_repo_for_location(mytree)
+ if not repo_config.create_manifest:
+ if repo_config.name not in disabled_repos:
+ disabled_repos.add(repo_config.name)
+ portage.writemsg(
+ _(">>> Skipping creating Manifest for %s%s%s; "
+ "repository is configured to not use them\n") %
+ (cp, _repo_separator, repo_config.name),
+ noiselevel=-1)
+ continue
+ cpv_list = portdb.cp_list(cp, mytree=[repo_config.location])
+ if not cpv_list:
+ continue
+ fetchlist_dict = {}
+ try:
+ for cpv in cpv_list:
+ fetchlist_dict[cpv] = \
+ list(portdb.getFetchMap(cpv, mytree=mytree))
+ except InvalidDependString as e:
+ portage.writemsg(
+ _("!!! %s%s%s: SRC_URI: %s\n") %
+ (cp, _repo_separator, repo_config.name, e),
+ noiselevel=-1)
+ self._error_count += 1
+ continue
+
+ yield ManifestTask(cp=cp, distdir=distdir,
+ fetchlist_dict=fetchlist_dict, repo_config=repo_config,
+ gpg_cmd=self._gpg_cmd, gpg_vars=self._gpg_vars,
+ force_sign_key=self._force_sign_key)
+
+ def _task_exit(self, task):
+
+ if task.returncode != os.EX_OK:
+ if not self._terminated_tasks:
+ portage.writemsg(
+ "Error processing %s%s%s, continuing...\n" %
+ (task.cp, _repo_separator, task.repo_config.name),
+ noiselevel=-1)
+
+ AsyncScheduler._task_exit(self, task)
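
Note: ManifestScheduler walks every cp in every repository with Manifests enabled and yields one ManifestTask per package directory. A construction sketch; the keyword values are illustrative, and max_jobs/max_load are assumed to be inherited AsyncScheduler options:

    scheduler = ManifestScheduler(portdb,
        gpg_cmd=None,  # no signing
        max_jobs=4, max_load=None)
    scheduler.start()
    scheduler.wait()
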
diff --git a/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py b/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py
new file mode 100644
index 000000000..0ee2b910d
--- /dev/null
+++ b/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py
@@ -0,0 +1,186 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import re
+import subprocess
+
+from portage import os
+from portage import _unicode_encode, _encodings
+from portage.const import MANIFEST2_IDENTIFIERS
+from portage.util import (atomic_ofstream, grablines,
+ shlex_split, varexpand, writemsg)
+from portage.util._async.PipeLogger import PipeLogger
+from portage.util._async.PopenProcess import PopenProcess
+from _emerge.CompositeTask import CompositeTask
+from _emerge.PipeReader import PipeReader
+from .ManifestProcess import ManifestProcess
+
+class ManifestTask(CompositeTask):
+
+ __slots__ = ("cp", "distdir", "fetchlist_dict", "gpg_cmd",
+ "gpg_vars", "repo_config", "force_sign_key", "_manifest_path")
+
+ _PGP_HEADER = b"BEGIN PGP SIGNED MESSAGE"
+ _manifest_line_re = re.compile(r'^(%s) ' % "|".join(MANIFEST2_IDENTIFIERS))
+ _gpg_key_id_re = re.compile(r'^[0-9A-F]*$')
+ _gpg_key_id_lengths = (8, 16, 24, 32, 40)
+
+ def _start(self):
+ self._manifest_path = os.path.join(self.repo_config.location,
+ self.cp, "Manifest")
+ manifest_proc = ManifestProcess(cp=self.cp, distdir=self.distdir,
+ fetchlist_dict=self.fetchlist_dict, repo_config=self.repo_config,
+ scheduler=self.scheduler)
+ self._start_task(manifest_proc, self._manifest_proc_exit)
+
+ def _manifest_proc_exit(self, manifest_proc):
+ self._assert_current(manifest_proc)
+ if manifest_proc.returncode not in (os.EX_OK, manifest_proc.MODIFIED):
+ self.returncode = manifest_proc.returncode
+ self._current_task = None
+ self.wait()
+ return
+
+ modified = manifest_proc.returncode == manifest_proc.MODIFIED
+ sign = self.gpg_cmd is not None
+
+ if not modified and sign:
+ sign = self._need_signature()
+ if not sign and self.force_sign_key is not None \
+ and os.path.exists(self._manifest_path):
+ self._check_sig_key()
+ return
+
+ if not sign or not os.path.exists(self._manifest_path):
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+ return
+
+ self._start_gpg_proc()
+
+ def _check_sig_key(self):
+ null_fd = os.open('/dev/null', os.O_RDONLY)
+ popen_proc = PopenProcess(proc=subprocess.Popen(
+ ["gpg", "--verify", self._manifest_path],
+ stdin=null_fd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ pipe_reader=PipeReader())
+ os.close(null_fd)
+ popen_proc.pipe_reader.input_files = {
+ "producer" : popen_proc.proc.stdout}
+ self._start_task(popen_proc, self._check_sig_key_exit)
+
+ @staticmethod
+ def _parse_gpg_key(output):
+ """
+ Returns the first token which appears to represent a gpg key
+ id, or None if there is no such token.
+ """
+ regex = ManifestTask._gpg_key_id_re
+ lengths = ManifestTask._gpg_key_id_lengths
+ for token in output.split():
+ m = regex.match(token)
+ if m is not None and len(m.group(0)) in lengths:
+ return m.group(0)
+ return None
+
+ @staticmethod
+ def _normalize_gpg_key(key_str):
+ """
+ Strips leading "0x" and trailing "!", and converts to uppercase
+ (intended to be the same format as that in gpg --verify output).
+ """
+ key_str = key_str.upper()
+ if key_str.startswith("0X"):
+ key_str = key_str[2:]
+ key_str = key_str.rstrip("!")
+ return key_str
+
+ def _check_sig_key_exit(self, proc):
+ self._assert_current(proc)
+
+ parsed_key = self._parse_gpg_key(
+ proc.pipe_reader.getvalue().decode('utf_8', 'replace'))
+ if parsed_key is not None and \
+ self._normalize_gpg_key(parsed_key) == \
+ self._normalize_gpg_key(self.force_sign_key):
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+ return
+
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ self._strip_sig(self._manifest_path)
+ self._start_gpg_proc()
+
+ @staticmethod
+ def _strip_sig(manifest_path):
+ """
+ Strip an existing signature from a Manifest file.
+ """
+ line_re = ManifestTask._manifest_line_re
+ lines = grablines(manifest_path)
+ f = None
+ try:
+ f = atomic_ofstream(manifest_path)
+ for line in lines:
+ if line_re.match(line) is not None:
+ f.write(line)
+ f.close()
+ f = None
+ finally:
+ if f is not None:
+ f.abort()
+
+ def _start_gpg_proc(self):
+ gpg_vars = self.gpg_vars
+ if gpg_vars is None:
+ gpg_vars = {}
+ else:
+ gpg_vars = gpg_vars.copy()
+ gpg_vars["FILE"] = self._manifest_path
+ gpg_cmd = varexpand(self.gpg_cmd, mydict=gpg_vars)
+ gpg_cmd = shlex_split(gpg_cmd)
+ gpg_proc = PopenProcess(proc=subprocess.Popen(gpg_cmd,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
+ # PipeLogger echoes output and efficiently monitors for process
+ # exit by listening for the stdout EOF event.
+ gpg_proc.pipe_reader = PipeLogger(background=self.background,
+ input_fd=gpg_proc.proc.stdout, scheduler=self.scheduler)
+ self._start_task(gpg_proc, self._gpg_proc_exit)
+
+ def _gpg_proc_exit(self, gpg_proc):
+ if self._default_exit(gpg_proc) != os.EX_OK:
+ self.wait()
+ return
+
+ rename_args = (self._manifest_path + ".asc", self._manifest_path)
+ try:
+ os.rename(*rename_args)
+ except OSError as e:
+ writemsg("!!! rename('%s', '%s'): %s\n" % rename_args + (e,),
+ noiselevel=-1)
+ try:
+ os.unlink(self._manifest_path + ".asc")
+ except OSError:
+ pass
+ self.returncode = 1
+ else:
+ self.returncode = os.EX_OK
+
+ self._current_task = None
+ self.wait()
+
+ def _need_signature(self):
+ try:
+ with open(_unicode_encode(self._manifest_path,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ return self._PGP_HEADER not in f.readline()
+ except IOError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ return False
+ raise
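
Note: the two static helpers above are self-contained; illustrative values (the key id is made up):

    ManifestTask._parse_gpg_key("Primary key fingerprint: 96D8BF6D")
    # -> "96D8BF6D" (first all-hex token with a plausible key-id length)
    ManifestTask._normalize_gpg_key("0x96d8bf6d!")
    # -> "96D8BF6D" (uppercased, "0x" prefix and "!" suffix stripped)
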
diff --git a/pym/portage/package/ebuild/_parallel_manifest/__init__.py b/pym/portage/package/ebuild/_parallel_manifest/__init__.py
new file mode 100644
index 000000000..418ad862b
--- /dev/null
+++ b/pym/portage/package/ebuild/_parallel_manifest/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/pym/portage/package/ebuild/_spawn_nofetch.py b/pym/portage/package/ebuild/_spawn_nofetch.py
index 94f8c79a3..0fc53c8ca 100644
--- a/pym/portage/package/ebuild/_spawn_nofetch.py
+++ b/pym/portage/package/ebuild/_spawn_nofetch.py
@@ -1,8 +1,9 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import tempfile
+import portage
from portage import os
from portage import shutil
from portage.const import EBUILD_PHASES
@@ -10,10 +11,12 @@ from portage.elog import elog_process
from portage.package.ebuild.config import config
from portage.package.ebuild.doebuild import doebuild_environment
from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.EbuildPhase import EbuildPhase
-from _emerge.PollScheduler import PollScheduler
-def spawn_nofetch(portdb, ebuild_path, settings=None):
+def spawn_nofetch(portdb, ebuild_path, settings=None, fd_pipes=None):
"""
This spawns pkg_nofetch if appropriate. The settings parameter
is useful only if setcpv has already been called in order
@@ -47,7 +50,7 @@ def spawn_nofetch(portdb, ebuild_path, settings=None):
settings = config(clone=settings)
if 'PORTAGE_PARALLEL_FETCHONLY' in settings:
- return
+ return os.EX_OK
# We must create our private PORTAGE_TMPDIR before calling
# doebuild_environment(), since lots of variables such
@@ -59,7 +62,7 @@ def spawn_nofetch(portdb, ebuild_path, settings=None):
settings['PORTAGE_TMPDIR'] = private_tmpdir
settings.backup_changes('PORTAGE_TMPDIR')
# private temp dir was just created, so it's not locked yet
- settings.pop('PORTAGE_BUILDIR_LOCKED', None)
+ settings.pop('PORTAGE_BUILDDIR_LOCKED', None)
try:
doebuild_environment(ebuild_path, 'nofetch',
@@ -73,14 +76,18 @@ def spawn_nofetch(portdb, ebuild_path, settings=None):
if 'fetch' not in restrict and \
'nofetch' not in defined_phases:
- return
+ return os.EX_OK
prepare_build_dirs(settings=settings)
ebuild_phase = EbuildPhase(background=False,
- phase='nofetch', scheduler=PollScheduler().sched_iface,
- settings=settings)
+ phase='nofetch',
+ scheduler=SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ fd_pipes=fd_pipes, settings=settings)
ebuild_phase.start()
ebuild_phase.wait()
elog_process(settings.mycpv, settings)
finally:
shutil.rmtree(private_tmpdir)
+
+ return ebuild_phase.returncode
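
Note: spawn_nofetch() now returns an exit status instead of None, so callers can propagate pkg_nofetch failures. A hypothetical caller:

    import portage
    from portage import os

    rc = spawn_nofetch(portdb, ebuild_path, settings=settings)
    if rc != os.EX_OK:
        portage.writemsg("!!! pkg_nofetch failed for %s\n" % ebuild_path,
            noiselevel=-1)
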
diff --git a/pym/portage/package/ebuild/config.py b/pym/portage/package/ebuild/config.py
index 2fa799f7e..e104501dc 100644
--- a/pym/portage/package/ebuild/config.py
+++ b/pym/portage/package/ebuild/config.py
@@ -1,6 +1,8 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = [
'autouse', 'best_from_dict', 'check_config_instance', 'config',
]
@@ -19,20 +21,21 @@ from _emerge.Package import Package
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.data:portage_gid',
+ 'portage.dbapi.vartree:vartree',
+ 'portage.package.ebuild.doebuild:_phase_func_map',
)
from portage import bsd_chflags, \
load_mod, os, selinux, _unicode_decode
from portage.const import CACHE_PATH, \
DEPCACHE_PATH, INCREMENTALS, MAKE_CONF_FILE, \
- MODULES_FILE_PATH, \
+ MODULES_FILE_PATH, PORTAGE_BASE_PATH, \
PRIVATE_PATH, PROFILE_PATH, USER_CONFIG_PATH, \
USER_VIRTUALS_FILE
from portage.dbapi import dbapi
from portage.dbapi.porttree import portdbapi
-from portage.dbapi.vartree import vartree
from portage.dep import Atom, isvalidatom, match_from_list, use_reduce, _repo_separator, _slot_separator
from portage.eapi import eapi_exports_AA, eapi_exports_merge_type, \
- eapi_supports_prefix, eapi_exports_replace_vars
+ eapi_supports_prefix, eapi_exports_replace_vars, _get_eapi_attrs
from portage.env.loaders import KeyValuePairFileLoader
from portage.exception import InvalidDependString, PortageException
from portage.localization import _
@@ -42,7 +45,8 @@ from portage.repository.config import load_repository_config
from portage.util import ensure_dirs, getconfig, grabdict, \
grabdict_package, grabfile, grabfile_package, LazyItemsDict, \
normalize_path, shlex_split, stack_dictlist, stack_dicts, stack_lists, \
- writemsg, writemsg_level
+ writemsg, writemsg_level, _eapi_cache
+from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
from portage.versions import catpkgsplit, catsplit, cpv_getkey, _pkg_str
from portage.package.ebuild._config import special_env_vars
@@ -55,10 +59,30 @@ from portage.package.ebuild._config.LocationsManager import LocationsManager
from portage.package.ebuild._config.MaskManager import MaskManager
from portage.package.ebuild._config.VirtualsManager import VirtualsManager
from portage.package.ebuild._config.helper import ordered_by_atom_specificity, prune_incremental
+from portage.package.ebuild._config.unpack_dependencies import load_unpack_dependencies_configuration
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
+_feature_flags_cache = {}
+
+def _get_feature_flags(eapi_attrs):
+ cache_key = (eapi_attrs.feature_flag_test, eapi_attrs.feature_flag_targetroot)
+ flags = _feature_flags_cache.get(cache_key)
+ if flags is not None:
+ return flags
+
+ flags = []
+ if eapi_attrs.feature_flag_test:
+ flags.append("test")
+ if eapi_attrs.feature_flag_targetroot:
+ flags.append("targetroot")
+
+ flags = frozenset(flags)
+ _feature_flags_cache[cache_key] = flags
+ return flags
+
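
Note: _get_feature_flags() memoizes one frozenset per (test, targetroot) combination, so repeated setcpv() calls share the same object. A sketch; the EAPI value is illustrative:

    eapi_attrs = _get_eapi_attrs("5")
    flags = _get_feature_flags(eapi_attrs)
    assert flags is _get_feature_flags(eapi_attrs)  # cached frozenset reused
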
def autouse(myvartree, use_cache=1, mysettings=None):
warnings.warn("portage.autouse() is deprecated",
DeprecationWarning, stacklevel=2)
@@ -123,9 +147,9 @@ class config(object):
"""
_constant_keys = frozenset(['PORTAGE_BIN_PATH', 'PORTAGE_GID',
- 'PORTAGE_PYM_PATH'])
+ 'PORTAGE_PYM_PATH', 'PORTAGE_PYTHONPATH'])
- _setcpv_aux_keys = ('DEFINED_PHASES', 'DEPEND', 'EAPI',
+ _setcpv_aux_keys = ('DEFINED_PHASES', 'DEPEND', 'EAPI', 'HDEPEND',
'INHERITED', 'IUSE', 'REQUIRED_USE', 'KEYWORDS', 'LICENSE', 'PDEPEND',
'PROPERTIES', 'PROVIDE', 'RDEPEND', 'SLOT',
'repository', 'RESTRICT', 'LICENSE',)
@@ -146,7 +170,7 @@ class config(object):
def __init__(self, clone=None, mycpv=None, config_profile_path=None,
config_incrementals=None, config_root=None, target_root=None,
eprefix=None, local_config=True, env=None,
- _unmatched_removal=False):
+ _unmatched_removal=False, repositories=None):
"""
@param clone: If provided, init will use deepcopy to copy by value the instance.
@type clone: Instance of config class.
@@ -160,7 +184,8 @@ class config(object):
@type config_incrementals: List
@param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
@type config_root: String
- @param target_root: __init__ override of $ROOT env variable.
+ @param target_root: the target root, which typically corresponds to the
+ value of the $ROOT env variable (default is /)
@type target_root: String
@param eprefix: set the EPREFIX variable (default is portage.const.EPREFIX)
@type eprefix: String
@@ -173,8 +198,14 @@ class config(object):
@param _unmatched_removal: Enabled by repoman when the
--unmatched-removal option is given.
@type _unmatched_removal: Boolean
+ @param repositories: Configuration of repositories.
+ Defaults to portage.repository.config.load_repository_config().
+ @type repositories: Instance of portage.repository.config.RepoConfigLoader class.
"""
+ # This is important when config is reloaded after emerge --sync.
+ _eapi_cache.clear()
+
# When initializing the global portage.settings instance, avoid
# raising exceptions whenever possible since exceptions thrown
# from 'import portage' or 'import portage.exceptions' statements
@@ -192,8 +223,10 @@ class config(object):
self.uvlist = []
self._accept_chost_re = None
self._accept_properties = None
+ self._accept_restrict = None
self._features_overrides = []
self._make_defaults = None
+ self._parent_stable = None
# _unknown_features records unknown features that
# have triggered warning messages, and ensures that
@@ -215,6 +248,8 @@ class config(object):
self.profiles = clone.profiles
self.packages = clone.packages
self.repositories = clone.repositories
+ self.unpack_dependencies = clone.unpack_dependencies
+ self._iuse_effective = clone._iuse_effective
self._iuse_implicit_match = clone._iuse_implicit_match
self._non_user_variables = clone._non_user_variables
self._env_d_blacklist = clone._env_d_blacklist
@@ -277,6 +312,8 @@ class config(object):
self._accept_properties = copy.deepcopy(clone._accept_properties)
self._ppropertiesdict = copy.deepcopy(clone._ppropertiesdict)
+ self._accept_restrict = copy.deepcopy(clone._accept_restrict)
+ self._paccept_restrict = copy.deepcopy(clone._paccept_restrict)
self._penvdict = copy.deepcopy(clone._penvdict)
self._expand_map = copy.deepcopy(clone._expand_map)
@@ -294,15 +331,30 @@ class config(object):
eprefix = locations_manager.eprefix
config_root = locations_manager.config_root
abs_user_config = locations_manager.abs_user_config
+ make_conf_paths = [
+ os.path.join(config_root, 'etc', 'make.conf'),
+ os.path.join(config_root, MAKE_CONF_FILE)
+ ]
+ try:
+ if os.path.samefile(*make_conf_paths):
+ make_conf_paths.pop()
+ except OSError:
+ pass
- make_conf = getconfig(
- os.path.join(config_root, MAKE_CONF_FILE),
- tolerant=tolerant, allow_sourcing=True) or {}
-
- make_conf.update(getconfig(
- os.path.join(abs_user_config, 'make.conf'),
- tolerant=tolerant, allow_sourcing=True,
- expand=make_conf) or {})
+ make_conf_count = 0
+ make_conf = {}
+ for x in make_conf_paths:
+ mygcfg = getconfig(x,
+ tolerant=tolerant, allow_sourcing=True,
+ expand=make_conf, recursive=True)
+ if mygcfg is not None:
+ make_conf.update(mygcfg)
+ make_conf_count += 1
+
+ if make_conf_count == 2:
+ writemsg("!!! %s\n" %
+ _("Found 2 make.conf files, using both '%s' and '%s'") %
+ tuple(make_conf_paths), noiselevel=-1)
# Allow ROOT setting to come from make.conf if it's not overridden
# by the constructor argument (from the calling environment).
@@ -335,8 +387,23 @@ class config(object):
# Allow make.globals to set default paths relative to ${EPREFIX}.
expand_map["EPREFIX"] = eprefix
- make_globals = getconfig(os.path.join(
- self.global_config_path, 'make.globals'),
+ if portage._not_installed:
+ make_globals_path = os.path.join(PORTAGE_BASE_PATH, "cnf", "make.globals")
+ else:
+ make_globals_path = os.path.join(self.global_config_path, "make.globals")
+ old_make_globals = os.path.join(config_root, "etc", "make.globals")
+ if os.path.isfile(old_make_globals) and \
+ not os.path.samefile(make_globals_path, old_make_globals):
+ # Don't warn if they refer to the same path, since
+ # that can be used for backward compatibility with
+ # old software.
+ writemsg("!!! %s\n" %
+ _("Found obsolete make.globals file: "
+ "'%s', (using '%s' instead)") %
+ (old_make_globals, make_globals_path),
+ noiselevel=-1)
+
+ make_globals = getconfig(make_globals_path,
tolerant=tolerant, expand=expand_map)
if make_globals is None:
make_globals = {}
@@ -426,6 +493,7 @@ class config(object):
known_repos = []
portdir = ""
portdir_overlay = ""
+ portdir_sync = None
for confs in [make_globals, make_conf, self.configdict["env"]]:
v = confs.get("PORTDIR")
if v is not None:
@@ -435,12 +503,52 @@ class config(object):
if v is not None:
portdir_overlay = v
known_repos.extend(shlex_split(v))
+ v = confs.get("SYNC")
+ if v is not None:
+ portdir_sync = v
+
known_repos = frozenset(known_repos)
self["PORTDIR"] = portdir
self["PORTDIR_OVERLAY"] = portdir_overlay
+ if portdir_sync:
+ self["SYNC"] = portdir_sync
self.lookuplist = [self.configdict["env"]]
- self.repositories = load_repository_config(self)
+ if repositories is None:
+ self.repositories = load_repository_config(self)
+ else:
+ self.repositories = repositories
+
+ self['PORTAGE_REPOSITORIES'] = self.repositories.config_string()
+ self.backup_changes('PORTAGE_REPOSITORIES')
+
+ #filling PORTDIR and PORTDIR_OVERLAY variable for compatibility
+ main_repo = self.repositories.mainRepo()
+ if main_repo is not None:
+ self["PORTDIR"] = main_repo.user_location
+ self.backup_changes("PORTDIR")
+ expand_map["PORTDIR"] = self["PORTDIR"]
+ # repoman controls PORTDIR_OVERLAY via the environment, so no
+ # special cases are needed here.
+ portdir_overlay = list(self.repositories.repoUserLocationList())
+ if portdir_overlay and portdir_overlay[0] == self["PORTDIR"]:
+ portdir_overlay = portdir_overlay[1:]
+
+ new_ov = []
+ if portdir_overlay:
+ for ov in portdir_overlay:
+ ov = normalize_path(ov)
+ if isdir_raise_eaccess(ov) or portage._sync_mode:
+ new_ov.append(portage._shell_quote(ov))
+ else:
+ writemsg(_("!!! Invalid PORTDIR_OVERLAY"
+ " (not a dir): '%s'\n") % ov, noiselevel=-1)
+
+ self["PORTDIR_OVERLAY"] = " ".join(new_ov)
+ self.backup_changes("PORTDIR_OVERLAY")
+ expand_map["PORTDIR_OVERLAY"] = self["PORTDIR_OVERLAY"]
+
+ locations_manager.set_port_dirs(self["PORTDIR"], self["PORTDIR_OVERLAY"])
locations_manager.load_profiles(self.repositories, known_repos)
profiles_complex = locations_manager.profiles_complex
@@ -460,12 +568,13 @@ class config(object):
x = Atom(x.lstrip('*'))
self.prevmaskdict.setdefault(x.cp, []).append(x)
+ self.unpack_dependencies = load_unpack_dependencies_configuration(self.repositories)
mygcfg = {}
- if self.profiles:
- mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
- tolerant=tolerant, expand=expand_map)
- for x in self.profiles]
+ if profiles_complex:
+ mygcfg_dlists = [getconfig(os.path.join(x.location, "make.defaults"),
+ tolerant=tolerant, expand=expand_map, recursive=x.portage1_directories)
+ for x in profiles_complex]
self._make_defaults = mygcfg_dlists
mygcfg = stack_dicts(mygcfg_dlists,
incrementals=self.incrementals)
@@ -474,15 +583,11 @@ class config(object):
self.configlist.append(mygcfg)
self.configdict["defaults"]=self.configlist[-1]
- mygcfg = getconfig(
- os.path.join(config_root, MAKE_CONF_FILE),
- tolerant=tolerant, allow_sourcing=True,
- expand=expand_map) or {}
-
- mygcfg.update(getconfig(
- os.path.join(abs_user_config, 'make.conf'),
- tolerant=tolerant, allow_sourcing=True,
- expand=expand_map) or {})
+ mygcfg = {}
+ for x in make_conf_paths:
+ mygcfg.update(getconfig(x,
+ tolerant=tolerant, allow_sourcing=True,
+ expand=expand_map, recursive=True) or {})
# Don't allow the user to override certain variables in make.conf
profile_only_variables = self.configdict["defaults"].get(
@@ -535,54 +640,25 @@ class config(object):
self.backup_changes("PORTAGE_CONFIGROOT")
self["ROOT"] = target_root
self.backup_changes("ROOT")
-
- # The PORTAGE_OVERRIDE_EPREFIX variable propagates the EPREFIX
- # of this config instance to any portage commands or API
- # consumers running in subprocesses.
self["EPREFIX"] = eprefix
self.backup_changes("EPREFIX")
- self["PORTAGE_OVERRIDE_EPREFIX"] = eprefix
- self.backup_changes("PORTAGE_OVERRIDE_EPREFIX")
self["EROOT"] = eroot
self.backup_changes("EROOT")
+ # The prefix of the running portage instance is used in the
+ # ebuild environment to implement the --host-root option for
+ # best_version and has_version.
+ self["PORTAGE_OVERRIDE_EPREFIX"] = portage.const.EPREFIX
+ self.backup_changes("PORTAGE_OVERRIDE_EPREFIX")
+
self._ppropertiesdict = portage.dep.ExtendedAtomDict(dict)
+ self._paccept_restrict = portage.dep.ExtendedAtomDict(dict)
self._penvdict = portage.dep.ExtendedAtomDict(dict)
- #filling PORTDIR and PORTDIR_OVERLAY variable for compatibility
- main_repo = self.repositories.mainRepo()
- if main_repo is not None:
- self["PORTDIR"] = main_repo.user_location
- self.backup_changes("PORTDIR")
-
- # repoman controls PORTDIR_OVERLAY via the environment, so no
- # special cases are needed here.
- portdir_overlay = list(self.repositories.repoUserLocationList())
- if portdir_overlay and portdir_overlay[0] == self["PORTDIR"]:
- portdir_overlay = portdir_overlay[1:]
-
- new_ov = []
- if portdir_overlay:
- shell_quote_re = re.compile(r"[\s\\\"'$`]")
- for ov in portdir_overlay:
- ov = normalize_path(ov)
- if os.path.isdir(ov):
- if shell_quote_re.search(ov) is not None:
- ov = portage._shell_quote(ov)
- new_ov.append(ov)
- else:
- writemsg(_("!!! Invalid PORTDIR_OVERLAY"
- " (not a dir): '%s'\n") % ov, noiselevel=-1)
-
- self["PORTDIR_OVERLAY"] = " ".join(new_ov)
- self.backup_changes("PORTDIR_OVERLAY")
-
- locations_manager.set_port_dirs(self["PORTDIR"], self["PORTDIR_OVERLAY"])
-
self._repo_make_defaults = {}
for repo in self.repositories.repos_with_profiles():
d = getconfig(os.path.join(repo.location, "profiles", "make.defaults"),
- tolerant=tolerant, expand=self.configdict["globals"].copy()) or {}
+ tolerant=tolerant, expand=self.configdict["globals"].copy(), recursive=repo.portage1_profiles) or {}
if d:
for k in chain(self._env_blacklist,
profile_only_variables, self._global_only_vars):
@@ -590,7 +666,8 @@ class config(object):
self._repo_make_defaults[repo.name] = d
#Read all USE related files from profiles and optionally from user config.
- self._use_manager = UseManager(self.repositories, profiles_complex, abs_user_config, user_config=local_config)
+ self._use_manager = UseManager(self.repositories, profiles_complex,
+ abs_user_config, self._isStable, user_config=local_config)
#Initialize all USE related variables we track ourselves.
self.usemask = self._use_manager.getUseMask()
self.useforce = self._use_manager.getUseForce()
@@ -620,6 +697,20 @@ class config(object):
for k, v in propdict.items():
self._ppropertiesdict.setdefault(k.cp, {})[k] = v
+ # package.accept_restrict
+ d = grabdict_package(os.path.join(
+ abs_user_config, "package.accept_restrict"),
+ recursive=True, allow_wildcard=True,
+ allow_repo=True, verify_eapi=False)
+ v = d.pop("*/*", None)
+ if v is not None:
+ if "ACCEPT_RESTRICT" in self.configdict["conf"]:
+ self.configdict["conf"]["ACCEPT_RESTRICT"] += " " + " ".join(v)
+ else:
+ self.configdict["conf"]["ACCEPT_RESTRICT"] = " ".join(v)
+ for k, v in d.items():
+ self._paccept_restrict.setdefault(k.cp, {})[k] = v
+
#package.env
penvdict = grabdict_package(os.path.join(
abs_user_config, "package.env"), recursive=1, allow_wildcard=True, \
@@ -706,21 +797,9 @@ class config(object):
self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:repo:env.d"
self.depcachedir = DEPCACHE_PATH
- if eprefix:
- # See comments about make.globals and EPREFIX
- # above. DEPCACHE_PATH is similar.
- if target_root == "/":
- # case (1) above
- self.depcachedir = os.path.join(eprefix,
- DEPCACHE_PATH.lstrip(os.sep))
- else:
- # case (2) above
- # For now, just assume DEPCACHE_PATH is relative
- # to EPREFIX.
- # TODO: Pass in more info to the constructor,
- # so we know the host system configuration.
- self.depcachedir = os.path.join(eprefix,
- DEPCACHE_PATH.lstrip(os.sep))
+ if portage.const.EPREFIX:
+ self.depcachedir = os.path.join(portage.const.EPREFIX,
+ DEPCACHE_PATH.lstrip(os.sep))
if self.get("PORTAGE_DEPCACHEDIR", None):
self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
@@ -787,12 +866,17 @@ class config(object):
self[var] = default_val
self.backup_changes(var)
+ if portage._internal_caller:
+ self["PORTAGE_INTERNAL_CALLER"] = "1"
+ self.backup_changes("PORTAGE_INTERNAL_CALLER")
+
# initialize self.features
self.regenerate()
if bsd_chflags:
self.features.add('chflags')
+ self._iuse_effective = self._calc_iuse_effective()
self._iuse_implicit_match = _iuse_implicit_match_cache(self)
self._validate_commands()
@@ -802,11 +886,6 @@ class config(object):
self[k] = self[k].lower()
self.backup_changes(k)
- if main_repo is not None and not main_repo.sync:
- main_repo_sync = self.get("SYNC")
- if main_repo_sync:
- main_repo.sync = main_repo_sync
-
# The first constructed config object initializes these modules,
# and subsequent calls to the _init() functions have no effect.
portage.output._init(config_root=self['PORTAGE_CONFIGROOT'])
@@ -949,13 +1028,23 @@ class config(object):
writemsg(_("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group),
noiselevel=-1)
- profile_broken = not self.profile_path or \
- not os.path.exists(os.path.join(self.profile_path, "parent")) and \
- os.path.exists(os.path.join(self["PORTDIR"], "profiles"))
+ profile_broken = False
+
+ if not self.profile_path:
+ profile_broken = True
+ else:
+ # If any one of these files exists, then
+ # the profile is considered valid.
+ for x in ("make.defaults", "parent",
+ "packages", "use.force", "use.mask"):
+ if exists_raise_eaccess(os.path.join(self.profile_path, x)):
+ break
+ else:
+ profile_broken = True
- if profile_broken:
+ if profile_broken and not portage._sync_mode:
abs_profile_path = None
- for x in (PROFILE_PATH, 'etc/portage/make.profile'):
+ for x in (PROFILE_PATH, 'etc/make.profile'):
x = os.path.join(self["PORTAGE_CONFIGROOT"], x)
try:
os.lstat(x)
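
A short sketch of the sentinel-file test above, assuming only that a profile directory counts as valid when any one of the listed files is present (os.path.exists stands in for exists_raise_eaccess):

import os

def profile_is_valid(profile_path):
	for name in ("make.defaults", "parent", "packages",
			"use.force", "use.mask"):
		if os.path.exists(os.path.join(profile_path, name)):
			return True
	return False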
@@ -1121,8 +1210,11 @@ class config(object):
the previously calculated USE settings.
"""
- def __init__(self, use, usemask, iuse_implicit,
+ def __init__(self, settings, unfiltered_use,
+ use, usemask, iuse_implicit,
use_expand_split, use_expand_dict):
+ self._settings = settings
+ self._unfiltered_use = unfiltered_use
self._use = use
self._usemask = usemask
self._iuse_implicit = iuse_implicit
@@ -1177,13 +1269,32 @@ class config(object):
# Don't export empty USE_EXPAND vars unless the user config
# exports them as empty. This is required for vars such as
# LINGUAS, where unset and empty have different meanings.
+ # The special '*' token is understood by ebuild.sh, which
+ # will unset the variable so that things like LINGUAS work
+ # properly (see bug #459350).
if has_wildcard:
- # ebuild.sh will see this and unset the variable so
- # that things like LINGUAS work properly
value = '*'
else:
if has_iuse:
- value = ''
+ already_set = False
+ # Skip the first 'env' configdict, in order to
+ # avoid infinite recursion here, since that dict's
+ # __getitem__ calls the current __getitem__.
+ for d in self._settings.lookuplist[1:]:
+ if key in d:
+ already_set = True
+ break
+
+ if not already_set:
+ for x in self._unfiltered_use:
+ if x[:prefix_len] == prefix:
+ already_set = True
+ break
+
+ if already_set:
+ value = ''
+ else:
+ value = '*'
else:
# It's not in IUSE, so just allow the variable content
# to pass through if it is defined somewhere. This
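
A simplified model (stand-in names, not the real method) of the three outcomes above for a USE_EXPAND variable such as LINGUAS:

def use_expand_value(key, prefix, has_wildcard, has_iuse,
		settings_layers, unfiltered_use):
	if has_wildcard:
		return '*'  # ebuild.sh unsets the variable (bug #459350)
	if has_iuse:
		already_set = any(key in d for d in settings_layers)
		already_set = already_set or any(
			x.startswith(prefix) for x in unfiltered_use)
		return '' if already_set else '*'
	return None  # not in IUSE: a defined value passes through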
@@ -1219,7 +1330,7 @@ class config(object):
if not isinstance(mycpv, basestring):
pkg = mycpv
mycpv = pkg.cpv
- mydb = pkg.metadata
+ mydb = pkg._metadata
explicit_iuse = pkg.iuse.all
args_hash = (mycpv, id(pkg))
if pkg.built:
@@ -1240,6 +1351,7 @@ class config(object):
iuse = ""
pkg_configdict = self.configdict["pkg"]
previous_iuse = pkg_configdict.get("IUSE")
+ previous_iuse_effective = pkg_configdict.get("IUSE_EFFECTIVE")
previous_features = pkg_configdict.get("FEATURES")
aux_keys = self._setcpv_aux_keys
@@ -1251,6 +1363,7 @@ class config(object):
pkg_configdict["CATEGORY"] = cat
pkg_configdict["PF"] = pf
repository = None
+ eapi = None
if mydb:
if not hasattr(mydb, "aux_get"):
for k in aux_keys:
@@ -1277,14 +1390,16 @@ class config(object):
# Empty USE means this dbapi instance does not contain
# built packages.
built_use = None
+ eapi = pkg_configdict['EAPI']
repository = pkg_configdict.pop("repository", None)
if repository is not None:
pkg_configdict["PORTAGE_REPO_NAME"] = repository
- slot = pkg_configdict["SLOT"]
iuse = pkg_configdict["IUSE"]
if pkg is None:
- cpv_slot = _pkg_str(self.mycpv, slot=slot, repo=repository)
+ self.mycpv = _pkg_str(self.mycpv, metadata=pkg_configdict,
+ settings=self)
+ cpv_slot = self.mycpv
else:
cpv_slot = pkg
pkginternaluse = []
@@ -1294,6 +1409,9 @@ class config(object):
elif x.startswith("-"):
pkginternaluse.append(x)
pkginternaluse = " ".join(pkginternaluse)
+
+ eapi_attrs = _get_eapi_attrs(eapi)
+
if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
self.configdict["pkginternal"]["USE"] = pkginternaluse
has_changed = True
@@ -1424,30 +1542,70 @@ class config(object):
# If reset() has not been called, it's safe to return
# early if IUSE has not changed.
- if not has_changed and previous_iuse == iuse:
+ if not has_changed and previous_iuse == iuse and \
+				((previous_iuse_effective is not None) == eapi_attrs.iuse_effective):
return
# Filter out USE flags that aren't part of IUSE. This has to
# be done for every setcpv() call since practically every
# package has different IUSE.
use = set(self["USE"].split())
+ unfiltered_use = frozenset(use)
if explicit_iuse is None:
explicit_iuse = frozenset(x.lstrip("+-") for x in iuse.split())
- iuse_implicit_match = self._iuse_implicit_match
- portage_iuse = self._get_implicit_iuse()
- portage_iuse.update(explicit_iuse)
+
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = self._iuse_effective_match
+ portage_iuse = set(self._iuse_effective)
+ portage_iuse.update(explicit_iuse)
+ self.configdict["pkg"]["IUSE_EFFECTIVE"] = \
+ " ".join(sorted(portage_iuse))
+ else:
+ iuse_implicit_match = self._iuse_implicit_match
+ portage_iuse = self._get_implicit_iuse()
+ portage_iuse.update(explicit_iuse)
# PORTAGE_IUSE is not always needed so it's lazily evaluated.
self.configdict["env"].addLazySingleton(
"PORTAGE_IUSE", _lazy_iuse_regex, portage_iuse)
- ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
+ if pkg is None:
+ raw_restrict = pkg_configdict.get("RESTRICT")
+ else:
+ raw_restrict = pkg._raw_metadata["RESTRICT"]
+
+ restrict_test = False
+ if raw_restrict:
+ try:
+ if built_use is not None:
+ restrict = use_reduce(raw_restrict,
+ uselist=built_use, flat=True)
+ else:
+ # Use matchnone=True to ignore USE conditional parts
+ # of RESTRICT, since we want to know whether to mask
+ # the "test" flag _before_ we know the USE values
+ # that would be needed to evaluate the USE
+ # conditionals (see bug #273272).
+ restrict = use_reduce(raw_restrict,
+ matchnone=True, flat=True)
+ except PortageException:
+ pass
+ else:
+ restrict_test = "test" in restrict
+
+ ebuild_force_test = not restrict_test and \
+ self.get("EBUILD_FORCE_TEST") == "1"
+
if ebuild_force_test and \
not hasattr(self, "_ebuild_force_test_msg_shown"):
self._ebuild_force_test_msg_shown = True
writemsg(_("Forcing test.\n"), noiselevel=-1)
- if "test" in self.features:
- if "test" in self.usemask and not ebuild_force_test:
+
+ if "test" in explicit_iuse or iuse_implicit_match("test"):
+ if "test" not in self.features:
+ use.discard("test")
+ elif restrict_test or \
+ ("test" in self.usemask and not ebuild_force_test):
# "test" is in IUSE and USE=test is masked, so execution
# of src_test() probably is not reliable. Therefore,
# temporarily disable FEATURES=test just for this package.
@@ -1460,6 +1618,13 @@ class config(object):
self.usemask = \
frozenset(x for x in self.usemask if x != "test")
+ if eapi_attrs.feature_flag_targetroot and \
+ ("targetroot" in explicit_iuse or iuse_implicit_match("targetroot")):
+ if self["ROOT"] != "/":
+ use.add("targetroot")
+ else:
+ use.discard("targetroot")
+
# Allow _* flags from USE_EXPAND wildcards to pass through here.
use.difference_update([x for x in use \
if (x not in explicit_iuse and \
@@ -1470,7 +1635,8 @@ class config(object):
# comparison instead of startswith().
use_expand_split = set(x.lower() for \
x in self.get('USE_EXPAND', '').split())
- lazy_use_expand = self._lazy_use_expand(use, self.usemask,
+ lazy_use_expand = self._lazy_use_expand(
+ self, unfiltered_use, use, self.usemask,
portage_iuse, use_expand_split, self._use_expand_dict)
use_expand_iuses = {}
@@ -1500,6 +1666,14 @@ class config(object):
self.configdict['env'].addLazySingleton(k,
lazy_use_expand.__getitem__, k)
+ for k in self.get("USE_EXPAND_UNPREFIXED", "").split():
+ var_split = self.get(k, '').split()
+ var_split = [ x for x in var_split if x in use ]
+ if var_split:
+ self.configlist[-1][k] = ' '.join(var_split)
+ elif k in self:
+ self.configlist[-1][k] = ''
+
# Filtered for the ebuild environment. Store this in a separate
# attribute since we still want to be able to see global USE
# settings for things like emerge --info.
@@ -1507,6 +1681,10 @@ class config(object):
self.configdict["env"]["PORTAGE_USE"] = \
" ".join(sorted(x for x in use if x[-2:] != '_*'))
+ # Clear the eapi cache here rather than in the constructor, since
+ # setcpv triggers lazy instantiation of things like _use_manager.
+ _eapi_cache.clear()
+
def _grab_pkg_env(self, penv, container, protected_keys=None):
if protected_keys is None:
protected_keys = ()
@@ -1540,9 +1718,42 @@ class config(object):
else:
container[k] = v
+ def _iuse_effective_match(self, flag):
+ return flag in self._iuse_effective
+
+ def _calc_iuse_effective(self):
+ """
+ Beginning with EAPI 5, IUSE_EFFECTIVE is defined by PMS.
+ """
+ iuse_effective = []
+ iuse_effective.extend(self.get("IUSE_IMPLICIT", "").split())
+
+ # USE_EXPAND_IMPLICIT should contain things like ARCH, ELIBC,
+ # KERNEL, and USERLAND.
+ use_expand_implicit = frozenset(
+ self.get("USE_EXPAND_IMPLICIT", "").split())
+
+ # USE_EXPAND_UNPREFIXED should contain at least ARCH, and
+ # USE_EXPAND_VALUES_ARCH should contain all valid ARCH flags.
+ for v in self.get("USE_EXPAND_UNPREFIXED", "").split():
+ if v not in use_expand_implicit:
+ continue
+ iuse_effective.extend(
+ self.get("USE_EXPAND_VALUES_" + v, "").split())
+
+ use_expand = frozenset(self.get("USE_EXPAND", "").split())
+ for v in use_expand_implicit:
+ if v not in use_expand:
+ continue
+ lower_v = v.lower()
+ for x in self.get("USE_EXPAND_VALUES_" + v, "").split():
+ iuse_effective.append(lower_v + "_" + x)
+
+ return frozenset(iuse_effective)
+
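
A worked example of the computation above, with illustrative profile values (these are not taken from any real profile):

def calc_iuse_effective(s):
	iuse = s.get("IUSE_IMPLICIT", "").split()
	implicit = frozenset(s.get("USE_EXPAND_IMPLICIT", "").split())
	for v in s.get("USE_EXPAND_UNPREFIXED", "").split():
		if v in implicit:
			iuse.extend(s.get("USE_EXPAND_VALUES_" + v, "").split())
	expand = frozenset(s.get("USE_EXPAND", "").split())
	for v in implicit:
		if v in expand:
			iuse.extend(v.lower() + "_" + x
				for x in s.get("USE_EXPAND_VALUES_" + v, "").split())
	return frozenset(iuse)

calc_iuse_effective({
	"IUSE_IMPLICIT": "prefix selinux",
	"USE_EXPAND_IMPLICIT": "ARCH ELIBC",
	"USE_EXPAND_UNPREFIXED": "ARCH",
	"USE_EXPAND": "ELIBC",
	"USE_EXPAND_VALUES_ARCH": "amd64 x86",
	"USE_EXPAND_VALUES_ELIBC": "glibc uclibc",
})
# -> frozenset({'prefix', 'selinux', 'amd64', 'x86',
#               'elibc_glibc', 'elibc_uclibc'})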
def _get_implicit_iuse(self):
"""
- Some flags are considered to
+ Prior to EAPI 5, these flags are considered to
be implicit members of IUSE:
* Flags derived from ARCH
* Flags derived from USE_EXPAND_HIDDEN variables
@@ -1579,11 +1790,11 @@ class config(object):
return iuse_implicit
- def _getUseMask(self, pkg):
- return self._use_manager.getUseMask(pkg)
+ def _getUseMask(self, pkg, stable=None):
+ return self._use_manager.getUseMask(pkg, stable=stable)
- def _getUseForce(self, pkg):
- return self._use_manager.getUseForce(pkg)
+ def _getUseForce(self, pkg, stable=None):
+ return self._use_manager.getUseForce(pkg, stable=stable)
def _getMaskAtom(self, cpv, metadata):
"""
@@ -1648,6 +1859,11 @@ class config(object):
return x
return None
+ def _isStable(self, pkg):
+ return self._keywords_manager.isStable(pkg,
+ self.get("ACCEPT_KEYWORDS", ""),
+ self.configdict["backupenv"].get("ACCEPT_KEYWORDS", ""))
+
def _getKeywords(self, cpv, metadata):
return self._keywords_manager.getKeywords(cpv, metadata["SLOT"], \
metadata.get("KEYWORDS", ""), metadata.get("repository"))
@@ -1736,9 +1952,10 @@ class config(object):
@return: A list of properties that have not been accepted.
"""
accept_properties = self._accept_properties
- if not hasattr(cpv, 'slot'):
- cpv = _pkg_str(cpv, slot=metadata["SLOT"],
- repo=metadata.get("repository"))
+ try:
+ cpv.slot
+ except AttributeError:
+ cpv = _pkg_str(cpv, metadata=metadata, settings=self)
cp = cpv_getkey(cpv)
cpdict = self._ppropertiesdict.get(cp)
if cpdict:
@@ -1750,7 +1967,6 @@ class config(object):
properties_str = metadata.get("PROPERTIES", "")
properties = set(use_reduce(properties_str, matchall=1, flat=True))
- properties.discard('||')
acceptable_properties = set()
for x in accept_properties:
@@ -1768,40 +1984,58 @@ class config(object):
else:
use = []
- properties_struct = use_reduce(properties_str, uselist=use, opconvert=True)
- return self._getMaskedProperties(properties_struct, acceptable_properties)
-
- def _getMaskedProperties(self, properties_struct, acceptable_properties):
- if not properties_struct:
- return []
- if properties_struct[0] == "||":
- ret = []
- for element in properties_struct[1:]:
- if isinstance(element, list):
- if element:
- tmp = self._getMaskedProperties(
- element, acceptable_properties)
- if not tmp:
- return []
- ret.extend(tmp)
- else:
- if element in acceptable_properties:
- return[]
- ret.append(element)
- # Return all masked properties, since we don't know which combination
- # (if any) the user will decide to unmask
- return ret
-
- ret = []
- for element in properties_struct:
- if isinstance(element, list):
- if element:
- ret.extend(self._getMaskedProperties(element,
- acceptable_properties))
+ return [x for x in use_reduce(properties_str, uselist=use, flat=True)
+ if x not in acceptable_properties]
+
+ def _getMissingRestrict(self, cpv, metadata):
+ """
+ Take a RESTRICT string and return a list of any tokens the user
+ may need to accept for the given package. The returned list will not
+ contain any tokens that have already been accepted. This method
+ can throw an InvalidDependString exception.
+
+ @param cpv: The package name (for package.accept_restrict support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: A list of tokens that have not been accepted.
+ """
+ accept_restrict = self._accept_restrict
+ try:
+ cpv.slot
+ except AttributeError:
+ cpv = _pkg_str(cpv, metadata=metadata, settings=self)
+ cp = cpv_getkey(cpv)
+ cpdict = self._paccept_restrict.get(cp)
+ if cpdict:
+ paccept_restrict_list = ordered_by_atom_specificity(cpdict, cpv)
+ if paccept_restrict_list:
+ accept_restrict = list(self._accept_restrict)
+ for x in paccept_restrict_list:
+ accept_restrict.extend(x)
+
+ restrict_str = metadata.get("RESTRICT", "")
+ all_restricts = set(use_reduce(restrict_str, matchall=1, flat=True))
+
+ acceptable_restricts = set()
+ for x in accept_restrict:
+ if x == '*':
+ acceptable_restricts.update(all_restricts)
+ elif x == '-*':
+ acceptable_restricts.clear()
+ elif x[:1] == '-':
+ acceptable_restricts.discard(x[1:])
else:
- if element not in acceptable_properties:
- ret.append(element)
- return ret
+ acceptable_restricts.add(x)
+
+ if "?" in restrict_str:
+ use = metadata["USE"].split()
+ else:
+ use = []
+
+ return [x for x in use_reduce(restrict_str, uselist=use, flat=True)
+ if x not in acceptable_restricts]
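
The token semantics above in isolation (a hedged sketch, not the method itself): '*' accepts everything the package declares, '-*' resets the set, and '-tok' retracts a single token.

def acceptable_restricts(accept_restrict, all_restricts):
	acc = set()
	for x in accept_restrict:
		if x == '*':
			acc.update(all_restricts)
		elif x == '-*':
			acc.clear()
		elif x[:1] == '-':
			acc.discard(x[1:])
		else:
			acc.add(x)
	return acc

# acceptable_restricts(['*', '-fetch'], {'fetch', 'mirror'})
# -> {'mirror'}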
def _accept_chost(self, cpv, metadata):
"""
@@ -1940,6 +2174,18 @@ class config(object):
# repoman will accept any property
self._accept_properties = ('*',)
+ if self.local_config:
+ mysplit = []
+ for curdb in mydbs:
+ mysplit.extend(curdb.get('ACCEPT_RESTRICT', '').split())
+ mysplit = prune_incremental(mysplit)
+ self.configlist[-1]['ACCEPT_RESTRICT'] = ' '.join(mysplit)
+ if tuple(mysplit) != self._accept_restrict:
+ self._accept_restrict = tuple(mysplit)
+ else:
+			# repoman will accept any restrict
+ self._accept_restrict = ('*',)
+
increment_lists = {}
for k in myincrementals:
incremental_list = []
@@ -1994,6 +2240,8 @@ class config(object):
if v is not None:
use_expand_dict[k] = v
+ use_expand_unprefixed = self.get("USE_EXPAND_UNPREFIXED", "").split()
+
 	# In order to best accommodate the long-standing practice of
# setting default USE_EXPAND variables in the profile's
# make.defaults, we translate these variables into their
@@ -2007,6 +2255,12 @@ class config(object):
continue
use = cfg.get("USE", "")
expand_use = []
+
+ for k in use_expand_unprefixed:
+ v = cfg.get(k)
+ if v is not None:
+ expand_use.extend(v.split())
+
for k in use_expand_dict:
v = cfg.get(k)
if v is None:
@@ -2044,6 +2298,17 @@ class config(object):
iuse = [x.lstrip("+-") for x in iuse.split()]
myflags = set()
for curdb in self.uvlist:
+
+ for k in use_expand_unprefixed:
+ v = curdb.get(k)
+ if v is None:
+ continue
+ for x in v.split():
+ if x[:1] == "-":
+ myflags.discard(x[1:])
+ else:
+ myflags.add(x)
+
cur_use_expand = [x for x in use_expand if x in curdb]
mysplit = curdb.get("USE", "").split()
if not mysplit and not cur_use_expand:
@@ -2160,6 +2425,14 @@ class config(object):
elif k in self:
self.configlist[-1][k] = ''
+ for k in use_expand_unprefixed:
+ var_split = self.get(k, '').split()
+ var_split = [ x for x in var_split if x in myflags ]
+ if var_split:
+ self.configlist[-1][k] = ' '.join(var_split)
+ elif k in self:
+ self.configlist[-1][k] = ''
+
@property
def virts_p(self):
warnings.warn("portage config.virts_p attribute " + \
@@ -2220,8 +2493,22 @@ class config(object):
elif mykey == "PORTAGE_PYM_PATH":
return portage._pym_path
+ elif mykey == "PORTAGE_PYTHONPATH":
+ value = [x for x in \
+ self.backupenv.get("PYTHONPATH", "").split(":") if x]
+ need_pym_path = True
+ if value:
+ try:
+ need_pym_path = not os.path.samefile(value[0],
+ portage._pym_path)
+ except OSError:
+ pass
+ if need_pym_path:
+ value.insert(0, portage._pym_path)
+ return ":".join(value)
+
elif mykey == "PORTAGE_GID":
- return _unicode_decode(str(portage_gid))
+ return "%s" % portage_gid
for d in self.lookuplist:
try:
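
The PORTAGE_PYTHONPATH branch above, restated as a standalone sketch: pym_path is prepended unless the first existing PYTHONPATH entry already refers to the same file.

import os

def portage_pythonpath(pythonpath, pym_path):
	value = [x for x in pythonpath.split(":") if x]
	need_pym_path = True
	if value:
		try:
			need_pym_path = not os.path.samefile(value[0], pym_path)
		except OSError:
			pass  # nonexistent entry: keep prepending
	if need_pym_path:
		value.insert(0, pym_path)
	return ":".join(value)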
@@ -2308,6 +2595,7 @@ class config(object):
environ_filter = self._environ_filter
eapi = self.get('EAPI')
+ eapi_attrs = _get_eapi_attrs(eapi)
phase = self.get('EBUILD_PHASE')
filter_calling_env = False
if self.mycpv is not None and \
@@ -2389,14 +2677,20 @@ class config(object):
not eapi_exports_replace_vars(eapi):
mydict.pop("REPLACED_BY_VERSION", None)
+ if phase is not None and eapi_attrs.exports_EBUILD_PHASE_FUNC:
+ phase_func = _phase_func_map.get(phase)
+ if phase_func is not None:
+ mydict["EBUILD_PHASE_FUNC"] = phase_func
+
return mydict
def thirdpartymirrors(self):
if getattr(self, "_thirdpartymirrors", None) is None:
- profileroots = [os.path.join(self["PORTDIR"], "profiles")]
- for x in shlex_split(self.get("PORTDIR_OVERLAY", "")):
- profileroots.insert(0, os.path.join(x, "profiles"))
- thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
+ thirdparty_lists = []
+ for repo_name in reversed(self.repositories.prepos_order):
+ thirdparty_lists.append(grabdict(os.path.join(
+ self.repositories[repo_name].location,
+ "profiles", "thirdpartymirrors")))
self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
return self._thirdpartymirrors
diff --git a/pym/portage/package/ebuild/deprecated_profile_check.py b/pym/portage/package/ebuild/deprecated_profile_check.py
index 3fab4da6e..fdb19b4ac 100644
--- a/pym/portage/package/ebuild/deprecated_profile_check.py
+++ b/pym/portage/package/ebuild/deprecated_profile_check.py
@@ -1,10 +1,11 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['deprecated_profile_check']
import io
+import portage
from portage import os, _encodings, _unicode_encode
from portage.const import DEPRECATED_PROFILE_FILE
from portage.localization import _
@@ -12,16 +13,32 @@ from portage.output import colorize
from portage.util import writemsg
def deprecated_profile_check(settings=None):
- config_root = "/"
+ config_root = None
+ eprefix = None
+ deprecated_profile_file = None
if settings is not None:
config_root = settings["PORTAGE_CONFIGROOT"]
- deprecated_profile_file = os.path.join(config_root,
- DEPRECATED_PROFILE_FILE)
- if not os.access(deprecated_profile_file, os.R_OK):
- return False
- dcontent = io.open(_unicode_encode(deprecated_profile_file,
+ eprefix = settings["EPREFIX"]
+ for x in reversed(settings.profiles):
+ deprecated_profile_file = os.path.join(x, "deprecated")
+ if os.access(deprecated_profile_file, os.R_OK):
+ break
+ else:
+ deprecated_profile_file = None
+
+ if deprecated_profile_file is None:
+ deprecated_profile_file = os.path.join(config_root or "/",
+ DEPRECATED_PROFILE_FILE)
+ if not os.access(deprecated_profile_file, os.R_OK):
+ deprecated_profile_file = os.path.join(config_root or "/",
+ 'etc', 'make.profile', 'deprecated')
+ if not os.access(deprecated_profile_file, os.R_OK):
+ return
+
+ with io.open(_unicode_encode(deprecated_profile_file,
encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['content'], errors='replace').readlines()
+ mode='r', encoding=_encodings['content'], errors='replace') as f:
+ dcontent = f.readlines()
writemsg(colorize("BAD", _("\n!!! Your current profile is "
"deprecated and not supported anymore.")) + "\n", noiselevel=-1)
writemsg(colorize("BAD", _("!!! Use eselect profile to update your "
@@ -30,13 +47,37 @@ def deprecated_profile_check(settings=None):
writemsg(colorize("BAD", _("!!! Please refer to the "
"Gentoo Upgrading Guide.")) + "\n", noiselevel=-1)
return True
- newprofile = dcontent[0]
+ newprofile = dcontent[0].rstrip("\n")
writemsg(colorize("BAD", _("!!! Please upgrade to the "
- "following profile if possible:")) + "\n", noiselevel=-1)
- writemsg(8*" " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
+ "following profile if possible:")) + "\n\n", noiselevel=-1)
+ writemsg(8*" " + colorize("GOOD", newprofile) + "\n\n", noiselevel=-1)
if len(dcontent) > 1:
writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
for myline in dcontent[1:]:
writemsg(myline, noiselevel=-1)
writemsg("\n\n", noiselevel=-1)
+ else:
+ writemsg(_("You may use the following command to upgrade:\n\n"), noiselevel=-1)
+ writemsg(8*" " + colorize("INFORM", 'eselect profile set ' +
+ newprofile) + "\n\n", noiselevel=-1)
+
+ if settings is not None:
+ main_repo_loc = settings.repositories.mainRepoLocation()
+ new_profile_path = os.path.join(main_repo_loc,
+			"profiles", newprofile)
+
+ if os.path.isdir(new_profile_path):
+ new_config = portage.config(config_root=config_root,
+ config_profile_path=new_profile_path,
+ eprefix=eprefix)
+
+ if not new_config.profiles:
+ writemsg("\n %s %s\n" % (colorize("WARN", "*"),
+ _("You must update portage before you "
+ "can migrate to the above profile.")), noiselevel=-1)
+ writemsg(" %s %s\n\n" % (colorize("WARN", "*"),
+ _("In order to update portage, "
+ "run 'emerge --oneshot portage'.")),
+ noiselevel=-1)
+
return True
diff --git a/pym/portage/package/ebuild/digestcheck.py b/pym/portage/package/ebuild/digestcheck.py
index 8705639d1..e207ba841 100644
--- a/pym/portage/package/ebuild/digestcheck.py
+++ b/pym/portage/package/ebuild/digestcheck.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['digestcheck']
@@ -6,6 +6,7 @@ __all__ = ['digestcheck']
import warnings
from portage import os, _encodings, _unicode_decode
+from portage.checksum import _hash_filter
from portage.exception import DigestException, FileNotFound
from portage.localization import _
from portage.output import EOutput
@@ -28,6 +29,9 @@ def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
return 1
pkgdir = mysettings["O"]
+ hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
if mf is None:
mf = mysettings.repositories.get_repo_for_location(
os.path.dirname(os.path.dirname(pkgdir)))
@@ -38,15 +42,16 @@ def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
if not mf.thin and strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
if mf.fhashdict.get("EBUILD"):
eout.ebegin(_("checking ebuild checksums ;-)"))
- mf.checkTypeHashes("EBUILD")
+ mf.checkTypeHashes("EBUILD", hash_filter=hash_filter)
eout.eend(0)
if mf.fhashdict.get("AUX"):
eout.ebegin(_("checking auxfile checksums ;-)"))
- mf.checkTypeHashes("AUX")
+ mf.checkTypeHashes("AUX", hash_filter=hash_filter)
eout.eend(0)
if mf.fhashdict.get("MISC"):
eout.ebegin(_("checking miscfile checksums ;-)"))
- mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
+ mf.checkTypeHashes("MISC", ignoreMissingFiles=True,
+ hash_filter=hash_filter)
eout.eend(0)
for f in myfiles:
eout.ebegin(_("checking %s ;-)") % f)
@@ -58,7 +63,7 @@ def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
writemsg(_("\n!!! Missing digest for '%s'\n") % (f,),
noiselevel=-1)
return 0
- mf.checkFileHashes(ftype, f)
+ mf.checkFileHashes(ftype, f, hash_filter=hash_filter)
eout.eend(0)
except FileNotFound as e:
eout.eend(1)
diff --git a/pym/portage/package/ebuild/digestgen.py b/pym/portage/package/ebuild/digestgen.py
index 6ad339737..95d02db9b 100644
--- a/pym/portage/package/ebuild/digestgen.py
+++ b/pym/portage/package/ebuild/digestgen.py
@@ -112,67 +112,64 @@ def digestgen(myarchives=None, mysettings=None, myportdb=None):
missing_files.append(myfile)
continue
- if missing_files:
- for myfile in missing_files:
- uris = set()
- all_restrict = set()
- for cpv in distfiles_map[myfile]:
- uris.update(myportdb.getFetchMap(
- cpv, mytree=mytree)[myfile])
- restrict = myportdb.aux_get(cpv, ['RESTRICT'],
- mytree=mytree)[0]
- # Here we ignore conditional parts of RESTRICT since
- # they don't apply unconditionally. Assume such
- # conditionals only apply on the client side where
- # digestgen() does not need to be called.
- all_restrict.update(use_reduce(restrict,
- flat=True, matchnone=True))
-
- # fetch() uses CATEGORY and PF to display a message
- # when fetch restriction is triggered.
- cat, pf = catsplit(cpv)
- mysettings["CATEGORY"] = cat
- mysettings["PF"] = pf
-
- # fetch() uses PORTAGE_RESTRICT to control fetch
- # restriction, which is only applied to files that
- # are not fetchable via a mirror:// URI.
- mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)
-
- try:
- st = os.stat(os.path.join(
- mysettings["DISTDIR"],myfile))
- except OSError:
- st = None
-
- if not fetch({myfile : uris}, mysettings):
- myebuild = os.path.join(mysettings["O"],
- catsplit(cpv)[1] + ".ebuild")
- spawn_nofetch(myportdb, myebuild)
- writemsg(_("!!! Fetch failed for %s, can't update "
- "Manifest\n") % myfile, noiselevel=-1)
- if myfile in dist_hashes and \
- st is not None and st.st_size > 0:
- # stat result is obtained before calling fetch(),
- # since fetch may rename the existing file if the
- # digest does not match.
- writemsg(_("!!! If you would like to "
- "forcefully replace the existing "
- "Manifest entry\n!!! for %s, use "
- "the following command:\n") % myfile + \
- "!!! " + colorize("INFORM",
- "ebuild --force %s manifest" % \
- os.path.basename(myebuild)) + "\n",
- noiselevel=-1)
- return 0
+ for myfile in missing_files:
+ uris = set()
+ all_restrict = set()
+ for cpv in distfiles_map[myfile]:
+ uris.update(myportdb.getFetchMap(
+ cpv, mytree=mytree)[myfile])
+ restrict = myportdb.aux_get(cpv, ['RESTRICT'], mytree=mytree)[0]
+ # Here we ignore conditional parts of RESTRICT since
+ # they don't apply unconditionally. Assume such
+ # conditionals only apply on the client side where
+ # digestgen() does not need to be called.
+ all_restrict.update(use_reduce(restrict,
+ flat=True, matchnone=True))
+
+ # fetch() uses CATEGORY and PF to display a message
+ # when fetch restriction is triggered.
+ cat, pf = catsplit(cpv)
+ mysettings["CATEGORY"] = cat
+ mysettings["PF"] = pf
+
+ # fetch() uses PORTAGE_RESTRICT to control fetch
+ # restriction, which is only applied to files that
+ # are not fetchable via a mirror:// URI.
+ mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)
+
+ try:
+ st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
+ except OSError:
+ st = None
+
+ if not fetch({myfile : uris}, mysettings):
+ myebuild = os.path.join(mysettings["O"],
+ catsplit(cpv)[1] + ".ebuild")
+ spawn_nofetch(myportdb, myebuild)
+ writemsg(_("!!! Fetch failed for %s, can't update Manifest\n")
+ % myfile, noiselevel=-1)
+ if myfile in dist_hashes and \
+ st is not None and st.st_size > 0:
+ # stat result is obtained before calling fetch(),
+ # since fetch may rename the existing file if the
+ # digest does not match.
+ cmd = colorize("INFORM", "ebuild --force %s manifest" %
+ os.path.basename(myebuild))
+ writemsg((_(
+ "!!! If you would like to forcefully replace the existing Manifest entry\n"
+ "!!! for %s, use the following command:\n") % myfile) +
+ "!!! %s\n" % cmd,
+ noiselevel=-1)
+ return 0
+
writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
try:
mf.create(assumeDistHashesSometimes=True,
assumeDistHashesAlways=(
"assume-digests" in mysettings.features))
except FileNotFound as e:
- writemsg(_("!!! File %s doesn't exist, can't update "
- "Manifest\n") % e, noiselevel=-1)
+ writemsg(_("!!! File %s doesn't exist, can't update Manifest\n")
+ % e, noiselevel=-1)
return 0
except PortagePackageException as e:
writemsg(("!!! %s\n") % (e,), noiselevel=-1)
diff --git a/pym/portage/package/ebuild/doebuild.py b/pym/portage/package/ebuild/doebuild.py
index 09062f9f3..01707aeec 100644
--- a/pym/portage/package/ebuild/doebuild.py
+++ b/pym/portage/package/ebuild/doebuild.py
@@ -1,14 +1,19 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['doebuild', 'doebuild_environment', 'spawn', 'spawnebuild']
+import grp
import gzip
import errno
import io
from itertools import chain
import logging
import os as _os
+import platform
+import pwd
import re
import signal
import stat
@@ -26,8 +31,12 @@ portage.proxy.lazyimport.lazyimport(globals(),
'portage.package.ebuild.digestgen:digestgen',
'portage.package.ebuild.fetch:fetch',
'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
- 'portage.dep._slot_abi:evaluate_slot_abi_equal_deps',
+ 'portage.dep._slot_operator:evaluate_slot_operator_equal_deps',
'portage.package.ebuild._spawn_nofetch:spawn_nofetch',
+ 'portage.util._desktop_entry:validate_desktop_entry',
+ 'portage.util._async.SchedulerInterface:SchedulerInterface',
+ 'portage.util._eventloop.EventLoop:EventLoop',
+ 'portage.util._eventloop.global_event_loop:global_event_loop',
'portage.util.ExtractKernelVersion:ExtractKernelVersion'
)
@@ -64,7 +73,6 @@ from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.EbuildPhase import EbuildPhase
from _emerge.EbuildSpawnProcess import EbuildSpawnProcess
from _emerge.Package import Package
-from _emerge.PollScheduler import PollScheduler
from _emerge.RootConfig import RootConfig
_unsandboxed_phases = frozenset([
@@ -74,6 +82,40 @@ _unsandboxed_phases = frozenset([
"prerm", "setup"
])
+# phases in which IPC with host is allowed
+_ipc_phases = frozenset([
+ "setup", "pretend",
+ "preinst", "postinst", "prerm", "postrm",
+])
+
+# phases in which network access is allowed
+_networked_phases = frozenset([
+ # for VCS fetching
+ "unpack",
+ # + for network-bound IPC
+] + list(_ipc_phases))
+
+_phase_func_map = {
+ "config": "pkg_config",
+ "setup": "pkg_setup",
+ "nofetch": "pkg_nofetch",
+ "unpack": "src_unpack",
+ "prepare": "src_prepare",
+ "configure": "src_configure",
+ "compile": "src_compile",
+ "test": "src_test",
+ "install": "src_install",
+ "preinst": "pkg_preinst",
+ "postinst": "pkg_postinst",
+ "prerm": "pkg_prerm",
+ "postrm": "pkg_postrm",
+ "info": "pkg_info",
+ "pretend": "pkg_pretend",
+}
+
+_vdb_use_conditional_keys = Package._dep_keys + \
+ ('LICENSE', 'PROPERTIES', 'PROVIDE', 'RESTRICT',)
+
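
How the new tables combine in _doebuild_spawn below, as a hedged sketch with local copies of the sets (the real code consults settings.features):

_ipc = {"setup", "pretend", "preinst", "postinst", "prerm", "postrm"}
_networked = {"unpack"} | _ipc

def sandbox_policy(phase, features):
	ipc = "ipc-sandbox" not in features or phase in _ipc
	networked = ("network-sandbox" not in features
		or phase in _networked)
	return ipc, networked

# sandbox_policy("compile", {"ipc-sandbox", "network-sandbox"})
# -> (False, False): compile gets neither host IPC nor network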
def _doebuild_spawn(phase, settings, actionmap=None, **kwargs):
"""
All proper ebuild phases which execute ebuild.sh are spawned
@@ -83,8 +125,18 @@ def _doebuild_spawn(phase, settings, actionmap=None, **kwargs):
if phase in _unsandboxed_phases:
kwargs['free'] = True
+ kwargs['ipc'] = 'ipc-sandbox' not in settings.features or \
+ phase in _ipc_phases
+ kwargs['networked'] = 'network-sandbox' not in settings.features or \
+ phase in _networked_phases
+
if phase == 'depend':
kwargs['droppriv'] = 'userpriv' in settings.features
+ # It's not necessary to close_fds for this phase, since
+ # it should not spawn any daemons, and close_fds is
+ # best avoided since it can interact badly with some
+ # garbage collectors (see _setup_pipes docstring).
+ kwargs['close_fds'] = False
if actionmap is not None and phase in actionmap:
kwargs.update(actionmap[phase]["args"])
@@ -102,17 +154,24 @@ def _doebuild_spawn(phase, settings, actionmap=None, **kwargs):
settings['EBUILD_PHASE'] = phase
try:
- return spawn(cmd, settings, **kwargs)
+ return spawn(cmd, settings, **portage._native_kwargs(kwargs))
finally:
settings.pop('EBUILD_PHASE', None)
-def _spawn_phase(phase, settings, actionmap=None, **kwargs):
- if kwargs.get('returnpid'):
- return _doebuild_spawn(phase, settings, actionmap=actionmap, **kwargs)
+def _spawn_phase(phase, settings, actionmap=None, returnpid=False,
+ logfile=None, **kwargs):
+ if returnpid:
+ return _doebuild_spawn(phase, settings, actionmap=actionmap,
+ returnpid=returnpid, logfile=logfile, **kwargs)
+
+ # The logfile argument is unused here, since EbuildPhase uses
+ # the PORTAGE_LOG_FILE variable if set.
ebuild_phase = EbuildPhase(actionmap=actionmap, background=False,
- phase=phase, scheduler=PollScheduler().sched_iface,
- settings=settings)
+ phase=phase, scheduler=SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ settings=settings, **kwargs)
+
ebuild_phase.start()
ebuild_phase.wait()
return ebuild_phase.returncode
@@ -125,19 +184,28 @@ def _doebuild_path(settings, eapi=None):
# Note: PORTAGE_BIN_PATH may differ from the global constant
# when portage is reinstalling itself.
portage_bin_path = settings["PORTAGE_BIN_PATH"]
- eprefix = settings["EPREFIX"]
+ eprefix = portage.const.EPREFIX
prerootpath = [x for x in settings.get("PREROOTPATH", "").split(":") if x]
rootpath = [x for x in settings.get("ROOTPATH", "").split(":") if x]
+ overrides = [x for x in settings.get(
+ "__PORTAGE_TEST_PATH_OVERRIDE", "").split(":") if x]
prefixes = []
if eprefix:
prefixes.append(eprefix)
prefixes.append("/")
- path = []
+ path = overrides
+
+ if "xattr" in settings.features:
+ path.append(os.path.join(portage_bin_path, "ebuild-helpers", "xattr"))
- if eapi not in (None, "0", "1", "2", "3"):
- path.append(os.path.join(portage_bin_path, "ebuild-helpers", "4"))
+ if eprefix and uid != 0 and "fakeroot" not in settings.features:
+ path.append(os.path.join(portage_bin_path,
+ "ebuild-helpers", "unprivileged"))
+
+ if settings.get("USERLAND", "GNU") != "GNU":
+ path.append(os.path.join(portage_bin_path, "ebuild-helpers", "bsd"))
path.append(os.path.join(portage_bin_path, "ebuild-helpers"))
path.extend(prerootpath)
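
A rough sketch of the PATH assembly above with hypothetical inputs; the visible hunk conditionally adds the xattr, unprivileged and bsd helper directories ahead of the main ebuild-helpers directory:

import os

def helper_path(portage_bin_path, overrides, helper_subdirs):
	path = list(overrides)
	for d in helper_subdirs:  # e.g. ("xattr", "unprivileged", "bsd")
		path.append(os.path.join(portage_bin_path, "ebuild-helpers", d))
	path.append(os.path.join(portage_bin_path, "ebuild-helpers"))
	return path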
@@ -256,10 +324,11 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
if hasattr(mydbapi, 'repositories'):
repo = mydbapi.repositories.get_repo_for_location(mytree)
mysettings['PORTDIR'] = repo.eclass_db.porttrees[0]
- mysettings['PORTDIR_OVERLAY'] = ' '.join(repo.eclass_db.porttrees[1:])
+ mysettings['PORTAGE_ECLASS_LOCATIONS'] = repo.eclass_db.eclass_locations_string
mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"] = repo.name
mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
+ mysettings.pop("PORTDIR_OVERLAY", None)
mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])
@@ -416,8 +485,8 @@ _doebuild_commands_without_builddir = (
'fetch', 'fetchall', 'help', 'manifest'
)
-def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
- fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
+def doebuild(myebuild, mydo, _unused=DeprecationWarning, settings=None, debug=0, listonly=0,
+ fetchonly=0, cleanup=0, dbkey=DeprecationWarning, use_cache=1, fetchall=0, tree=None,
mydbapi=None, vartree=None, prev_mtimes=None,
fd_pipes=None, returnpid=False):
"""
@@ -480,10 +549,15 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
mysettings = settings
myroot = settings['EROOT']
- if _unused is not None and _unused != mysettings['EROOT']:
+ if _unused is not DeprecationWarning:
 		warnings.warn("The third parameter of "
-			"portage.doebuild() is now unused. Use "
-			"settings['ROOT'] instead.",
+			"portage.doebuild() is deprecated. Use "
+			"settings['EROOT'] instead.",
+ DeprecationWarning, stacklevel=2)
+
+ if dbkey is not DeprecationWarning:
+ warnings.warn("portage.doebuild() called "
+ "with deprecated dbkey argument.",
DeprecationWarning, stacklevel=2)
if not tree:
@@ -520,7 +594,7 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
"fetch", "fetchall", "digest",
"unpack", "prepare", "configure", "compile", "test",
"install", "rpm", "qmerge", "merge",
- "package","unmerge", "manifest"]
+ "package", "unmerge", "manifest", "nofetch"]
if mydo not in validcommands:
validcommands.sort()
@@ -534,8 +608,11 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
return 1
if returnpid and mydo != 'depend':
- warnings.warn("portage.doebuild() called " + \
- "with returnpid parameter enabled. This usage will " + \
+ # This case is not supported, since it bypasses the EbuildPhase class
+ # which implements important functionality (including post phase hooks
+ # and IPC for things like best/has_version and die).
+ warnings.warn("portage.doebuild() called "
+ "with returnpid parameter enabled. This usage will "
"not be supported in the future.",
DeprecationWarning, stacklevel=2)
@@ -543,9 +620,6 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
fetchall = 1
mydo = "fetch"
- parallel_fetchonly = mydo in ("fetch", "fetchall") and \
- "PORTAGE_PARALLEL_FETCHONLY" in mysettings
-
if mydo not in clean_phases and not os.path.exists(myebuild):
writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
noiselevel=-1)
@@ -652,7 +726,7 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
# we can temporarily override PORTAGE_TMPDIR with a random temp dir
# so that there's no need for locking and it can be used even if the
# user isn't in the portage group.
- if mydo in ("info",):
+ if not returnpid and mydo in ("info",):
tmpdir = tempfile.mkdtemp()
tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
mysettings["PORTAGE_TMPDIR"] = tmpdir
@@ -663,9 +737,10 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
if mydo in clean_phases:
builddir_lock = None
if not returnpid and \
- 'PORTAGE_BUILDIR_LOCKED' not in mysettings:
+ 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
builddir_lock = EbuildBuildDir(
- scheduler=PollScheduler().sched_iface,
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
settings=mysettings)
builddir_lock.lock()
try:
@@ -681,42 +756,7 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
if returnpid:
return _spawn_phase(mydo, mysettings,
fd_pipes=fd_pipes, returnpid=returnpid)
- elif isinstance(dbkey, dict):
- warnings.warn("portage.doebuild() called " + \
- "with dict dbkey argument. This usage will " + \
- "not be supported in the future.",
- DeprecationWarning, stacklevel=2)
- mysettings["dbkey"] = ""
- pr, pw = os.pipe()
- fd_pipes = {
- 0:sys.stdin.fileno(),
- 1:sys.stdout.fileno(),
- 2:sys.stderr.fileno(),
- 9:pw}
- mypids = _spawn_phase(mydo, mysettings, returnpid=True,
- fd_pipes=fd_pipes)
- os.close(pw) # belongs exclusively to the child process now
- f = os.fdopen(pr, 'rb', 0)
- for k, v in zip(auxdbkeys,
- (_unicode_decode(line).rstrip('\n') for line in f)):
- dbkey[k] = v
- f.close()
- retval = os.waitpid(mypids[0], 0)[1]
- portage.process.spawned_pids.remove(mypids[0])
- # If it got a signal, return the signal that was sent, but
- # shift in order to distinguish it from a return value. (just
- # like portage.process.spawn() would do).
- if retval & 0xff:
- retval = (retval & 0xff) << 8
- else:
- # Otherwise, return its exit code.
- retval = retval >> 8
- if retval == os.EX_OK and len(dbkey) != len(auxdbkeys):
- # Don't trust bash's returncode if the
- # number of lines is incorrect.
- retval = 1
- return retval
- elif dbkey:
+ elif dbkey and dbkey is not DeprecationWarning:
mysettings["dbkey"] = dbkey
else:
mysettings["dbkey"] = \
@@ -725,14 +765,25 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
return _spawn_phase(mydo, mysettings,
fd_pipes=fd_pipes, returnpid=returnpid)
- # Validate dependency metadata here to ensure that ebuilds with invalid
- # data are never installed via the ebuild command. Don't bother when
- # returnpid == True since there's no need to do this every time emerge
- # executes a phase.
+ elif mydo == "nofetch":
+
+ if returnpid:
+ writemsg("!!! doebuild: %s\n" %
+				_("returnpid is not supported for phase '%s'" % mydo),
+ noiselevel=-1)
+
+ return spawn_nofetch(mydbapi, myebuild, settings=mysettings,
+ fd_pipes=fd_pipes)
+
if tree == "porttree":
- rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
- if rval != os.EX_OK:
- return rval
+
+ if not returnpid:
+ # Validate dependency metadata here to ensure that ebuilds with
+ # invalid data are never installed via the ebuild command. Skip
+ # this when returnpid is True (assume the caller handled it).
+ rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
+ if rval != os.EX_OK:
+ return rval
else:
# FEATURES=noauto only makes sense for porttree, and we don't want
@@ -741,20 +792,25 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
if "noauto" in mysettings.features:
mysettings.features.discard("noauto")
- # The info phase is special because it uses mkdtemp so and
- # user (not necessarily in the portage group) can run it.
- if mydo not in ('info',) and \
+ # If we are not using a private temp dir, then check access
+ # to the global temp dir.
+ if tmpdir is None and \
mydo not in _doebuild_commands_without_builddir:
rval = _check_temp_dir(mysettings)
if rval != os.EX_OK:
return rval
if mydo == "unmerge":
+ if returnpid:
+ writemsg("!!! doebuild: %s\n" %
+				_("returnpid is not supported for phase '%s'" % mydo),
+ noiselevel=-1)
return unmerge(mysettings["CATEGORY"],
mysettings["PF"], myroot, mysettings, vartree=vartree)
phases_to_run = set()
- if "noauto" in mysettings.features or \
+ if returnpid or \
+ "noauto" in mysettings.features or \
mydo not in actionmap_deps:
phases_to_run.add(mydo)
else:
@@ -805,9 +861,10 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
if newstuff:
if builddir_lock is None and \
- 'PORTAGE_BUILDIR_LOCKED' not in mysettings:
+ 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
builddir_lock = EbuildBuildDir(
- scheduler=PollScheduler().sched_iface,
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
settings=mysettings)
builddir_lock.lock()
try:
@@ -825,12 +882,12 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
# in order to satisfy the sane $PWD requirement (from bug #239560)
# when pkg_nofetch is spawned.
have_build_dirs = False
- if not parallel_fetchonly and \
- mydo not in ('digest', 'fetch', 'help', 'manifest'):
+ if mydo not in ('digest', 'fetch', 'help', 'manifest'):
if not returnpid and \
- 'PORTAGE_BUILDIR_LOCKED' not in mysettings:
+ 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
builddir_lock = EbuildBuildDir(
- scheduler=PollScheduler().sched_iface,
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
settings=mysettings)
builddir_lock.lock()
mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
@@ -873,9 +930,8 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
else:
vardb = vartree.dbapi
cpv = mysettings.mycpv
- cp = portage.versions.cpv_getkey(cpv)
- slot = mysettings["SLOT"]
- cpv_slot = cp + ":" + slot
+ cpv_slot = "%s%s%s" % \
+ (cpv.cp, portage.dep._slot_separator, cpv.slot)
mysettings["REPLACING_VERSIONS"] = " ".join(
set(portage.versions.cpv_getversion(match) \
for match in vardb.match(cpv_slot) + \
@@ -885,8 +941,16 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
# the sandbox -- and stop now.
if mydo in ("config", "help", "info", "postinst",
"preinst", "pretend", "postrm", "prerm"):
- return _spawn_phase(mydo, mysettings,
- fd_pipes=fd_pipes, logfile=logfile, returnpid=returnpid)
+ if mydo in ("preinst", "postinst"):
+ env_file = os.path.join(os.path.dirname(mysettings["EBUILD"]),
+ "environment.bz2")
+ if os.path.isfile(env_file):
+ mysettings["PORTAGE_UPDATE_ENV"] = env_file
+ try:
+ return _spawn_phase(mydo, mysettings,
+ fd_pipes=fd_pipes, logfile=logfile, returnpid=returnpid)
+ finally:
+ mysettings.pop("PORTAGE_UPDATE_ENV", None)
mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
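
The preinst/postinst stanza above, reduced to its try/finally shape: the saved build environment is exported only while the phase runs (spawn_phase is a placeholder for _spawn_phase):

import os

def spawn_with_update_env(settings, env_file, spawn_phase):
	if os.path.isfile(env_file):
		settings["PORTAGE_UPDATE_ENV"] = env_file
	try:
		return spawn_phase(settings)
	finally:
		settings.pop("PORTAGE_UPDATE_ENV", None)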
@@ -927,7 +991,8 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
if not fetch(fetchme, mysettings, listonly=listonly,
fetchonly=fetchonly, allow_missing_digests=True,
digests=dist_digests):
- spawn_nofetch(mydbapi, myebuild, settings=mysettings)
+ spawn_nofetch(mydbapi, myebuild, settings=mysettings,
+ fd_pipes=fd_pipes)
if listonly:
# The convention for listonly mode is to report
# success in any case, even though fetch() may
@@ -959,11 +1024,7 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
mf = None
_doebuild_manifest_cache = None
return not digestgen(mysettings=mysettings, myportdb=mydbapi)
- elif mydo != 'fetch' and \
- "digest" in mysettings.features:
- # Don't do this when called by emerge or when called just
- # for fetch (especially parallel-fetch) since it's not needed
- # and it can interfere with parallel tasks.
+ elif "digest" in mysettings.features:
mf = None
_doebuild_manifest_cache = None
digestgen(mysettings=mysettings, myportdb=mydbapi)
@@ -972,14 +1033,17 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
if mydo in ("digest", "manifest"):
return 1
+ if mydo == "fetch":
+ # Return after digestgen for FEATURES=digest support.
+ # Return before digestcheck, since fetch() already
+ # checked any relevant digests.
+ return 0
+
# See above comment about fetching only when needed
if tree == 'porttree' and \
not digestcheck(checkme, mysettings, "strict" in features, mf=mf):
return 1
- if mydo == "fetch":
- return 0
-
# remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
if tree == 'porttree' and \
((mydo != "setup" and "noauto" not in features) \
@@ -995,7 +1059,9 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
if len(actionmap_deps.get(x, [])):
actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
- if mydo in actionmap:
+ regular_actionmap_phase = mydo in actionmap
+
+ if regular_actionmap_phase:
bintree = None
if mydo == "package":
# Make sure the package directory exists before executing
@@ -1019,6 +1085,9 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
actionmap, mysettings, debug, logfile=logfile,
fd_pipes=fd_pipes, returnpid=returnpid)
+ if returnpid and isinstance(retval, list):
+ return retval
+
if retval == os.EX_OK:
if mydo == "package" and bintree is not None:
bintree.inject(mysettings.mycpv,
@@ -1030,7 +1099,15 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
except OSError:
pass
- elif mydo=="qmerge":
+ elif returnpid:
+ writemsg("!!! doebuild: %s\n" %
+			_("returnpid is not supported for phase '%s'" % mydo),
+ noiselevel=-1)
+
+ if regular_actionmap_phase:
+ # handled above
+ pass
+ elif mydo == "qmerge":
# check to ensure install was run. this *only* pops up when users
# forget it and are using ebuild
if not os.path.exists(
@@ -1047,7 +1124,8 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
- mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
+ mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes,
+ fd_pipes=fd_pipes)
elif mydo=="merge":
retval = spawnebuild("install", actionmap, mysettings, debug,
alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
@@ -1063,7 +1141,9 @@ def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
"build-info"), myroot, mysettings,
myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
- vartree=vartree, prev_mtimes=prev_mtimes)
+ vartree=vartree, prev_mtimes=prev_mtimes,
+ fd_pipes=fd_pipes)
+
else:
writemsg_stdout(_("!!! Unknown mydo: %s\n") % mydo, noiselevel=-1)
return 1
@@ -1163,7 +1243,9 @@ def _prepare_env_file(settings):
"""
env_extractor = BinpkgEnvExtractor(background=False,
- scheduler=PollScheduler().sched_iface, settings=settings)
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ settings=settings)
if env_extractor.dest_env_exists():
# There are lots of possible states when doebuild()
@@ -1242,7 +1324,7 @@ def _spawn_actionmap(settings):
misc_sh_binary = os.path.join(portage_bin_path,
os.path.basename(MISC_SH_BINARY))
ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
- misc_sh = _shell_quote(misc_sh_binary) + " dyn_%s"
+ misc_sh = _shell_quote(misc_sh_binary) + " __dyn_%s"
 	# args are for the spawn function
actionmap = {
@@ -1298,10 +1380,10 @@ def _validate_deps(mysettings, myroot, mydo, mydbapi):
if not pkg.built and \
mydo not in ("digest", "help", "manifest") and \
- pkg.metadata["REQUIRED_USE"] and \
- eapi_has_required_use(pkg.metadata["EAPI"]):
- result = check_required_use(pkg.metadata["REQUIRED_USE"],
- pkg.use.enabled, pkg.iuse.is_valid_flag)
+ pkg._metadata["REQUIRED_USE"] and \
+ eapi_has_required_use(pkg.eapi):
+ result = check_required_use(pkg._metadata["REQUIRED_USE"],
+ pkg.use.enabled, pkg.iuse.is_valid_flag, eapi=pkg.eapi)
if not result:
reduced_noise = result.tounicode()
writemsg("\n %s\n" % _("The following REQUIRED_USE flag" + \
@@ -1309,7 +1391,7 @@ def _validate_deps(mysettings, myroot, mydo, mydbapi):
writemsg(" %s\n" % reduced_noise,
noiselevel=-1)
normalized_required_use = \
- " ".join(pkg.metadata["REQUIRED_USE"].split())
+ " ".join(pkg._metadata["REQUIRED_USE"].split())
if reduced_noise != normalized_required_use:
writemsg("\n %s\n" % _("The above constraints " + \
"are a subset of the following complete expression:"),
@@ -1324,7 +1406,8 @@ def _validate_deps(mysettings, myroot, mydo, mydbapi):
# XXX This would be to replace getstatusoutput completely.
# XXX Issue: cannot block execution. Deadlock condition.
-def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakeroot=0, **keywords):
+def spawn(mystring, mysettings, debug=False, free=False, droppriv=False,
+ sesandbox=False, fakeroot=False, networked=True, ipc=True, **keywords):
"""
Spawn a subprocess with extra portage-specific options.
 	Options include:
@@ -1354,6 +1437,10 @@ def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakero
@type sesandbox: Boolean
@param fakeroot: Run this command with faked root privileges
@type fakeroot: Boolean
+ @param networked: Run this command with networking access enabled
+ @type networked: Boolean
+ @param ipc: Run this command with host IPC access enabled
+ @type ipc: Boolean
@param keywords: Extra options encoded as a dict, to be passed to spawn
@type keywords: Dictionary
@rtype: Integer
@@ -1366,29 +1453,90 @@ def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakero
fd_pipes = keywords.get("fd_pipes")
if fd_pipes is None:
fd_pipes = {
- 0:sys.stdin.fileno(),
- 1:sys.stdout.fileno(),
- 2:sys.stderr.fileno(),
+ 0:portage._get_stdin().fileno(),
+ 1:sys.__stdout__.fileno(),
+ 2:sys.__stderr__.fileno(),
}
# In some cases the above print statements don't flush stdout, so
# it needs to be flushed before allowing a child process to use it
# so that output always shows in the correct order.
- stdout_filenos = (sys.stdout.fileno(), sys.stderr.fileno())
+ stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
for fd in fd_pipes.values():
if fd in stdout_filenos:
- sys.stdout.flush()
- sys.stderr.flush()
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
break
features = mysettings.features
+
+ # Use Linux namespaces if available
+ if uid == 0 and platform.system() == 'Linux':
+ keywords['unshare_net'] = not networked
+ keywords['unshare_ipc'] = not ipc
+
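
The namespace decision above as a standalone sketch: only root on Linux can unshare namespaces, so the flags are set in that case only.

import os
import platform

def namespace_flags(networked, ipc):
	flags = {}
	if os.getuid() == 0 and platform.system() == 'Linux':
		flags['unshare_net'] = not networked
		flags['unshare_ipc'] = not ipc
	return flags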
# TODO: Enable fakeroot to be used together with droppriv. The
# fake ownership/permissions will have to be converted to real
# permissions in the merge phase.
fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
- if droppriv and uid == 0 and portage_gid and portage_uid and \
- hasattr(os, "setgroups"):
- keywords.update({"uid":portage_uid,"gid":portage_gid,
- "groups":userpriv_groups,"umask":0o02})
+ portage_build_uid = os.getuid()
+ portage_build_gid = os.getgid()
+ if uid == 0 and portage_uid and portage_gid and hasattr(os, "setgroups"):
+ if droppriv:
+ keywords.update({
+ "uid": portage_uid,
+ "gid": portage_gid,
+ "groups": userpriv_groups,
+ "umask": 0o02
+ })
+
+ # Adjust pty ownership so that subprocesses
+ # can directly access /dev/fd/{1,2}.
+ stdout_fd = fd_pipes.get(1)
+ if stdout_fd is not None:
+ try:
+ subprocess_tty = _os.ttyname(stdout_fd)
+ except OSError:
+ pass
+ else:
+ try:
+ parent_tty = _os.ttyname(sys.__stdout__.fileno())
+ except OSError:
+ parent_tty = None
+
+ if subprocess_tty != parent_tty:
+ _os.chown(subprocess_tty,
+ int(portage_uid), int(portage_gid))
+
+ if "userpriv" in features and "userpriv" not in mysettings["PORTAGE_RESTRICT"].split() and secpass >= 2:
+ # Since Python 3.4, getpwuid and getgrgid
+ # require int type (no proxies).
+ portage_build_uid = int(portage_uid)
+ portage_build_gid = int(portage_gid)
+
+ if "PORTAGE_BUILD_USER" not in mysettings:
+ user = None
+ try:
+ user = pwd.getpwuid(portage_build_uid).pw_name
+ except KeyError:
+ if portage_build_uid == 0:
+ user = "root"
+ elif portage_build_uid == portage_uid:
+ user = portage.data._portage_username
+ if user is not None:
+ mysettings["PORTAGE_BUILD_USER"] = user
+
+ if "PORTAGE_BUILD_GROUP" not in mysettings:
+ group = None
+ try:
+ group = grp.getgrgid(portage_build_gid).gr_name
+ except KeyError:
+ if portage_build_gid == 0:
+ group = "root"
+ elif portage_build_gid == portage_gid:
+ group = portage.data._portage_grpname
+ if group is not None:
+ mysettings["PORTAGE_BUILD_GROUP"] = group
+
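
The uid-to-name fallback above, sketched with stand-in arguments (portage_uid and portage_username replace the portage.data internals):

import pwd

def resolve_build_user(build_uid, portage_uid, portage_username):
	try:
		return pwd.getpwuid(build_uid).pw_name
	except KeyError:
		if build_uid == 0:
			return "root"
		if build_uid == portage_uid:
			return portage_username
	return None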
if not free:
free=((droppriv and "usersandbox" not in features) or \
(not droppriv and "sandbox" not in features and \
@@ -1419,12 +1567,15 @@ def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakero
mysettings["PORTAGE_SANDBOX_T"])
if keywords.get("returnpid"):
- return spawn_func(mystring, env=mysettings.environ(), **keywords)
+ return spawn_func(mystring, env=mysettings.environ(),
+ **portage._native_kwargs(keywords))
proc = EbuildSpawnProcess(
background=False, args=mystring,
- scheduler=PollScheduler().sched_iface, spawn_func=spawn_func,
- settings=mysettings, **keywords)
+ scheduler=SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ spawn_func=spawn_func,
+ settings=mysettings, **portage._native_kwargs(keywords))
proc.start()
proc.wait()
@@ -1436,8 +1587,8 @@ def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
logfile=None, fd_pipes=None, returnpid=False):
if returnpid:
- warnings.warn("portage.spawnebuild() called " + \
- "with returnpid parameter enabled. This usage will " + \
+ warnings.warn("portage.spawnebuild() called "
+ "with returnpid parameter enabled. This usage will "
"not be supported in the future.",
DeprecationWarning, stacklevel=2)
@@ -1530,7 +1681,52 @@ def _check_build_log(mysettings, out=None):
configure_opts_warn = []
configure_opts_warn_re = re.compile(
- r'^configure: WARNING: [Uu]nrecognized options: ')
+ r'^configure: WARNING: [Uu]nrecognized options: (.*)')
+
+ qa_configure_opts = ""
+ try:
+ with io.open(_unicode_encode(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "QA_CONFIGURE_OPTIONS"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as qa_configure_opts_f:
+ qa_configure_opts = qa_configure_opts_f.read()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+
+ qa_configure_opts = qa_configure_opts.split()
+ if qa_configure_opts:
+ if len(qa_configure_opts) > 1:
+ qa_configure_opts = "|".join("(%s)" % x for x in qa_configure_opts)
+ qa_configure_opts = "^(%s)$" % qa_configure_opts
+ else:
+ qa_configure_opts = "^%s$" % qa_configure_opts[0]
+ qa_configure_opts = re.compile(qa_configure_opts)
+
+ qa_am_maintainer_mode = []
+ try:
+ with io.open(_unicode_encode(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "QA_AM_MAINTAINER_MODE"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as qa_am_maintainer_mode_f:
+ qa_am_maintainer_mode = [x for x in
+ qa_am_maintainer_mode_f.read().splitlines() if x]
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+
+ if qa_am_maintainer_mode:
+ if len(qa_am_maintainer_mode) > 1:
+ qa_am_maintainer_mode = \
+ "|".join("(%s)" % x for x in qa_am_maintainer_mode)
+ qa_am_maintainer_mode = "^(%s)$" % qa_am_maintainer_mode
+ else:
+ qa_am_maintainer_mode = "^%s$" % qa_am_maintainer_mode[0]
+ qa_am_maintainer_mode = re.compile(qa_am_maintainer_mode)
# Exclude output from dev-libs/yaz-3.0.47 which looks like this:
#
@@ -1552,7 +1748,9 @@ def _check_build_log(mysettings, out=None):
for line in f:
line = _unicode_decode(line)
if am_maintainer_mode_re.search(line) is not None and \
- am_maintainer_mode_exclude_re.search(line) is None:
+ am_maintainer_mode_exclude_re.search(line) is None and \
+ (not qa_am_maintainer_mode or
+ qa_am_maintainer_mode.search(line) is None):
am_maintainer_mode.append(line.rstrip("\n"))
if bash_command_not_found_re.match(line) is not None and \
@@ -1562,8 +1760,11 @@ def _check_build_log(mysettings, out=None):
if helper_missing_file_re.match(line) is not None:
helper_missing_file.append(line.rstrip("\n"))
- if configure_opts_warn_re.match(line) is not None:
- configure_opts_warn.append(line.rstrip("\n"))
+ m = configure_opts_warn_re.match(line)
+ if m is not None:
+ for x in m.group(1).split(", "):
+ if not qa_configure_opts or qa_configure_opts.match(x) is None:
+ configure_opts_warn.append(x)
if make_jobserver_re.match(line) is not None:
make_jobserver.append(line.rstrip("\n"))
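Taken together, the hunks above whitelist configure warnings: QA_CONFIGURE_OPTIONS is compiled into one anchored alternation, and each comma-separated option from the warning line is reported only if it fails to match. A standalone sketch with hypothetical option values:

    import re

    qa_opts = "--enable-foo --with-bar=.*".split()   # from QA_CONFIGURE_OPTIONS
    if len(qa_opts) > 1:
        qa_re = re.compile("^(%s)$" % "|".join("(%s)" % x for x in qa_opts))
    else:
        qa_re = re.compile("^%s$" % qa_opts[0])

    line = "configure: WARNING: unrecognized options: --enable-foo, --disable-baz"
    m = re.match(r'^configure: WARNING: [Uu]nrecognized options: (.*)', line)
    leftover = [x for x in m.group(1).split(", ") if qa_re.match(x) is None]
    # leftover == ['--disable-baz']; only this option triggers the QA notice
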
@@ -1612,7 +1813,7 @@ def _check_build_log(mysettings, out=None):
if configure_opts_warn:
msg = [_("QA Notice: Unrecognized configure options:")]
msg.append("")
- msg.extend("\t" + line for line in configure_opts_warn)
+ msg.extend("\t%s" % x for x in configure_opts_warn)
_eqawarn(msg)
if make_jobserver:
@@ -1637,8 +1838,12 @@ def _post_src_install_write_metadata(settings):
build_info_dir = os.path.join(settings['PORTAGE_BUILDDIR'], 'build-info')
- for k in ('IUSE',):
- v = settings.get(k)
+ metadata_keys = ['IUSE']
+ if eapi_attrs.iuse_effective:
+ metadata_keys.append('IUSE_EFFECTIVE')
+
+ for k in metadata_keys:
+ v = settings.configdict['pkg'].get(k)
if v is not None:
write_atomic(os.path.join(build_info_dir, k), v + '\n')
@@ -1654,7 +1859,7 @@ def _post_src_install_write_metadata(settings):
'BUILD_TIME'), encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
errors='strict') as f:
- f.write(_unicode_decode("%.0f\n" % (time.time(),)))
+ f.write("%.0f\n" % (time.time(),))
use = frozenset(settings['PORTAGE_USE'].split())
for k in _vdb_use_conditional_keys:
@@ -1668,7 +1873,7 @@ def _post_src_install_write_metadata(settings):
continue
if k.endswith('DEPEND'):
- if eapi_attrs.slot_abi:
+ if eapi_attrs.slot_operator:
continue
token_class = Atom
else:
@@ -1686,10 +1891,10 @@ def _post_src_install_write_metadata(settings):
k), encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
errors='strict') as f:
- f.write(_unicode_decode(v + '\n'))
+ f.write('%s\n' % v)
- if eapi_attrs.slot_abi:
- deps = evaluate_slot_abi_equal_deps(settings, use, QueryCommand.get_db())
+ if eapi_attrs.slot_operator:
+ deps = evaluate_slot_operator_equal_deps(settings, use, QueryCommand.get_db())
for k, v in deps.items():
filename = os.path.join(build_info_dir, k)
if not v:
@@ -1702,10 +1907,7 @@ def _post_src_install_write_metadata(settings):
k), encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
errors='strict') as f:
- f.write(_unicode_decode(v + '\n'))
-
-_vdb_use_conditional_keys = ('DEPEND', 'LICENSE', 'PDEPEND',
- 'PROPERTIES', 'PROVIDE', 'RDEPEND', 'RESTRICT',)
+ f.write('%s\n' % v)
def _preinst_bsdflags(mysettings):
if bsd_chflags:
@@ -1747,6 +1949,33 @@ def _post_src_install_uid_fix(mysettings, out):
destdir = mysettings["D"]
ed_len = len(mysettings["ED"])
unicode_errors = []
+ desktop_file_validate = \
+ portage.process.find_binary("desktop-file-validate") is not None
+ xdg_dirs = mysettings.get('XDG_DATA_DIRS', '/usr/share').split(':')
+ xdg_dirs = tuple(os.path.join(i, "applications") + os.sep
+ for i in xdg_dirs if i)
+
+ qa_desktop_file = ""
+ try:
+ with io.open(_unicode_encode(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "QA_DESKTOP_FILE"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ qa_desktop_file = f.read()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+
+ qa_desktop_file = qa_desktop_file.split()
+ if qa_desktop_file:
+ if len(qa_desktop_file) > 1:
+ qa_desktop_file = "|".join("(%s)" % x for x in qa_desktop_file)
+ qa_desktop_file = "^(%s)$" % qa_desktop_file
+ else:
+ qa_desktop_file = "^%s$" % qa_desktop_file[0]
+ qa_desktop_file = re.compile(qa_desktop_file)
while True:
@@ -1755,6 +1984,7 @@ def _post_src_install_uid_fix(mysettings, out):
counted_inodes = set()
fixlafiles_announced = False
fixlafiles = "fixlafiles" in mysettings.features
+ desktopfile_errors = []
for parent, dirs, files in os.walk(destdir):
try:
@@ -1794,6 +2024,16 @@ def _post_src_install_uid_fix(mysettings, out):
else:
fpath = os.path.join(parent, fname)
+ fpath_relative = fpath[ed_len - 1:]
+ if desktop_file_validate and fname.endswith(".desktop") and \
+ os.path.isfile(fpath) and \
+ fpath_relative.startswith(xdg_dirs) and \
+ not (qa_desktop_file and qa_desktop_file.match(fpath_relative.strip(os.sep)) is not None):
+
+ desktop_validate = validate_desktop_entry(fpath)
+ if desktop_validate:
+ desktopfile_errors.extend(desktop_validate)
+
if fixlafiles and \
fname.endswith(".la") and os.path.isfile(fpath):
f = open(_unicode_encode(fpath,
@@ -1860,6 +2100,11 @@ def _post_src_install_uid_fix(mysettings, out):
if not unicode_error:
break
+ if desktopfile_errors:
+ for l in _merge_desktopfile_error(desktopfile_errors):
+ l = l.replace(mysettings["ED"], '/')
+ eqawarn(l, phase='install', key=mysettings.mycpv, out=out)
+
if unicode_errors:
for l in _merge_unicode_error(unicode_errors):
eqawarn(l, phase='install', key=mysettings.mycpv, out=out)
@@ -1871,7 +2116,7 @@ def _post_src_install_uid_fix(mysettings, out):
'SIZE'), encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
errors='strict')
- f.write(_unicode_decode(str(size) + '\n'))
+ f.write('%d\n' % size)
f.close()
_reapply_bsdflags_to_image(mysettings)
@@ -2022,6 +2267,20 @@ def _post_src_install_soname_symlinks(mysettings, out):
for line in qa_msg:
eqawarn(line, key=mysettings.mycpv, out=out)
+def _merge_desktopfile_error(errors):
+ lines = []
+
+ msg = _("QA Notice: This package installs one or more .desktop files "
+ "that do not pass validation.")
+ lines.extend(wrap(msg, 72))
+
+ lines.append("")
+ errors.sort()
+ lines.extend("\t" + x for x in errors)
+ lines.append("")
+
+ return lines
+
def _merge_unicode_error(errors):
lines = []
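The desktop-file check added above only runs when desktop-file-validate is installed, and only for regular .desktop files under the XDG application directories that are not whitelisted by QA_DESKTOP_FILE. A rough sketch of the reporting path, assuming validate_desktop_entry comes from portage.util._desktop_entry and using a hypothetical image path:

    fpath = "/var/tmp/portage/app-misc/foo-1/image/usr/share/applications/foo.desktop"
    errors = validate_desktop_entry(fpath)    # wraps desktop-file-validate
    if errors:
        for l in _merge_desktopfile_error(errors):
            eqawarn(l, phase='install', key='app-misc/foo-1')
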
@@ -2078,11 +2337,6 @@ def _handle_self_update(settings, vardb):
if settings["ROOT"] == "/" and \
portage.dep.match_from_list(
portage.const.PORTAGE_PACKAGE_ATOM, [cpv]):
- inherited = frozenset(settings.get('INHERITED', '').split())
- if not vardb.cpv_exists(cpv) or \
- '9999' in cpv or \
- 'git' in inherited or \
- 'git-2' in inherited:
- _prepare_self_update(settings)
- return True
+ _prepare_self_update(settings)
+ return True
return False
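With the version and VCS checks dropped, any merge of a package matching PORTAGE_PACKAGE_ATOM now triggers the self-update preparation. A sketch of the remaining test (the cpv is hypothetical; PORTAGE_PACKAGE_ATOM is normally 'sys-apps/portage'):

    from portage.dep import match_from_list
    if match_from_list("sys-apps/portage", ["sys-apps/portage-2.2.10"]):
        _prepare_self_update(settings)   # now unconditional for matching cpvs
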
diff --git a/pym/portage/package/ebuild/fetch.py b/pym/portage/package/ebuild/fetch.py
index 60ed04da2..2424ff3c5 100644
--- a/pym/portage/package/ebuild/fetch.py
+++ b/pym/portage/package/ebuild/fetch.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -14,6 +14,10 @@ import stat
import sys
import tempfile
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
import portage
portage.proxy.lazyimport.lazyimport(globals(),
@@ -26,7 +30,7 @@ portage.proxy.lazyimport.lazyimport(globals(),
from portage import OrderedDict, os, selinux, shutil, _encodings, \
_shell_quote, _unicode_encode
from portage.checksum import (hashfunc_map, perform_md5, verify_all,
- _filter_unaccelarated_hashes)
+ _filter_unaccelarated_hashes, _hash_filter, _apply_hash_filter)
from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \
GLOBAL_CONFIG_PATH
from portage.data import portage_gid, portage_uid, secpass, userpriv_groups
@@ -64,9 +68,9 @@ def _spawn_fetch(settings, args, **kwargs):
if "fd_pipes" not in kwargs:
kwargs["fd_pipes"] = {
- 0 : sys.stdin.fileno(),
- 1 : sys.stdout.fileno(),
- 2 : sys.stdout.fileno(),
+ 0 : portage._get_stdin().fileno(),
+ 1 : sys.__stdout__.fileno(),
+ 2 : sys.__stdout__.fileno(),
}
if "userfetch" in settings.features and \
@@ -185,7 +189,7 @@ def _check_digests(filename, digests, show_errors=1):
return False
return True
-def _check_distfile(filename, digests, eout, show_errors=1):
+def _check_distfile(filename, digests, eout, show_errors=1, hash_filter=None):
"""
@return a tuple of (match, stat_obj) where match is True if filename
matches all given digests (if any) and stat_obj is a stat result, or
@@ -212,6 +216,8 @@ def _check_distfile(filename, digests, eout, show_errors=1):
return (False, st)
else:
digests = _filter_unaccelarated_hashes(digests)
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
if _check_digests(filename, digests, show_errors=show_errors):
eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
" ".join(sorted(digests))))
@@ -341,7 +347,7 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
_("!!! For fetching to a read-only filesystem, "
"locking should be turned off.\n")), noiselevel=-1)
writemsg(_("!!! This can be done by adding -distlocks to "
- "FEATURES in /etc/make.conf\n"), noiselevel=-1)
+ "FEATURES in /etc/portage/make.conf\n"), noiselevel=-1)
# use_locks = 0
# local mirrors are always added
@@ -355,6 +361,9 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
if try_mirrors:
mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
+ hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
if skip_manifest:
allow_missing_digests = True
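PORTAGE_CHECKSUM_FILTER lets the user restrict which digest types are actually verified; a transparent filter (the default) disables the mechanism entirely. A rough sketch, assuming a make.conf value of "-* sha256" (the exact matching rules live in portage.checksum):

    hash_filter = _hash_filter("-* sha256")   # from PORTAGE_CHECKSUM_FILTER
    digests = {"SHA256": "...", "WHIRLPOOL": "...", "size": 12345}
    if not hash_filter.transparent:
        digests = _apply_hash_filter(digests, hash_filter)
    # -> 'size' and 'SHA256' remain; WHIRLPOOL is no longer verified
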
@@ -397,12 +406,16 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
for myfile, uri_set in myuris.items():
for myuri in uri_set:
file_uri_tuples.append((myfile, myuri))
+ if not uri_set:
+ file_uri_tuples.append((myfile, None))
else:
for myuri in myuris:
- file_uri_tuples.append((os.path.basename(myuri), myuri))
+ if urlparse(myuri).scheme:
+ file_uri_tuples.append((os.path.basename(myuri), myuri))
+ else:
+ file_uri_tuples.append((os.path.basename(myuri), None))
filedict = OrderedDict()
- primaryuri_indexes={}
primaryuri_dict = {}
thirdpartymirror_uris = {}
for myfile, myuri in file_uri_tuples:
@@ -410,6 +423,8 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
filedict[myfile]=[]
for y in range(0,len(locations)):
filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
+ if myuri is None:
+ continue
if myuri[:9]=="mirror://":
eidx = myuri.find("/", 9)
if eidx != -1:
@@ -424,15 +439,15 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
# now try the official mirrors
if mirrorname in thirdpartymirrors:
- random.shuffle(thirdpartymirrors[mirrorname])
-
uris = [locmirr.rstrip("/") + "/" + path \
for locmirr in thirdpartymirrors[mirrorname]]
+ random.shuffle(uris)
filedict[myfile].extend(uris)
thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
- if not filedict[myfile]:
- writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
+ if mirrorname not in custommirrors and \
+ mirrorname not in thirdpartymirrors:
+ writemsg(_("!!! No known mirror by the name: %s\n") % (mirrorname))
else:
writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
writemsg(" %s\n" % (myuri), noiselevel=-1)
@@ -440,26 +455,30 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
if restrict_fetch or force_mirror:
# Only fetching from specific mirrors is allowed.
continue
- if "primaryuri" in restrict:
- # Use the source site first.
- if myfile in primaryuri_indexes:
- primaryuri_indexes[myfile] += 1
- else:
- primaryuri_indexes[myfile] = 0
- filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
- else:
- filedict[myfile].append(myuri)
primaryuris = primaryuri_dict.get(myfile)
if primaryuris is None:
primaryuris = []
primaryuri_dict[myfile] = primaryuris
primaryuris.append(myuri)
+ # Order primaryuri_dict values to match the order in SRC_URI.
+ for uris in primaryuri_dict.values():
+ uris.reverse()
+
# Prefer thirdpartymirrors over normal mirrors in cases when
# the file does not yet exist on the normal mirrors.
for myfile, uris in thirdpartymirror_uris.items():
primaryuri_dict.setdefault(myfile, []).extend(uris)
+ # Now merge primaryuri values into filedict (includes mirrors
+ # explicitly referenced in SRC_URI).
+ if "primaryuri" in restrict:
+ for myfile, uris in filedict.items():
+ filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
+ else:
+ for myfile in filedict:
+ filedict[myfile] += primaryuri_dict.get(myfile, [])
+
can_fetch=True
if listonly:
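The replacement for primaryuri_indexes merges each file's SRC_URI sources into filedict in one pass: before the mirrors when RESTRICT=primaryuri is set, after them otherwise. A sketch with hypothetical URIs:

    restrict = ["primaryuri"]
    filedict = {"foo.tar.gz": ["http://mirror.example/distfiles/foo.tar.gz"]}
    primaryuri_dict = {"foo.tar.gz": ["http://upstream.example/foo.tar.gz"]}
    if "primaryuri" in restrict:
        for myfile, uris in filedict.items():
            filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
    else:
        for myfile in filedict:
            filedict[myfile] += primaryuri_dict.get(myfile, [])
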
@@ -637,7 +656,7 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
eout = EOutput()
eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
match, mystat = _check_distfile(
- myfile_path, pruned_digests, eout)
+ myfile_path, pruned_digests, eout, hash_filter=hash_filter)
if match:
# Skip permission adjustment for symlinks, since we don't
# want to modify anything outside of the primary DISTDIR,
@@ -709,7 +728,7 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
for x in ro_distdirs:
filename = os.path.join(x, myfile)
match, mystat = _check_distfile(
- filename, pruned_digests, eout)
+ filename, pruned_digests, eout, hash_filter=hash_filter)
if match:
readonly_file = filename
break
@@ -734,7 +753,7 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
"remaining space.\n"), noiselevel=-1)
if userfetch:
writemsg(_("!!! You may set FEATURES=\"-userfetch\""
- " in /etc/make.conf in order to fetch with\n"
+ " in /etc/portage/make.conf in order to fetch with\n"
"!!! superuser privileges.\n"), noiselevel=-1)
if fsmirrors and not os.path.exists(myfile_path) and has_space:
@@ -796,6 +815,8 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
continue
else:
digests = _filter_unaccelarated_hashes(mydigests[myfile])
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
verified_ok, reason = verify_all(myfile_path, digests)
if not verified_ok:
writemsg(_("!!! Previously fetched"
@@ -845,8 +866,8 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
protocol = loc[0:loc.find("://")]
global_config_path = GLOBAL_CONFIG_PATH
- if mysettings['EPREFIX']:
- global_config_path = os.path.join(mysettings['EPREFIX'],
+ if portage.const.EPREFIX:
+ global_config_path = os.path.join(portage.const.EPREFIX,
GLOBAL_CONFIG_PATH.lstrip(os.sep))
missing_file_param = False
@@ -955,11 +976,16 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
writemsg_stdout(_(">>> Downloading '%s'\n") % \
_hide_url_passwd(loc))
variables = {
- "DISTDIR": mysettings["DISTDIR"],
"URI": loc,
"FILE": myfile
}
+ for k in ("DISTDIR", "PORTAGE_SSH_OPTS"):
+ try:
+ variables[k] = mysettings[k]
+ except KeyError:
+ pass
+
myfetch = shlex_split(locfetch)
myfetch = [varexpand(x, mydict=variables) for x in myfetch]
myret = -1
@@ -1053,6 +1079,8 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
# net connection. This way we have a chance to try to download
# from another mirror...
digests = _filter_unaccelarated_hashes(mydigests[myfile])
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
verified_ok, reason = verify_all(myfile_path, digests)
if not verified_ok:
writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
diff --git a/pym/portage/package/ebuild/getmaskingreason.py b/pym/portage/package/ebuild/getmaskingreason.py
index 8a88c2f60..1e4ed21ce 100644
--- a/pym/portage/package/ebuild/getmaskingreason.py
+++ b/pym/portage/package/ebuild/getmaskingreason.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['getmaskingreason']
@@ -6,13 +6,12 @@ __all__ = ['getmaskingreason']
import portage
from portage import os
from portage.const import USER_CONFIG_PATH
-from portage.dep import Atom, match_from_list, _slot_separator, _repo_separator
+from portage.dep import Atom, match_from_list
from portage.exception import InvalidAtom
from portage.localization import _
from portage.repository.config import _gen_valid_repo
from portage.util import grablines, normalize_path
-from portage.versions import catpkgsplit
-from _emerge.Package import Package
+from portage.versions import catpkgsplit, _pkg_str
def getmaskingreason(mycpv, metadata=None, settings=None,
portdb=None, return_location=False, myrepo=None):
@@ -60,23 +59,20 @@ def getmaskingreason(mycpv, metadata=None, settings=None,
# Sometimes we can't access SLOT or repository due to corruption.
pkg = mycpv
- if metadata is not None:
- pkg = "".join((mycpv, _slot_separator, metadata["SLOT"]))
- # At this point myrepo should be None, a valid name, or
- # Package.UNKNOWN_REPO which we ignore.
- if myrepo is not None and myrepo != Package.UNKNOWN_REPO:
- pkg = "".join((pkg, _repo_separator, myrepo))
+ try:
+ pkg.slot
+ except AttributeError:
+ pkg = _pkg_str(mycpv, metadata=metadata, repo=myrepo)
+
cpv_slot_list = [pkg]
- mycp=mysplit[0]+"/"+mysplit[1]
+ mycp = pkg.cp
- # XXX- This is a temporary duplicate of code from the config constructor.
- locations = [os.path.join(settings["PORTDIR"], "profiles")]
+ locations = []
+ if pkg.repo in settings.repositories:
+ for repo in settings.repositories[pkg.repo].masters + (settings.repositories[pkg.repo],):
+ locations.append(os.path.join(repo.location, "profiles"))
locations.extend(settings.profiles)
- for ov in settings["PORTDIR_OVERLAY"].split():
- profdir = os.path.join(normalize_path(ov), "profiles")
- if os.path.isdir(profdir):
- locations.append(profdir)
locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
USER_CONFIG_PATH))
locations.reverse()
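The rewrite above leans on _pkg_str, which carries cp, slot and repo as attributes so callers no longer join the separators by hand. A minimal sketch (metadata values are hypothetical):

    from portage.versions import _pkg_str

    pkg = _pkg_str("app-misc/foo-1.0",
        metadata={"SLOT": "0", "repository": "gentoo"})
    pkg.cp, pkg.slot, pkg.repo   # ('app-misc/foo', '0', 'gentoo')
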
diff --git a/pym/portage/package/ebuild/getmaskingstatus.py b/pym/portage/package/ebuild/getmaskingstatus.py
index 9bf605db6..4b9e588f7 100644
--- a/pym/portage/package/ebuild/getmaskingstatus.py
+++ b/pym/portage/package/ebuild/getmaskingstatus.py
@@ -1,17 +1,21 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['getmaskingstatus']
import sys
import portage
from portage import eapi_is_supported, _eapi_is_deprecated
+from portage.exception import InvalidDependString
from portage.localization import _
from portage.package.ebuild.config import config
from portage.versions import catpkgsplit, _pkg_str
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
class _UnmaskHint(object):
@@ -48,7 +52,7 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
# emerge passed in a Package instance
pkg = mycpv
mycpv = pkg.cpv
- metadata = pkg.metadata
+ metadata = pkg._metadata
installed = pkg.installed
if metadata is None:
@@ -65,10 +69,11 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
else:
metadata["USE"] = ""
- if not hasattr(mycpv, 'slot'):
+ try:
+ mycpv.slot
+ except AttributeError:
try:
- mycpv = _pkg_str(mycpv, slot=metadata['SLOT'],
- repo=metadata.get('repository'))
+ mycpv = _pkg_str(mycpv, metadata=metadata, settings=settings)
except portage.exception.InvalidData:
raise ValueError(_("invalid CPV: %s") % mycpv)
@@ -83,6 +88,7 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
mygroups = settings._getKeywords(mycpv, metadata)
licenses = metadata["LICENSE"]
properties = metadata["PROPERTIES"]
+ restrict = metadata["RESTRICT"]
if not eapi_is_supported(eapi):
return [_MaskReason("EAPI", "EAPI %s" % eapi)]
elif _eapi_is_deprecated(eapi) and not installed:
@@ -122,6 +128,13 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
if gp=="*":
kmask=None
break
+ elif gp == "~*":
+ for x in pgroups:
+ if x[:1] == "~":
+ kmask = None
+ break
+ if kmask is None:
+ break
elif gp=="-"+myarch and myarch in pgroups:
kmask="-"+myarch
break
@@ -161,6 +174,15 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
except portage.exception.InvalidDependString as e:
rValue.append(_MaskReason("invalid", "PROPERTIES: "+str(e)))
+ try:
+ missing_restricts = settings._getMissingRestrict(mycpv, metadata)
+ if missing_restricts:
+ msg = list(missing_restricts)
+ msg.append("in RESTRICT")
+ rValue.append(_MaskReason("RESTRICT", " ".join(msg)))
+ except InvalidDependString as e:
+ rValue.append(_MaskReason("invalid", "RESTRICT: %s" % (e,)))
+
# Only show KEYWORDS masks for installed packages
# if they're not masked for any other reason.
if kmask and (not installed or not rValue):
diff --git a/pym/portage/package/ebuild/prepare_build_dirs.py b/pym/portage/package/ebuild/prepare_build_dirs.py
index b8fbdc5cf..6782160e4 100644
--- a/pym/portage/package/ebuild/prepare_build_dirs.py
+++ b/pym/portage/package/ebuild/prepare_build_dirs.py
@@ -1,6 +1,8 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['prepare_build_dirs']
import errno
@@ -338,12 +340,12 @@ def _prepare_workdir(mysettings):
try:
_ensure_log_subdirs(logdir, log_subdir)
except PortageException as e:
- writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
if os.access(log_subdir, os.W_OK):
logdir_subdir_ok = True
else:
- writemsg(_unicode_decode("!!! %s: %s\n") %
+ writemsg("!!! %s: %s\n" %
(_("Permission Denied"), log_subdir), noiselevel=-1)
tmpdir_log_path = os.path.join(
diff --git a/pym/portage/process.py b/pym/portage/process.py
index f3cec8815..ba41ea8eb 100644
--- a/pym/portage/process.py
+++ b/pym/portage/process.py
@@ -1,25 +1,30 @@
# portage.py -- core Portage functionality
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import atexit
import errno
+import fcntl
import platform
import signal
+import socket
+import struct
import sys
import traceback
+import os as _os
from portage import os
from portage import _encodings
from portage import _unicode_encode
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.util:dump_traceback',
+ 'portage.util:dump_traceback,writemsg',
)
from portage.const import BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY
from portage.exception import CommandNotFound
+from portage.util._ctypes import find_library, LoadLibrary, ctypes
try:
import resource
@@ -28,12 +33,35 @@ except ImportError:
max_fd_limit = 256
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
-if os.path.isdir("/proc/%i/fd" % os.getpid()):
+# Support PEP 446 for Python >=3.4
+try:
+ _set_inheritable = _os.set_inheritable
+except AttributeError:
+ _set_inheritable = None
+
+try:
+ _FD_CLOEXEC = fcntl.FD_CLOEXEC
+except AttributeError:
+ _FD_CLOEXEC = None
+
+# Prefer /proc/self/fd if available (/dev/fd
+# doesn't work on Solaris, see bug #474536).
+for _fd_dir in ("/proc/self/fd", "/dev/fd"):
+ if os.path.isdir(_fd_dir):
+ break
+ else:
+ _fd_dir = None
+
+# /dev/fd does not work on FreeBSD, see bug #478446
+if platform.system() in ('FreeBSD',) and _fd_dir == '/dev/fd':
+ _fd_dir = None
+
+if _fd_dir is not None:
def get_open_fds():
- return (int(fd) for fd in os.listdir("/proc/%i/fd" % os.getpid()) \
- if fd.isdigit())
+ return (int(fd) for fd in os.listdir(_fd_dir) if fd.isdigit())
if platform.python_implementation() == 'PyPy':
# EAGAIN observed with PyPy 1.8.
@@ -46,6 +74,13 @@ if os.path.isdir("/proc/%i/fd" % os.getpid()):
raise
return range(max_fd_limit)
+elif os.path.isdir("/proc/%s/fd" % os.getpid()):
+ # In order for this function to work in forked subprocesses,
+ # os.getpid() must be called from inside the function.
+ def get_open_fds():
+ return (int(fd) for fd in os.listdir("/proc/%s/fd" % os.getpid())
+ if fd.isdigit())
+
else:
def get_open_fds():
return range(max_fd_limit)
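Whichever branch defines it, get_open_fds() yields the descriptors currently open in this process, which is what the close_fds handling below relies on. For example, to close everything except the standard streams before an exec:

    import os
    for fd in get_open_fds():
        if fd not in (0, 1, 2):
            try:
                os.close(fd)
            except OSError:
                pass   # already closed or otherwise invalid
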
@@ -83,14 +118,14 @@ def spawn_bash(mycommand, debug=False, opt_name=None, **keywords):
def spawn_sandbox(mycommand, opt_name=None, **keywords):
if not sandbox_capable:
return spawn_bash(mycommand, opt_name=opt_name, **keywords)
- args=[SANDBOX_BINARY]
+ args = [SANDBOX_BINARY]
if not opt_name:
opt_name = os.path.basename(mycommand.split()[0])
args.append(mycommand)
return spawn(args, opt_name=opt_name, **keywords)
def spawn_fakeroot(mycommand, fakeroot_state=None, opt_name=None, **keywords):
- args=[FAKEROOT_BINARY]
+ args = [FAKEROOT_BINARY]
if not opt_name:
opt_name = os.path.basename(mycommand.split()[0])
if fakeroot_state:
@@ -141,30 +176,28 @@ def run_exitfuncs():
atexit.register(run_exitfuncs)
-# We need to make sure that any processes spawned are killed off when
-# we exit. spawn() takes care of adding and removing pids to this list
-# as it creates and cleans up processes.
-spawned_pids = []
-def cleanup():
- while spawned_pids:
- pid = spawned_pids.pop()
+# It used to be necessary for API consumers to remove pids from spawned_pids,
+# since otherwise it would accumulate pids endlessly. Now, spawned_pids is
+# just an empty dummy list, so for backward compatibility, ignore ValueError
+# for removal on non-existent items.
+class _dummy_list(list):
+ def remove(self, item):
+ # TODO: Trigger a DeprecationWarning here, after stable portage
+ # has dummy spawned_pids.
try:
- # With waitpid and WNOHANG, only check the
- # first element of the tuple since the second
- # element may vary (bug #337465).
- if os.waitpid(pid, os.WNOHANG)[0] == 0:
- os.kill(pid, signal.SIGTERM)
- os.waitpid(pid, 0)
- except OSError:
- # This pid has been cleaned up outside
- # of spawn().
+ list.remove(self, item)
+ except ValueError:
pass
-atexit_register(cleanup)
+spawned_pids = _dummy_list()
+
+def cleanup():
+ pass
def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
uid=None, gid=None, groups=None, umask=None, logfile=None,
- path_lookup=True, pre_exec=None):
+ path_lookup=True, pre_exec=None, close_fds=True, unshare_net=False,
+ unshare_ipc=False, cgroup=None):
"""
Spawns a given command.
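For old API consumers the module-level names survive, but they no longer track anything. A sketch of the backward-compatible behavior:

    spawned_pids.remove(12345)   # absent pid: formerly ValueError, now a no-op
    cleanup()                    # retained as an empty stub
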
@@ -175,6 +208,7 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
@param opt_name: an optional name for the spawn'd process (defaults to the binary name)
@type opt_name: String
@param fd_pipes: A dict mapping file descriptors for pipes, e.g. { 0: stdin, 1: stdout }
+ (default is {0:stdin, 1:stdout, 2:stderr})
@type fd_pipes: Dictionary
@param returnpid: Return the Process IDs for a successful spawn.
NOTE: This requires the caller clean up all the PIDs, otherwise spawn will clean them.
@@ -193,7 +227,16 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
@type path_lookup: Boolean
@param pre_exec: A function to be called with no arguments just prior to the exec call.
@type pre_exec: callable
-
+ @param close_fds: If True, then close all file descriptors except those
+ referenced by fd_pipes (default is True).
+ @type close_fds: Boolean
+ @param unshare_net: If True, networking will be unshared from the spawned process
+ @type unshare_net: Boolean
+ @param unshare_ipc: If True, IPC will be unshared from the spawned process
+ @type unshare_ipc: Boolean
+ @param cgroup: CGroup path to bind the process to
+ @type cgroup: String
+
logfile requires stdout and stderr to be assigned to this process (i.e. not pointed
somewhere else.)
@@ -226,9 +269,9 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
# default to propagating our stdin, stdout and stderr.
if fd_pipes is None:
fd_pipes = {
- 0:sys.stdin.fileno(),
- 1:sys.stdout.fileno(),
- 2:sys.stderr.fileno(),
+ 0:portage._get_stdin().fileno(),
+ 1:sys.__stdout__.fileno(),
+ 2:sys.__stderr__.fileno(),
}
# mypids will hold the pids of all processes created.
@@ -256,21 +299,40 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
fd_pipes[1] = pw
fd_pipes[2] = pw
- pid = os.fork()
+ # This caches the libc library lookup in the current
+ # process, so that it's only done once rather than
+ # for each child process.
+ if unshare_net or unshare_ipc:
+ find_library("c")
- if pid == 0:
- try:
- _exec(binary, mycommand, opt_name, fd_pipes,
- env, gid, groups, uid, umask, pre_exec)
- except SystemExit:
- raise
- except Exception as e:
- # We need to catch _any_ exception so that it doesn't
- # propagate out of this function and cause exiting
- # with anything other than os._exit()
- sys.stderr.write("%s:\n %s\n" % (e, " ".join(mycommand)))
- traceback.print_exc()
- sys.stderr.flush()
+ parent_pid = os.getpid()
+ pid = None
+ try:
+ pid = os.fork()
+
+ if pid == 0:
+ try:
+ _exec(binary, mycommand, opt_name, fd_pipes,
+ env, gid, groups, uid, umask, pre_exec, close_fds,
+ unshare_net, unshare_ipc, cgroup)
+ except SystemExit:
+ raise
+ except Exception as e:
+ # We need to catch _any_ exception so that it doesn't
+ # propagate out of this function and cause exiting
+ # with anything other than os._exit()
+ writemsg("%s:\n %s\n" % (e, " ".join(mycommand)),
+ noiselevel=-1)
+ traceback.print_exc()
+ sys.stderr.flush()
+
+ finally:
+ if pid == 0 or (pid is None and os.getpid() != parent_pid):
+ # Call os._exit() from a finally block in order
+ # to suppress any finally blocks from earlier
+ # in the call stack (see bug #345289). This
+ # finally block has to be setup before the fork
+ # in order to avoid a race condition.
os._exit(1)
if not isinstance(pid, int):
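The finally-based fork guard above is a general pattern: set up the finally block before fork() so that, no matter what the child raises, it can never return through the parent's call stack. A minimal standalone sketch:

    import os

    parent_pid = os.getpid()
    pid = None
    try:
        pid = os.fork()
        if pid == 0:
            raise RuntimeError("any child-side failure")
    finally:
        # True in the child (pid == 0), and also when fork() itself
        # raised after the child process had already been created.
        if pid == 0 or (pid is None and os.getpid() != parent_pid):
            os._exit(1)   # child exits here; parent falls through
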
@@ -278,7 +340,6 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
# Add the pid to our local and the global pid lists.
mypids.append(pid)
- spawned_pids.append(pid)
# If we started a tee process the write side of the pipe is no
# longer needed, so close it.
@@ -301,10 +362,6 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
# and wait for it.
retval = os.waitpid(pid, 0)[1]
- # When it's done, we can remove it from the
- # global pid list as well.
- spawned_pids.remove(pid)
-
if retval:
# If it failed, kill off anything else that
# isn't dead yet.
@@ -315,7 +372,6 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
if os.waitpid(pid, os.WNOHANG)[0] == 0:
os.kill(pid, signal.SIGTERM)
os.waitpid(pid, 0)
- spawned_pids.remove(pid)
# If it got a signal, return the signal that was sent.
if (retval & 0xff):
@@ -328,7 +384,7 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
return 0
def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
- pre_exec):
+ pre_exec, close_fds, unshare_net, unshare_ipc, cgroup):
"""
Execute a given binary with options
@@ -353,10 +409,16 @@ def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
@type umask: Integer
@param pre_exec: A function to be called with no arguments just prior to the exec call.
@type pre_exec: callable
+ @param unshare_net: If True, networking will be unshared from the spawned process
+ @type unshare_net: Boolean
+ @param unshare_ipc: If True, IPC will be unshared from the spawned process
+ @type unshare_ipc: Boolean
+ @param cgroup: CGroup path to bind the process to
+ @type cgroup: String
@rtype: None
@return: Never returns (calls os.execve)
"""
-
+
# If the process we're creating hasn't been given a name
# assign it the name of the executable.
if not opt_name:
@@ -371,6 +433,10 @@ def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
myargs = [opt_name]
myargs.extend(mycommand[1:])
+ # Avoid a potential UnicodeEncodeError from os.execve().
+ myargs = [_unicode_encode(x, encoding=_encodings['fs'],
+ errors='strict') for x in myargs]
+
# Use default signal handlers in order to avoid problems
# killing subprocesses as reported in bug #353239.
signal.signal(signal.SIGINT, signal.SIG_DFL)
@@ -383,15 +449,63 @@ def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
# the parent process (see bug #289486).
signal.signal(signal.SIGQUIT, signal.SIG_DFL)
- _setup_pipes(fd_pipes)
+ _setup_pipes(fd_pipes, close_fds=close_fds, inheritable=True)
+
+ # Add to cgroup
+ # it's better to do it from the child since we can guarantee
+ # it is done before we start forking children
+ if cgroup:
+ with open(os.path.join(cgroup, 'cgroup.procs'), 'a') as f:
+ f.write('%d\n' % os.getpid())
+
+ # Unshare (while still uid==0)
+ if unshare_net or unshare_ipc:
+ filename = find_library("c")
+ if filename is not None:
+ libc = LoadLibrary(filename)
+ if libc is not None:
+ CLONE_NEWIPC = 0x08000000
+ CLONE_NEWNET = 0x40000000
+
+ flags = 0
+ if unshare_net:
+ flags |= CLONE_NEWNET
+ if unshare_ipc:
+ flags |= CLONE_NEWIPC
+
+ try:
+ if libc.unshare(flags) != 0:
+ writemsg("Unable to unshare: %s\n" % (
+ errno.errorcode.get(ctypes.get_errno(), '?')),
+ noiselevel=-1)
+ else:
+ if unshare_net:
+ # 'up' the loopback
+ IFF_UP = 0x1
+ ifreq = struct.pack('16sh', b'lo', IFF_UP)
+ SIOCSIFFLAGS = 0x8914
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
+ try:
+ fcntl.ioctl(sock, SIOCSIFFLAGS, ifreq)
+ except IOError as e:
+ writemsg("Unable to enable loopback interface: %s\n" % (
+ errno.errorcode.get(e.errno, '?')),
+ noiselevel=-1)
+ sock.close()
+ except AttributeError:
+ # unshare() not supported by libc
+ pass
# Set requested process permissions.
if gid:
- os.setgid(gid)
+ # Cast proxies to int, in case it matters.
+ os.setgid(int(gid))
if groups:
os.setgroups(groups)
if uid:
- os.setuid(uid)
+ # Cast proxies to int, in case it matters.
+ os.setuid(int(uid))
if umask:
os.umask(umask)
if pre_exec:
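The unshare() call has no Python wrapper in this era, so it is reached through libc via ctypes; CLONE_NEWNET leaves the child with only a downed loopback interface, which the ioctl then brings up. A standalone sketch of the same sequence (Linux-only, requires root):

    import ctypes, ctypes.util, fcntl, socket, struct

    CLONE_NEWIPC = 0x08000000
    CLONE_NEWNET = 0x40000000

    libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
    if libc.unshare(CLONE_NEWNET | CLONE_NEWIPC) != 0:
        raise OSError(ctypes.get_errno(), "unshare failed")

    # Bring up 'lo' inside the fresh network namespace.
    IFF_UP = 0x1
    SIOCSIFFLAGS = 0x8914
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
    fcntl.ioctl(sock, SIOCSIFFLAGS, struct.pack('16sh', b'lo', IFF_UP))
    sock.close()
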
@@ -400,9 +514,16 @@ def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
# And switch to the new process.
os.execve(binary, myargs, env)
-def _setup_pipes(fd_pipes, close_fds=True):
+def _setup_pipes(fd_pipes, close_fds=True, inheritable=None):
"""Setup pipes for a forked process.
+ Even when close_fds is False, file descriptors referenced as
+ values in fd_pipes are automatically closed if they do not also
+ occur as keys in fd_pipes. It is assumed that the caller will
+ explicitly add them to the fd_pipes keys if they are intended
+ to remain open. This allows for convenient elimination of
+ unnecessary duplicate file descriptors.
+
WARNING: When not followed by exec, the close_fds behavior
can trigger interference from destructors that close file
descriptors. This interference happens when the garbage
@@ -413,22 +534,92 @@ def _setup_pipes(fd_pipes, close_fds=True):
and also with CPython under some circumstances (as triggered
by xmpppy in bug #374335). In order to close a safe subset of
file descriptors, see portage.locks._close_fds().
+
+ NOTE: When not followed by exec, even when close_fds is False,
+ it's still possible for dup2() calls to cause interference in a
+ way that's similar to the way that close_fds interferes (since
+ dup2() has to close the target fd if it happens to be open).
+ It's possible to avoid such interference by using allocated
+ file descriptors as the keys in fd_pipes. For example:
+
+ pr, pw = os.pipe()
+ fd_pipes[pw] = pw
+
+ By using the allocated pw file descriptor as the key in fd_pipes,
+ it's not necessary for dup2() to close a file descriptor (it
+ actually does nothing in this case), which avoids possible
+ interference.
"""
- my_fds = {}
+
+ reverse_map = {}
# To protect from cases where direct assignment could
- # clobber needed fds ({1:2, 2:1}) we first dupe the fds
- # into unused fds.
- for fd in fd_pipes:
- my_fds[fd] = os.dup(fd_pipes[fd])
- # Then assign them to what they should be.
- for fd in my_fds:
- os.dup2(my_fds[fd], fd)
+ # clobber needed fds ({1:2, 2:1}) we create a reverse map
+ # in order to know when it's necessary to create temporary
+ # backup copies with os.dup().
+ for newfd, oldfd in fd_pipes.items():
+ newfds = reverse_map.get(oldfd)
+ if newfds is None:
+ newfds = []
+ reverse_map[oldfd] = newfds
+ newfds.append(newfd)
+
+ # Assign newfds via dup2(), making temporary backups when
+ # necessary, and closing oldfd if the caller has not
+ # explicitly requested for it to remain open by adding
+ # it to the keys of fd_pipes.
+ while reverse_map:
+
+ oldfd, newfds = reverse_map.popitem()
+ old_fdflags = None
+
+ for newfd in newfds:
+ if newfd in reverse_map:
+ # Make a temporary backup before re-assignment, assuming
+ # that backup_fd won't collide with a key in reverse_map
+ # (since all of the keys correspond to open file
+ # descriptors, and os.dup() only allocates a previously
+ # unused file descriptor).
+ backup_fd = os.dup(newfd)
+ reverse_map[backup_fd] = reverse_map.pop(newfd)
+
+ if oldfd != newfd:
+ os.dup2(oldfd, newfd)
+ if _set_inheritable is not None:
+ # Don't do this unless _set_inheritable is available,
+ # since it's used below to ensure correct state, and
+ # otherwise /dev/null stdin fails to inherit (at least
+ # with Python versions from 3.1 to 3.3).
+ if old_fdflags is None:
+ old_fdflags = fcntl.fcntl(oldfd, fcntl.F_GETFD)
+ fcntl.fcntl(newfd, fcntl.F_SETFD, old_fdflags)
+
+ if _set_inheritable is not None:
+
+ inheritable_state = None
+ if not (old_fdflags is None or _FD_CLOEXEC is None):
+ inheritable_state = not bool(old_fdflags & _FD_CLOEXEC)
+
+ if inheritable is not None:
+ if inheritable_state is not inheritable:
+ _set_inheritable(newfd, inheritable)
+
+ elif newfd in (0, 1, 2):
+ if inheritable_state is not True:
+ _set_inheritable(newfd, True)
+
+ if oldfd not in fd_pipes:
+ # If oldfd is not a key in fd_pipes, then it's safe
+ # to close now, since we've already made all of the
+ # requested duplicates. This also closes every
+ # backup_fd that may have been created on previous
+ # iterations of this loop.
+ os.close(oldfd)
if close_fds:
# Then close _all_ fds that haven't been explicitly
# requested to be kept open.
for fd in get_open_fds():
- if fd not in my_fds:
+ if fd not in fd_pipes:
try:
os.close(fd)
except OSError:
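The reverse map exists for exactly the clobber case named in the old comment: with {1: 2, 2: 1}, a naive dup2(2, 1) would destroy the original fd 1 before it could be copied onto fd 2. A usage sketch:

    _setup_pipes({1: 2, 2: 1})   # swaps stdout and stderr safely; the
                                 # endangered fd is preserved via os.dup()
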
@@ -443,8 +634,16 @@ def find_binary(binary):
@rtype: None or string
@return: full path to binary or None if the binary could not be located.
"""
- for path in os.environ.get("PATH", "").split(":"):
- filename = "%s/%s" % (path, binary)
- if os.access(filename, os.X_OK) and os.path.isfile(filename):
+ paths = os.environ.get("PATH", "")
+ if sys.hexversion >= 0x3000000 and isinstance(binary, bytes):
+ # return bytes when input is bytes
+ paths = paths.encode(sys.getfilesystemencoding(), 'surrogateescape')
+ paths = paths.split(b':')
+ else:
+ paths = paths.split(':')
+
+ for path in paths:
+ filename = _os.path.join(path, binary)
+ if _os.access(filename, os.X_OK) and _os.path.isfile(filename):
return filename
return None
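find_binary() now mirrors the type of its argument on Python 3, which matters for callers that work with byte paths. For example:

    find_binary("bash")     # e.g. '/bin/bash', or None when not in PATH
    find_binary(b"bash")    # bytes in, bytes out: e.g. b'/bin/bash'
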
diff --git a/pym/portage/proxy/lazyimport.py b/pym/portage/proxy/lazyimport.py
index ad4a54271..5aa7e50ca 100644
--- a/pym/portage/proxy/lazyimport.py
+++ b/pym/portage/proxy/lazyimport.py
@@ -1,4 +1,4 @@
-# Copyright 2009 Gentoo Foundation
+# Copyright 2009-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['lazyimport']
@@ -14,6 +14,7 @@ except ImportError:
from portage.proxy.objectproxy import ObjectProxy
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
_module_proxies = {}
@@ -32,7 +33,7 @@ def _preload_portage_submodules():
while True:
remaining = False
for name in list(_module_proxies):
- if name.startswith('portage.'):
+ if name.startswith('portage.') or name.startswith('_emerge.'):
if name in imported:
continue
imported.add(name)
diff --git a/pym/portage/proxy/objectproxy.py b/pym/portage/proxy/objectproxy.py
index 92b36d111..a755774ae 100644
--- a/pym/portage/proxy/objectproxy.py
+++ b/pym/portage/proxy/objectproxy.py
@@ -1,4 +1,4 @@
-# Copyright 2008-2009 Gentoo Foundation
+# Copyright 2008-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -30,6 +30,13 @@ class ObjectProxy(object):
result = object.__getattribute__(self, '_get_target')()
return result(*args, **kwargs)
+ def __enter__(self):
+ return object.__getattribute__(self, '_get_target')().__enter__()
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ return object.__getattribute__(self, '_get_target')().__exit__(
+ exc_type, exc_value, traceback)
+
def __setitem__(self, key, value):
object.__getattribute__(self, '_get_target')()[key] = value
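Forwarding __enter__/__exit__ lets a proxy stand in for its target inside a with statement. A minimal sketch of a proxy over an already-open file (the attribute plumbing follows the ObjectProxy conventions; the path is illustrative):

    class _FileProxy(ObjectProxy):
        def __init__(self, fileobj):
            ObjectProxy.__init__(self)
            object.__setattr__(self, '_fileobj', fileobj)
        def _get_target(self):
            return object.__getattribute__(self, '_fileobj')

    with _FileProxy(open("/etc/hostname")) as f:
        data = f.read()
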
diff --git a/pym/portage/repository/config.py b/pym/portage/repository/config.py
index 9b43f3872..5e0d05523 100644
--- a/pym/portage/repository/config.py
+++ b/pym/portage/repository/config.py
@@ -1,6 +1,8 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import io
import logging
import warnings
@@ -8,25 +10,35 @@ import sys
import re
try:
- from configparser import ParsingError
+ from configparser import Error as ConfigParserError
if sys.hexversion >= 0x3020000:
from configparser import ConfigParser as SafeConfigParser
else:
from configparser import SafeConfigParser
except ImportError:
- from ConfigParser import SafeConfigParser, ParsingError
+ from ConfigParser import SafeConfigParser, Error as ConfigParserError
+import portage
from portage import eclass_cache, os
from portage.const import (MANIFEST2_HASH_FUNCTIONS, MANIFEST2_REQUIRED_HASH,
- REPO_NAME_LOC, USER_CONFIG_PATH)
+ PORTAGE_BASE_PATH, REPO_NAME_LOC, USER_CONFIG_PATH)
+from portage.eapi import eapi_allows_directories_on_profile_level_and_repository_level
from portage.env.loaders import KeyValuePairFileLoader
from portage.util import (normalize_path, read_corresponding_eapi_file, shlex_split,
- stack_lists, writemsg, writemsg_level)
+ stack_lists, writemsg, writemsg_level, _recursive_file_list)
+from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
from portage.localization import _
from portage import _unicode_decode
from portage import _unicode_encode
from portage import _encodings
from portage import manifest
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+# Characters prohibited by repoman's file.name check.
+_invalid_path_char_re = re.compile(r'[^a-zA-Z0-9._\-+:/]')
+
_valid_profile_formats = frozenset(
['pms', 'portage-1', 'portage-2'])
@@ -48,38 +60,76 @@ def _gen_valid_repo(name):
name = None
return name
+def _find_invalid_path_char(path, pos=0, endpos=None):
+ """
+ Returns the position of the first invalid character found in path,
+ or -1 if no invalid characters are found.
+ """
+ if endpos is None:
+ endpos = len(path)
+
+ m = _invalid_path_char_re.search(path, pos=pos, endpos=endpos)
+ if m is not None:
+ return m.start()
+
+ return -1
+
class RepoConfig(object):
"""Stores config of one repository"""
__slots__ = ('aliases', 'allow_missing_manifest', 'allow_provide_virtual',
'cache_formats', 'create_manifest', 'disable_manifest', 'eapi',
- 'eclass_db', 'eclass_locations', 'eclass_overrides', 'format', 'location',
+ 'eclass_db', 'eclass_locations', 'eclass_overrides',
+ 'find_invalid_path_char', 'force', 'format', 'local_config', 'location',
'main_repo', 'manifest_hashes', 'masters', 'missing_repo_name',
'name', 'portage1_profiles', 'portage1_profiles_compat', 'priority',
- 'profile_formats', 'sign_commit', 'sign_manifest', 'sync',
- 'thin_manifest', 'update_changelog', 'user_location')
+ 'profile_formats', 'sign_commit', 'sign_manifest', 'sync_cvs_repo',
+ 'sync_type', 'sync_uri', 'thin_manifest', 'update_changelog',
+ 'user_location', '_eapis_banned', '_eapis_deprecated', '_masters_orig')
- def __init__(self, name, repo_opts):
+ def __init__(self, name, repo_opts, local_config=True):
"""Build a RepoConfig with options in repo_opts
Try to read repo_name in the repository location, but if
it is not found, use the variable name as the repository name"""
- aliases = repo_opts.get('aliases')
- if aliases is not None:
- aliases = tuple(aliases.split())
+
+ force = repo_opts.get('force')
+ if force is not None:
+ force = tuple(force.split())
+ self.force = force
+ if force is None:
+ force = ()
+
+ self.local_config = local_config
+
+ if local_config or 'aliases' in force:
+ aliases = repo_opts.get('aliases')
+ if aliases is not None:
+ aliases = tuple(aliases.split())
+ else:
+ aliases = None
+
self.aliases = aliases
- eclass_overrides = repo_opts.get('eclass-overrides')
- if eclass_overrides is not None:
- eclass_overrides = tuple(eclass_overrides.split())
+ if local_config or 'eclass-overrides' in force:
+ eclass_overrides = repo_opts.get('eclass-overrides')
+ if eclass_overrides is not None:
+ eclass_overrides = tuple(eclass_overrides.split())
+ else:
+ eclass_overrides = None
+
self.eclass_overrides = eclass_overrides
# Eclass databases and locations are computed later.
self.eclass_db = None
self.eclass_locations = None
- # Masters from repos.conf override layout.conf.
- masters = repo_opts.get('masters')
- if masters is not None:
- masters = tuple(masters.split())
+ if local_config or 'masters' in force:
+ # Masters from repos.conf override layout.conf.
+ masters = repo_opts.get('masters')
+ if masters is not None:
+ masters = tuple(masters.split())
+ else:
+ masters = None
+
self.masters = masters
#The main-repo key only makes sense for the 'DEFAULT' section.
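The _find_invalid_path_char() helper introduced earlier in this hunk applies repoman's file.name character set to an arbitrary path. For example:

    _find_invalid_path_char("files/foo bar.patch")   # -> 9 (the space)
    _find_invalid_path_char("files/foo.patch")       # -> -1 (all valid)
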
@@ -93,11 +143,22 @@ class RepoConfig(object):
priority = None
self.priority = priority
- sync = repo_opts.get('sync')
- if sync is not None:
- sync = sync.strip()
- self.sync = sync
+ sync_cvs_repo = repo_opts.get('sync-cvs-repo')
+ if sync_cvs_repo is not None:
+ sync_cvs_repo = sync_cvs_repo.strip()
+ self.sync_cvs_repo = sync_cvs_repo or None
+
+ sync_type = repo_opts.get('sync-type')
+ if sync_type is not None:
+ sync_type = sync_type.strip()
+ self.sync_type = sync_type or None
+ sync_uri = repo_opts.get('sync-uri')
+ if sync_uri is not None:
+ sync_uri = sync_uri.strip()
+ self.sync_uri = sync_uri or None
+
+ # Not implemented.
format = repo_opts.get('format')
if format is not None:
format = format.strip()
@@ -106,7 +167,7 @@ class RepoConfig(object):
location = repo_opts.get('location')
self.user_location = location
if location is not None and location.strip():
- if os.path.isdir(location):
+ if os.path.isdir(location) or portage._sync_mode:
location = os.path.realpath(location)
else:
location = None
@@ -114,14 +175,23 @@ class RepoConfig(object):
eapi = None
missing = True
+ self.name = name
if self.location is not None:
eapi = read_corresponding_eapi_file(os.path.join(self.location, REPO_NAME_LOC))
- name, missing = self._read_valid_repo_name(self.location)
- elif name == "DEFAULT":
+ self.name, missing = self._read_valid_repo_name(self.location)
+ if missing:
+ # The name from repos.conf has to be used here for
+ # things like emerge-webrsync to work when the repo
+ # is empty (bug #484950).
+ if name is not None:
+ self.name = name
+ if portage._sync_mode:
+ missing = False
+
+ elif name == "DEFAULT":
missing = False
self.eapi = eapi
- self.name = name
self.missing_repo_name = missing
# sign_commit is disabled by default, since it requires Git >=1.7.9,
# and key_id configured by `git config user.signingkey key_id`
@@ -137,18 +207,20 @@ class RepoConfig(object):
self.cache_formats = None
self.portage1_profiles = True
self.portage1_profiles_compat = False
+ self.find_invalid_path_char = _find_invalid_path_char
+ self._masters_orig = None
# Parse layout.conf.
if self.location:
- layout_filename = os.path.join(self.location, "metadata", "layout.conf")
layout_data = parse_layout_conf(self.location, self.name)[0]
+ self._masters_orig = layout_data['masters']
# layout.conf masters may be overridden here if we have a masters
# setting from the user's repos.conf
if self.masters is None:
self.masters = layout_data['masters']
- if layout_data['aliases']:
+ if (local_config or 'aliases' in force) and layout_data['aliases']:
aliases = self.aliases
if aliases is None:
aliases = ()
@@ -156,6 +228,12 @@ class RepoConfig(object):
# them the ability to do incremental overrides
self.aliases = layout_data['aliases'] + tuple(aliases)
+ if layout_data['repo-name']:
+ # allow layout.conf to override repository name
+ # useful when having two copies of the same repo enabled
+ # to avoid modifying profiles/repo_name in one of them
+ self.name = layout_data['repo-name']
+
for value in ('allow-missing-manifest',
'allow-provide-virtual', 'cache-formats',
'create-manifest', 'disable-manifest', 'manifest-hashes',
@@ -163,9 +241,19 @@ class RepoConfig(object):
'sign-commit', 'sign-manifest', 'thin-manifest', 'update-changelog'):
setattr(self, value.lower().replace("-", "_"), layout_data[value])
- self.portage1_profiles = any(x in _portage1_profiles_allow_directories
- for x in layout_data['profile-formats'])
- self.portage1_profiles_compat = layout_data['profile-formats'] == ('portage-1-compat',)
+ self.portage1_profiles = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
+ any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
+ self.portage1_profiles_compat = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
+ layout_data['profile-formats'] == ('portage-1-compat',)
+
+ self._eapis_banned = frozenset(layout_data['eapis-banned'])
+ self._eapis_deprecated = frozenset(layout_data['eapis-deprecated'])
+
+ def eapi_is_banned(self, eapi):
+ return eapi in self._eapis_banned
+
+ def eapi_is_deprecated(self, eapi):
+ return eapi in self._eapis_deprecated
def iter_pregenerated_caches(self, auxdbkeys, readonly=True, force=False):
"""
@@ -178,7 +266,11 @@ class RepoConfig(object):
if not formats:
if not force:
return
- formats = ('pms',)
+ # The default egencache format was 'pms' prior to portage-2.1.11.32
+ # (portage versions prior to portage-2.1.11.14 will NOT
+ # recognize md5-dict format unless it is explicitly listed in
+ # layout.conf).
+ formats = ('md5-dict',)
for fmt in formats:
name = None
@@ -209,7 +301,8 @@ class RepoConfig(object):
kwds['hashes'] = self.manifest_hashes
if self.disable_manifest:
kwds['from_scratch'] = True
- return manifest.Manifest(*args, **kwds)
+ kwds['find_invalid_path_char'] = self.find_invalid_path_char
+ return manifest.Manifest(*args, **portage._native_kwargs(kwds))
def update(self, new_repo):
"""Update repository with options in another RepoConfig"""
@@ -272,8 +365,12 @@ class RepoConfig(object):
repo_msg.append(indent + "format: " + self.format)
if self.user_location:
repo_msg.append(indent + "location: " + self.user_location)
- if self.sync:
- repo_msg.append(indent + "sync: " + self.sync)
+ if self.sync_cvs_repo:
+ repo_msg.append(indent + "sync-cvs-repo: " + self.sync_cvs_repo)
+ if self.sync_type:
+ repo_msg.append(indent + "sync-type: " + self.sync_type)
+ if self.sync_uri:
+ repo_msg.append(indent + "sync-uri: " + self.sync_uri)
if self.masters:
repo_msg.append(indent + "masters: " + " ".join(master.name for master in self.masters))
if self.priority is not None:
@@ -281,19 +378,19 @@ class RepoConfig(object):
if self.aliases:
repo_msg.append(indent + "aliases: " + " ".join(self.aliases))
if self.eclass_overrides:
- repo_msg.append(indent + "eclass_overrides: " + \
+ repo_msg.append(indent + "eclass-overrides: " + \
" ".join(self.eclass_overrides))
repo_msg.append("")
return "\n".join(repo_msg)
def __repr__(self):
- return "<portage.repository.config.RepoConfig(name='%s', location='%s')>" % (self.name, _unicode_decode(self.location))
+ return "<portage.repository.config.RepoConfig(name=%r, location=%r)>" % (self.name, _unicode_decode(self.location))
def __str__(self):
d = {}
for k in self.__slots__:
d[k] = getattr(self, k, None)
- return _unicode_decode("%s") % (d,)
+ return "%s" % (d,)
if sys.hexversion < 0x3000000:
@@ -306,11 +403,14 @@ class RepoConfigLoader(object):
"""Loads and store config of several repositories, loaded from PORTDIR_OVERLAY or repos.conf"""
@staticmethod
- def _add_repositories(portdir, portdir_overlay, prepos, ignored_map, ignored_location_map):
+ def _add_repositories(portdir, portdir_overlay, prepos,
+ ignored_map, ignored_location_map, local_config, default_portdir):
"""Add overlays in PORTDIR_OVERLAY as repositories"""
overlays = []
+ portdir_orig = None
if portdir:
portdir = normalize_path(portdir)
+ portdir_orig = portdir
overlays.append(portdir)
try:
port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
@@ -344,43 +444,57 @@ class RepoConfigLoader(object):
#overlay priority is negative because we want overlays to be consulted before any other repo
base_priority = 0
for ov in overlays:
- if os.path.isdir(ov):
+ # Ignore missing directory for 'gentoo' so that
+ # the first sync with emerge-webrsync is possible.
+ if isdir_raise_eaccess(ov) or \
+ (base_priority == 0 and ov is portdir):
repo_opts = default_repo_opts.copy()
repo_opts['location'] = ov
- repo = RepoConfig(None, repo_opts)
+ repo = RepoConfig(None, repo_opts, local_config=local_config)
# repos_conf_opts contains options from repos.conf
repos_conf_opts = repos_conf.get(repo.name)
if repos_conf_opts is not None:
# Selectively copy only the attributes which
# repos.conf is allowed to override.
- for k in ('aliases', 'eclass_overrides', 'masters', 'priority'):
+ for k in ('aliases', 'eclass_overrides', 'force', 'masters',
+ 'priority', 'sync_cvs_repo', 'sync_type', 'sync_uri'):
v = getattr(repos_conf_opts, k, None)
if v is not None:
setattr(repo, k, v)
if repo.name in prepos:
+ # Silently ignore when PORTDIR overrides the location
+ # setting from the default repos.conf (bug #478544).
old_location = prepos[repo.name].location
- if old_location is not None and old_location != repo.location:
+ if old_location is not None and \
+ old_location != repo.location and \
+ not (base_priority == 0 and
+ old_location == default_portdir):
ignored_map.setdefault(repo.name, []).append(old_location)
ignored_location_map[old_location] = repo.name
if old_location == portdir:
portdir = repo.user_location
- if ov == portdir and portdir not in port_ov:
- repo.priority = -1000
- elif repo.priority is None:
- repo.priority = base_priority
- base_priority += 1
+ if repo.priority is None:
+ if base_priority == 0 and ov == portdir_orig:
+ # If it's the original PORTDIR setting and it's not
+ # in PORTDIR_OVERLAY, then it will be assigned a
+ # special priority setting later.
+ pass
+ else:
+ repo.priority = base_priority
+ base_priority += 1
prepos[repo.name] = repo
else:
- writemsg(_("!!! Invalid PORTDIR_OVERLAY"
- " (not a dir): '%s'\n") % ov, noiselevel=-1)
+
+ if not portage._sync_mode:
+ writemsg(_("!!! Invalid PORTDIR_OVERLAY (not a dir): '%s'\n") % ov, noiselevel=-1)
return portdir
@staticmethod
- def _parse(paths, prepos, ignored_map, ignored_location_map):
+ def _parse(paths, prepos, ignored_map, ignored_location_map, local_config, portdir):
"""Parse files in paths to load config"""
parser = SafeConfigParser()
@@ -388,49 +502,78 @@ class RepoConfigLoader(object):
try:
# Python >=3.2
read_file = parser.read_file
+ source_kwarg = 'source'
except AttributeError:
read_file = parser.readfp
+ source_kwarg = 'filename'
+ recursive_paths = []
for p in paths:
- f = None
- try:
- f = io.open(_unicode_encode(p,
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'],
- errors='replace')
- except EnvironmentError:
- pass
+ if isinstance(p, basestring):
+ recursive_paths.extend(_recursive_file_list(p))
else:
+ recursive_paths.append(p)
+
+ for p in recursive_paths:
+ if isinstance(p, basestring):
+ f = None
try:
- read_file(f)
- except ParsingError as e:
- writemsg(_unicode_decode(
- _("!!! Error while reading repo config file: %s\n")
- ) % e, noiselevel=-1)
- finally:
- if f is not None:
- f.close()
-
- prepos['DEFAULT'] = RepoConfig("DEFAULT", parser.defaults())
+ f = io.open(_unicode_encode(p,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ except EnvironmentError:
+ pass
+ else:
+ # The 'source' keyword argument is needed since otherwise
+ # ConfigParser in Python <3.3.3 may throw a TypeError
+ # because it assumes that f.name is a native string rather
+ # than binary when constructing error messages.
+ kwargs = {source_kwarg: p}
+ read_file(f, **portage._native_kwargs(kwargs))
+ finally:
+ if f is not None:
+ f.close()
+ elif isinstance(p, io.StringIO):
+ kwargs = {source_kwarg: "<io.StringIO>"}
+ read_file(p, **portage._native_kwargs(kwargs))
+ else:
+ raise TypeError("Unsupported type %r of element %r of 'paths' argument" % (type(p), p))
+
+ prepos['DEFAULT'] = RepoConfig("DEFAULT",
+ parser.defaults(), local_config=local_config)
+
for sname in parser.sections():
optdict = {}
for oname in parser.options(sname):
optdict[oname] = parser.get(sname, oname)
- repo = RepoConfig(sname, optdict)
- if repo.location and not os.path.exists(repo.location):
- writemsg(_("!!! Invalid repos.conf entry '%s'"
- " (not a dir): '%s'\n") % (sname, repo.location), noiselevel=-1)
+ repo = RepoConfig(sname, optdict, local_config=local_config)
+
+ if repo.sync_type is not None and repo.sync_uri is None:
+ writemsg_level("!!! %s\n" % _("Repository '%s' has sync-type attribute, but is missing sync-uri attribute") %
+ sname, level=logging.ERROR, noiselevel=-1)
continue
- if repo.name in prepos:
- old_location = prepos[repo.name].location
- if old_location is not None and repo.location is not None and old_location != repo.location:
- ignored_map.setdefault(repo.name, []).append(old_location)
- ignored_location_map[old_location] = repo.name
- prepos[repo.name].update(repo)
- else:
- prepos[repo.name] = repo
+ if repo.sync_uri is not None and repo.sync_type is None:
+ writemsg_level("!!! %s\n" % _("Repository '%s' has sync-uri attribute, but is missing sync-type attribute") %
+ sname, level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if repo.sync_type not in (None, "cvs", "git", "rsync"):
+ writemsg_level("!!! %s\n" % _("Repository '%s' has sync-type attribute set to unsupported value: '%s'") %
+ (sname, repo.sync_type), level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if repo.sync_type == "cvs" and repo.sync_cvs_repo is None:
+ writemsg_level("!!! %s\n" % _("Repository '%s' has sync-type=cvs, but is missing sync-cvs-repo attribute") %
+ sname, level=logging.ERROR, noiselevel=-1)
+ continue
+
+ # For backward compatibility with locations set via PORTDIR and
+ # PORTDIR_OVERLAY, delay validation of the location and repo.name
+ # until after PORTDIR and PORTDIR_OVERLAY have been processed.
+ prepos[sname] = repo
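
_parse() now accepts io.StringIO objects alongside file paths and rejects half-specified sync settings: sync-type and sync-uri must appear together, and sync-type = cvs additionally requires sync-cvs-repo. A hedged sketch of the input shape it validates (location and URI values are illustrative):

import io

try:
    from configparser import ConfigParser  # Python 3
except ImportError:
    from ConfigParser import SafeConfigParser as ConfigParser  # Python 2

REPOS_CONF = u"""\
[DEFAULT]
main-repo = gentoo

[gentoo]
location = /usr/portage
sync-type = rsync
sync-uri = rsync://rsync.gentoo.org/gentoo-portage
"""

parser = ConfigParser()
if hasattr(parser, "read_string"):  # Python >= 3.2
    parser.read_string(REPOS_CONF)
else:
    parser.readfp(io.StringIO(REPOS_CONF))
for sname in parser.sections():
    print(sname, dict(parser.items(sname)))
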
def __init__(self, paths, settings):
"""Load config from files in paths"""
@@ -441,15 +584,42 @@ class RepoConfigLoader(object):
ignored_map = {}
ignored_location_map = {}
- portdir = settings.get('PORTDIR', '')
- portdir_overlay = settings.get('PORTDIR_OVERLAY', '')
+ if "PORTAGE_REPOSITORIES" in settings:
+ portdir = ""
+ portdir_overlay = ""
+ portdir_sync = ""
+ else:
+ portdir = settings.get("PORTDIR", "")
+ portdir_overlay = settings.get("PORTDIR_OVERLAY", "")
+ portdir_sync = settings.get("SYNC", "")
- self._parse(paths, prepos, ignored_map, ignored_location_map)
+ try:
+ self._parse(paths, prepos, ignored_map,
+ ignored_location_map, settings.local_config,
+ portdir)
+ except ConfigParserError as e:
+ writemsg(
+ _("!!! Error while reading repo config file: %s\n") % e,
+ noiselevel=-1)
+ # The configparser state is unreliable (prone to quirky
+ # exceptions) after it has thrown an error, so use empty
+ # config and try to fall back to PORTDIR{,_OVERLAY}.
+ prepos.clear()
+ prepos['DEFAULT'] = RepoConfig('DEFAULT',
+ {}, local_config=settings.local_config)
+ location_map.clear()
+ treemap.clear()
+ ignored_map.clear()
+ ignored_location_map.clear()
+
+ default_portdir = os.path.join(os.sep,
+ settings['EPREFIX'].lstrip(os.sep), 'usr', 'portage')
# If PORTDIR_OVERLAY contains a repo with the same repo_name as
# PORTDIR, then PORTDIR is overridden.
portdir = self._add_repositories(portdir, portdir_overlay, prepos,
- ignored_map, ignored_location_map)
+ ignored_map, ignored_location_map, settings.local_config,
+ default_portdir)
if portdir and portdir.strip():
portdir = os.path.realpath(portdir)
@@ -460,9 +630,51 @@ class RepoConfigLoader(object):
for repo in prepos.values()
if repo.location is not None and repo.missing_repo_name)
- #Take aliases into account.
- new_prepos = {}
- for repo_name, repo in prepos.items():
+ # Do this before expanding aliases, so that location_map and
+ # treemap consistently map unaliased names whenever available.
+ for repo_name, repo in list(prepos.items()):
+ if repo.location is None:
+ if repo_name != 'DEFAULT':
+ # Skip this warning for repoman (bug #474578).
+ if settings.local_config and paths:
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf is missing location attribute") %
+ repo.name, level=logging.ERROR, noiselevel=-1)
+ del prepos[repo_name]
+ continue
+ else:
+ if not portage._sync_mode:
+ if not isdir_raise_eaccess(repo.location):
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf has location attribute set "
+ "to nonexistent directory: '%s'") %
+ (repo_name, repo.location), level=logging.ERROR, noiselevel=-1)
+
+ # Ignore missing directory for 'gentoo' so that
+ # first sync with emerge-webrsync is possible.
+ if repo.name != 'gentoo':
+ del prepos[repo_name]
+ continue
+
+ # After removing support for PORTDIR_OVERLAY, the following check can be:
+ # if repo.missing_repo_name:
+ if repo.missing_repo_name and repo.name != repo_name:
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf refers to repository "
+ "without repository name set in '%s'") %
+ (repo_name, os.path.join(repo.location, REPO_NAME_LOC)), level=logging.ERROR, noiselevel=-1)
+ del prepos[repo_name]
+ continue
+
+ if repo.name != repo_name:
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf has name different "
+ "from repository name '%s' set inside repository") %
+ (repo_name, repo.name), level=logging.ERROR, noiselevel=-1)
+ del prepos[repo_name]
+ continue
+
+ location_map[repo.location] = repo_name
+ treemap[repo_name] = repo.location
+
+ # Add alias mappings, but never replace unaliased mappings.
+ for repo_name, repo in list(prepos.items()):
names = set()
names.add(repo_name)
if repo.aliases:
@@ -470,36 +682,55 @@ class RepoConfigLoader(object):
names.update(aliases)
for name in names:
- if name in new_prepos:
+ if name in prepos and prepos[name].location is not None:
+ if name == repo_name:
+ # unaliased names already handled earlier
+ continue
writemsg_level(_("!!! Repository name or alias '%s', " + \
"defined for repository '%s', overrides " + \
"existing alias or repository.\n") % (name, repo_name), level=logging.WARNING, noiselevel=-1)
- new_prepos[name] = repo
- prepos = new_prepos
+ # Never replace an unaliased mapping with
+ # an aliased mapping.
+ continue
+ prepos[name] = repo
+ if repo.location is not None:
+ if repo.location not in location_map:
+ # Never replace an unaliased mapping with
+ # an aliased mapping.
+ location_map[repo.location] = name
+ treemap[name] = repo.location
+
+ main_repo = prepos['DEFAULT'].main_repo
+ if main_repo is None or main_repo not in prepos:
+ # Set main_repo if it was not set in repos.conf.
+ main_repo = location_map.get(portdir)
+ if main_repo is not None:
+ prepos['DEFAULT'].main_repo = main_repo
+ else:
+ prepos['DEFAULT'].main_repo = None
+ if portdir and not portage._sync_mode:
+ writemsg(_("!!! main-repo not set in DEFAULT and PORTDIR is empty.\n"), noiselevel=-1)
- for (name, r) in prepos.items():
- if r.location is not None:
- location_map[r.location] = name
- treemap[name] = r.location
+ if main_repo is not None and prepos[main_repo].priority is None:
+ # This happens if main-repo has been set in repos.conf.
+ prepos[main_repo].priority = -1000
- # filter duplicates from aliases, by only including
- # items where repo.name == key
+ # Backward compatible SYNC support for mirrorselect.
+ if portdir_sync and main_repo is not None:
+ if portdir_sync.startswith("rsync://"):
+ prepos[main_repo].sync_uri = portdir_sync
+ prepos[main_repo].sync_type = "rsync"
- prepos_order = sorted(prepos.items(), key=lambda r:r[1].priority or 0)
+ # Include repo.name in sort key, for predictable sorting
+ # even when priorities are equal.
+ prepos_order = sorted(prepos.items(),
+ key=lambda r:(r[1].priority or 0, r[1].name))
+ # filter duplicates from aliases, by only including
+ # items where repo.name == key
prepos_order = [repo.name for (key, repo) in prepos_order
- if repo.name == key and repo.location is not None]
-
- if prepos['DEFAULT'].main_repo is None or \
- prepos['DEFAULT'].main_repo not in prepos:
- #setting main_repo if it was not set in repos.conf
- if portdir in location_map:
- prepos['DEFAULT'].main_repo = location_map[portdir]
- elif portdir in ignored_location_map:
- prepos['DEFAULT'].main_repo = ignored_location_map[portdir]
- else:
- prepos['DEFAULT'].main_repo = None
- writemsg(_("!!! main-repo not set in DEFAULT and PORTDIR is empty. \n"), noiselevel=-1)
+ if repo.name == key and key != 'DEFAULT' and
+ repo.location is not None]
self.prepos = prepos
self.prepos_order = prepos_order
@@ -578,6 +809,18 @@ class RepoConfigLoader(object):
eclass_db.append(tree_db)
repo.eclass_db = eclass_db
+ for repo_name, repo in prepos.items():
+ if repo_name == "DEFAULT":
+ continue
+
+ if repo._masters_orig is None and self.mainRepo() and \
+ repo.name != self.mainRepo().name and not portage._sync_mode:
+ # TODO: Delete masters code in pym/portage/tests/resolver/ResolverPlayground.py when deleting this warning.
+ writemsg_level("!!! %s\n" % _("Repository '%s' is missing masters attribute in '%s'") %
+ (repo.name, os.path.join(repo.location, "metadata", "layout.conf")) +
+ "!!! %s\n" % _("Set 'masters = %s' in this file for future compatibility") %
+ self.mainRepo().name, level=logging.WARNING, noiselevel=-1)
+
self._prepos_changed = True
self._repo_location_list = []
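
The warning above asks overlay maintainers to declare their masters explicitly in metadata/layout.conf. A hedged sketch of the one-line fix it suggests (overlay path and master name are hypothetical):

import io
import os

layout_conf = os.path.join(
    "/var/lib/overlays/local", "metadata", "layout.conf")
with io.open(layout_conf, "a") as f:
    f.write(u"masters = gentoo\n")
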
@@ -613,10 +856,10 @@ class RepoConfigLoader(object):
def mainRepo(self):
"""Returns the main repo"""
- maid_repo = self.prepos['DEFAULT'].main_repo
- if maid_repo is None:
+ main_repo = self.prepos['DEFAULT'].main_repo
+ if main_repo is None:
return None
- return self.prepos[maid_repo]
+ return self.prepos[main_repo]
def _check_locations(self):
"""Check if repositories location are correct and show a warning message if not"""
@@ -625,7 +868,7 @@ class RepoConfigLoader(object):
if r.location is None:
writemsg(_("!!! Location not set for repository %s\n") % name, noiselevel=-1)
else:
- if not os.path.isdir(r.location):
+ if not isdir_raise_eaccess(r.location) and not portage._sync_mode:
self.prepos_order.remove(name)
writemsg(_("!!! Invalid Repository Location"
" (not a dir): '%s'\n") % r.location, noiselevel=-1)
@@ -650,19 +893,66 @@ class RepoConfigLoader(object):
def get_repo_for_location(self, location):
return self.prepos[self.get_name_for_location(location)]
+ def __setitem__(self, repo_name, repo):
+ # self.prepos[repo_name] = repo
+ raise NotImplementedError
+
def __getitem__(self, repo_name):
return self.prepos[repo_name]
+ def __delitem__(self, repo_name):
+ if repo_name == self.prepos['DEFAULT'].main_repo:
+ self.prepos['DEFAULT'].main_repo = None
+ location = self.prepos[repo_name].location
+ del self.prepos[repo_name]
+ if repo_name in self.prepos_order:
+ self.prepos_order.remove(repo_name)
+ for k, v in self.location_map.copy().items():
+ if v == repo_name:
+ del self.location_map[k]
+ if repo_name in self.treemap:
+ del self.treemap[repo_name]
+ self._repo_location_list = tuple(x for x in self._repo_location_list if x != location)
+
def __iter__(self):
for repo_name in self.prepos_order:
yield self.prepos[repo_name]
-def load_repository_config(settings):
- #~ repoconfigpaths = [os.path.join(settings.global_config_path, "repos.conf")]
+ def __contains__(self, repo_name):
+ return repo_name in self.prepos
+
+ def config_string(self):
+ str_or_int_keys = ("format", "location", "main_repo", "priority", "sync_cvs_repo", "sync_type", "sync_uri")
+ str_tuple_keys = ("aliases", "eclass_overrides", "force")
+ repo_config_tuple_keys = ("masters",)
+ keys = str_or_int_keys + str_tuple_keys + repo_config_tuple_keys
+ config_string = ""
+ for repo_name, repo in sorted(self.prepos.items(), key=lambda x: (x[0] != "DEFAULT", x[0])):
+ config_string += "\n[%s]\n" % repo_name
+ for key in sorted(keys):
+ if key == "main_repo" and repo_name != "DEFAULT":
+ continue
+ if getattr(repo, key) is not None:
+ if key in str_or_int_keys:
+ config_string += "%s = %s\n" % (key.replace("_", "-"), getattr(repo, key))
+ elif key in str_tuple_keys:
+ config_string += "%s = %s\n" % (key.replace("_", "-"), " ".join(getattr(repo, key)))
+ elif key in repo_config_tuple_keys:
+ config_string += "%s = %s\n" % (key.replace("_", "-"), " ".join(x.name for x in getattr(repo, key)))
+ return config_string.lstrip("\n")
+
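
config_string() makes the loaded configuration round-trippable: the new test_portdb_cache test below passes it to egencache via --repositories-configuration. A minimal usage sketch on a normally configured system:

import portage

# Serialize the active repository configuration back into repos.conf
# syntax; a child process can consume it via PORTAGE_REPOSITORIES.
print(portage.settings.repositories.config_string())
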
+def load_repository_config(settings, extra_files=None):
repoconfigpaths = []
- if settings.local_config:
- repoconfigpaths.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
- USER_CONFIG_PATH, "repos.conf"))
+ if "PORTAGE_REPOSITORIES" in settings:
+ repoconfigpaths.append(io.StringIO(settings["PORTAGE_REPOSITORIES"]))
+ else:
+ if portage._not_installed:
+ repoconfigpaths.append(os.path.join(PORTAGE_BASE_PATH, "cnf", "repos.conf"))
+ else:
+ repoconfigpaths.append(os.path.join(settings.global_config_path, "repos.conf"))
+ repoconfigpaths.append(os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH, "repos.conf"))
+ if extra_files:
+ repoconfigpaths.extend(extra_files)
return RepoConfigLoader(repoconfigpaths, settings)
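
With this change the entire repository configuration can be injected through the PORTAGE_REPOSITORIES environment variable, bypassing repos.conf and PORTDIR entirely. A hedged sketch, with a hypothetical repository location:

import os
import subprocess

env = dict(os.environ)
env["PORTAGE_REPOSITORIES"] = (
    "[DEFAULT]\n"
    "main-repo = test_repo\n"
    "[test_repo]\n"
    "location = /tmp/test_repo\n"
)
# Any portage tool run in this environment sees only test_repo.
subprocess.call(["emerge", "--info"], env=env)
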
def _get_repo_name(repo_location, cached=None):
@@ -696,6 +986,9 @@ def parse_layout_conf(repo_location, repo_name=None):
data['allow-provide-virtual'] = \
layout_data.get('allow-provide-virtuals', 'false').lower() == 'true'
+ data['eapis-banned'] = tuple(layout_data.get('eapis-banned', '').split())
+ data['eapis-deprecated'] = tuple(layout_data.get('eapis-deprecated', '').split())
+
data['sign-commit'] = layout_data.get('sign-commits', 'false').lower() \
== 'true'
@@ -705,6 +998,8 @@ def parse_layout_conf(repo_location, repo_name=None):
data['thin-manifest'] = layout_data.get('thin-manifests', 'false').lower() \
== 'true'
+ data['repo-name'] = _gen_valid_repo(layout_data.get('repo-name', ''))
+
manifest_policy = layout_data.get('use-manifests', 'strict').lower()
data['allow-missing-manifest'] = manifest_policy != 'strict'
data['create-manifest'] = manifest_policy != 'false'
@@ -713,9 +1008,18 @@ def parse_layout_conf(repo_location, repo_name=None):
# for compatibility w/ PMS, fall back to pms; but also check if the
# cache exists or not.
cache_formats = layout_data.get('cache-formats', '').lower().split()
- if not cache_formats and os.path.isdir(
- os.path.join(repo_location, 'metadata', 'cache')):
- cache_formats = ['pms']
+ if not cache_formats:
+ # Auto-detect cache formats, and prefer md5-cache if available.
+ # This behavior was deployed in portage-2.1.11.14, so that the
+ # default egencache format could eventually be changed to md5-dict
+ # in portage-2.1.11.32. WARNING: Versions prior to portage-2.1.11.14
+ # will NOT recognize md5-dict format unless it is explicitly
+ # listed in layout.conf.
+ cache_formats = []
+ if os.path.isdir(os.path.join(repo_location, 'metadata', 'md5-cache')):
+ cache_formats.append('md5-dict')
+ if os.path.isdir(os.path.join(repo_location, 'metadata', 'cache')):
+ cache_formats.append('pms')
data['cache-formats'] = tuple(cache_formats)
manifest_hashes = layout_data.get('manifest-hashes')
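
A standalone restatement of the auto-detection fallback above; md5-dict is preferred when both pregenerated caches are present (the repository path is hypothetical):

import os

def detect_cache_formats(repo_location):
    # Prefer md5-cache; fall back to the PMS metadata/cache layout.
    formats = []
    if os.path.isdir(os.path.join(repo_location, 'metadata', 'md5-cache')):
        formats.append('md5-dict')
    if os.path.isdir(os.path.join(repo_location, 'metadata', 'cache')):
        formats.append('pms')
    return tuple(formats)

print(detect_cache_formats('/usr/portage'))  # e.g. ('md5-dict',)
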
@@ -754,7 +1058,7 @@ def parse_layout_conf(repo_location, repo_name=None):
raw_formats = layout_data.get('profile-formats')
if raw_formats is None:
- if eapi in ('4-python',):
+ if eapi_allows_directories_on_profile_level_and_repository_level(eapi):
raw_formats = ('portage-1',)
else:
raw_formats = ('portage-1-compat',)
diff --git a/pym/portage/tests/__init__.py b/pym/portage/tests/__init__.py
index 492ece44b..84e732a1c 100644
--- a/pym/portage/tests/__init__.py
+++ b/pym/portage/tests/__init__.py
@@ -1,5 +1,5 @@
# tests/__init__.py -- Portage Unit Test functionality
-# Copyright 2006-2011 Gentoo Foundation
+# Copyright 2006-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -7,26 +7,40 @@ from __future__ import print_function
import sys
import time
import unittest
-from optparse import OptionParser, OptionValueError
try:
from unittest.runner import _TextTestResult # new in python-2.7
except ImportError:
from unittest import _TextTestResult
+try:
+ # They added the skip framework to python-2.7.
+ # Drop this once we drop python-2.6 support.
+ unittest_skip_shims = False
+ from unittest import SkipTest # new in python-2.7
+except ImportError:
+ unittest_skip_shims = True
+
+import portage
from portage import os
from portage import _encodings
from portage import _unicode_decode
+from portage.util._argparse import ArgumentParser
def main():
suite = unittest.TestSuite()
basedir = os.path.dirname(os.path.realpath(__file__))
usage = "usage: %s [options] [tests to run]" % os.path.basename(sys.argv[0])
- parser = OptionParser(usage=usage)
- parser.add_option("-l", "--list", help="list all tests",
+ parser = ArgumentParser(usage=usage)
+ parser.add_argument("-l", "--list", help="list all tests",
action="store_true", dest="list_tests")
- (options, args) = parser.parse_args(args=sys.argv)
+ options, args = parser.parse_known_args(args=sys.argv)
+
+ if (os.environ.get('NOCOLOR') in ('yes', 'true') or
+ os.environ.get('TERM') == 'dumb' or
+ not sys.stdout.isatty()):
+ portage.output.nocolor()
if options.list_tests:
testdir = os.path.dirname(sys.argv[0])
@@ -70,15 +84,12 @@ def getTestFromCommandLine(args, base_path):
def getTestDirs(base_path):
TEST_FILE = b'__test__'
- svn_dirname = b'.svn'
testDirs = []
# the os.walk help mentions relative paths as being quirky
# I was tired of adding dirs to the list, so now we add __test__
# to each dir we want tested.
for root, dirs, files in os.walk(base_path):
- if svn_dirname in dirs:
- dirs.remove(svn_dirname)
try:
root = _unicode_decode(root,
encoding=_encodings['fs'], errors='strict')
@@ -93,7 +104,7 @@ def getTestDirs(base_path):
def getTestNames(path):
files = os.listdir(path)
- files = [ f[:-3] for f in files if f.startswith("test") and f.endswith(".py") ]
+ files = [f[:-3] for f in files if f.startswith("test") and f.endswith(".py")]
files.sort()
return files
@@ -134,14 +145,14 @@ class TextTestResult(_TextTestResult):
self.portage_skipped = []
def addTodo(self, test, info):
- self.todoed.append((test,info))
+ self.todoed.append((test, info))
if self.showAll:
self.stream.writeln("TODO")
elif self.dots:
self.stream.write(".")
def addPortageSkip(self, test, info):
- self.portage_skipped.append((test,info))
+ self.portage_skipped.append((test, info))
if self.showAll:
self.stream.writeln("SKIP")
elif self.dots:
@@ -185,10 +196,14 @@ class TestCase(unittest.TestCase):
except:
result.addError(self, sys.exc_info())
return
+
ok = False
try:
testMethod()
ok = True
+ except SkipTest as e:
+ result.addPortageSkip(self, "%s: SKIP: %s" %
+ (testMethod, str(e)))
except self.failureException:
if self.portage_skip is not None:
if self.portage_skip is True:
@@ -197,13 +212,14 @@ class TestCase(unittest.TestCase):
result.addPortageSkip(self, "%s: SKIP: %s" %
(testMethod, self.portage_skip))
elif self.todo:
- result.addTodo(self,"%s: TODO" % testMethod)
+ result.addTodo(self, "%s: TODO" % testMethod)
else:
result.addFailure(self, sys.exc_info())
except (KeyboardInterrupt, SystemExit):
raise
except:
result.addError(self, sys.exc_info())
+
try:
self.tearDown()
except SystemExit:
@@ -213,7 +229,8 @@ class TestCase(unittest.TestCase):
except:
result.addError(self, sys.exc_info())
ok = False
- if ok: result.addSuccess(self)
+ if ok:
+ result.addSuccess(self)
finally:
result.stopTest(self)
@@ -230,10 +247,48 @@ class TestCase(unittest.TestCase):
except excClass:
return
else:
- if hasattr(excClass,'__name__'): excName = excClass.__name__
+ if hasattr(excClass, '__name__'): excName = excClass.__name__
else: excName = str(excClass)
raise self.failureException("%s not raised: %s" % (excName, msg))
+ def assertExists(self, path):
+ """Make sure |path| exists"""
+ if not os.path.exists(path):
+ msg = ['path is missing: %s' % (path,)]
+ while path != '/':
+ path = os.path.dirname(path)
+ if not path:
+ # If we're given something like "foo", abort once we get to "".
+ break
+ result = os.path.exists(path)
+ msg.append('\tos.path.exists(%s): %s' % (path, result))
+ if result:
+ msg.append('\tcontents: %r' % os.listdir(path))
+ break
+ raise self.failureException('\n'.join(msg))
+
+ def assertNotExists(self, path):
+ """Make sure |path| does not exist"""
+ if os.path.exists(path):
+ raise self.failureException('path exists when it should not: %s' % path)
+
+if unittest_skip_shims:
+ # Shim code for <python-2.7.
+ class SkipTest(Exception):
+ """unittest.SkipTest shim for <python-2.7"""
+
+ def skipTest(self, reason):
+ raise SkipTest(reason)
+ setattr(TestCase, 'skipTest', skipTest)
+
+ def assertIn(self, member, container, msg=None):
+ self.assertTrue(member in container, msg=msg)
+ setattr(TestCase, 'assertIn', assertIn)
+
+ def assertNotIn(self, member, container, msg=None):
+ self.assertFalse(member in container, msg=msg)
+ setattr(TestCase, 'assertNotIn', assertNotIn)
+
class TextTestRunner(unittest.TextTestRunner):
"""
We subclass unittest.TextTestRunner to output SKIP for tests that fail but are skippable
@@ -271,8 +326,8 @@ class TextTestRunner(unittest.TextTestRunner):
self.stream.writeln("OK")
return result
-test_cps = ['sys-apps/portage','virtual/portage']
-test_versions = ['1.0', '1.0-r1','2.3_p4','1.0_alpha57']
-test_slots = [ None, '1','gentoo-sources-2.6.17','spankywashere']
-test_usedeps = ['foo','-bar', ('foo','bar'),
- ('foo','-bar'), ('foo?', '!bar?') ]
+test_cps = ['sys-apps/portage', 'virtual/portage']
+test_versions = ['1.0', '1.0-r1', '2.3_p4', '1.0_alpha57']
+test_slots = [None, '1', 'gentoo-sources-2.6.17', 'spankywashere']
+test_usedeps = ['foo', '-bar', ('foo', 'bar'),
+ ('foo', '-bar'), ('foo?', '!bar?')]
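
With the shim in place, tests can call skipTest() uniformly on Python 2.6 and >= 2.7, and the runner reports them through addPortageSkip(). A minimal hypothetical test case:

from portage.tests import TestCase

class ExampleTestCase(TestCase):
    def testSkipsCleanly(self):
        # Reported as SKIP on Python 2.6 (shimmed SkipTest) and on
        # Python >= 2.7 (native unittest.SkipTest) alike.
        self.skipTest("example skip reason")
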
diff --git a/pym/portage/tests/bin/setup_env.py b/pym/portage/tests/bin/setup_env.py
index 1f8554e42..9cc26df08 100644
--- a/pym/portage/tests/bin/setup_env.py
+++ b/pym/portage/tests/bin/setup_env.py
@@ -1,19 +1,17 @@
# setup_env.py -- Make sure bin subdir has sane env for testing
-# Copyright 2007-2011 Gentoo Foundation
+# Copyright 2007-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import tempfile
from portage import os
from portage import shutil
+from portage.const import PORTAGE_BIN_PATH
+from portage.const import PORTAGE_PYM_PATH
from portage.tests import TestCase
from portage.process import spawn
-basepath = os.path.join(os.path.dirname(os.path.dirname(
- os.path.abspath(__file__))),
- "..", "..", "..")
-bindir = os.path.join(basepath, "bin")
-pymdir = os.path.join(basepath, "pym")
+bindir = PORTAGE_BIN_PATH
basedir = None
env = None
@@ -30,20 +28,20 @@ def binTestsInit():
global basedir, env
basedir = tempfile.mkdtemp()
env = {}
- env["EAPI"] = "0"
- env["D"] = os.path.join(basedir, "image")
- env["T"] = os.path.join(basedir, "temp")
- env["S"] = os.path.join(basedir, "workdir")
- env["PF"] = "portage-tests-0.09-r1"
- env["PATH"] = bindir + ":" + os.environ["PATH"]
- env["PORTAGE_BIN_PATH"] = bindir
- env["PORTAGE_PYM_PATH"] = pymdir
- env["PORTAGE_INST_UID"] = str(os.getuid())
- env["PORTAGE_INST_GID"] = str(os.getgid())
- env["DESTTREE"] = "/usr"
- os.mkdir(env["D"])
- os.mkdir(env["T"])
- os.mkdir(env["S"])
+ env['EAPI'] = '0'
+ env['D'] = os.path.join(basedir, 'image')
+ env['T'] = os.path.join(basedir, 'temp')
+ env['S'] = os.path.join(basedir, 'workdir')
+ env['PF'] = 'portage-tests-0.09-r1'
+ env['PATH'] = bindir + ':' + os.environ['PATH']
+ env['PORTAGE_BIN_PATH'] = bindir
+ env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
+ env['PORTAGE_INST_UID'] = str(os.getuid())
+ env['PORTAGE_INST_GID'] = str(os.getgid())
+ env['DESTTREE'] = '/usr'
+ os.mkdir(env['D'])
+ os.mkdir(env['T'])
+ os.mkdir(env['S'])
class BinTestCase(TestCase):
def init(self):
@@ -53,7 +51,7 @@ class BinTestCase(TestCase):
def _exists_in_D(path):
# Note: do not use os.path.join() here; we assume D ends in /
- return os.access(env["D"] + path, os.W_OK)
+ return os.access(env['D'] + path, os.W_OK)
def exists_in_D(path):
if not _exists_in_D(path):
raise TestCase.failureException
@@ -68,7 +66,7 @@ def portage_func(func, args, exit_status=0):
f = open('/dev/null', 'wb')
fd_pipes = {0:0,1:f.fileno(),2:f.fileno()}
def pre_exec():
- os.chdir(env["S"])
+ os.chdir(env['S'])
spawn([func] + args.split(), env=env,
fd_pipes=fd_pipes, pre_exec=pre_exec)
f.close()
@@ -80,10 +78,10 @@ def create_portage_wrapper(bin):
return portage_func(*newargs)
return derived_func
-for bin in os.listdir(os.path.join(bindir, "ebuild-helpers")):
- if bin.startswith("do") or \
- bin.startswith("new") or \
- bin.startswith("prep") or \
- bin in ["ecompress","ecompressdir","fowners","fperms"]:
+for bin in os.listdir(os.path.join(bindir, 'ebuild-helpers')):
+ if bin.startswith('do') or \
+ bin.startswith('new') or \
+ bin.startswith('prep') or \
+ bin in ('ecompress', 'ecompressdir', 'fowners', 'fperms'):
globals()[bin] = create_portage_wrapper(
- os.path.join(bindir, "ebuild-helpers", bin))
+ os.path.join(bindir, 'ebuild-helpers', bin))
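
The loop above turns every do*/new*/prep* helper into a module-level wrapper that spawns the real script inside the temporary build environment. A hedged usage sketch (the installed path assumes DESTTREE=/usr as set in binTestsInit):

from portage.tests.bin import setup_env

setup_env.binTestsInit()
setup_env.dobin('/bin/true')            # generated wrapper
setup_env.exists_in_D('/usr/bin/true')  # raises on failure
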
diff --git a/pym/portage/tests/dbapi/test_fakedbapi.py b/pym/portage/tests/dbapi/test_fakedbapi.py
index e3843f0a4..771356350 100644
--- a/pym/portage/tests/dbapi/test_fakedbapi.py
+++ b/pym/portage/tests/dbapi/test_fakedbapi.py
@@ -1,4 +1,4 @@
-# Copyright 2011-2012 Gentoo Foundation
+# Copyright 2011-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import tempfile
@@ -42,10 +42,12 @@ class TestFakedbapi(TestCase):
tempdir = tempfile.mkdtemp()
try:
- portdir = os.path.join(tempdir, "usr/portage")
- os.makedirs(portdir)
+ test_repo = os.path.join(tempdir, "var", "repositories", "test_repo")
+ os.makedirs(os.path.join(test_repo, "profiles"))
+ with open(os.path.join(test_repo, "profiles", "repo_name"), "w") as f:
+ f.write("test_repo")
env = {
- "PORTDIR": portdir,
+ "PORTAGE_REPOSITORIES": "[DEFAULT]\nmain-repo = test_repo\n[test_repo]\nlocation = %s" % test_repo
}
fakedb = fakedbapi(settings=config(config_profile_path="",
env=env, eprefix=tempdir))
diff --git a/pym/portage/tests/dbapi/test_portdb_cache.py b/pym/portage/tests/dbapi/test_portdb_cache.py
new file mode 100644
index 000000000..94af96eaf
--- /dev/null
+++ b/pym/portage/tests/dbapi/test_portdb_cache.py
@@ -0,0 +1,183 @@
+# Copyright 2012-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import sys
+import textwrap
+
+import portage
+from portage import os
+from portage import _unicode_decode
+from portage.const import (BASH_BINARY, PORTAGE_BIN_PATH,
+ PORTAGE_PYM_PATH, USER_CONFIG_PATH)
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+
+class PortdbCacheTestCase(TestCase):
+
+ def testPortdbCache(self):
+ debug = False
+
+ ebuilds = {
+ "dev-libs/A-1": {},
+ "dev-libs/A-2": {},
+ "sys-apps/B-1": {},
+ "sys-apps/B-2": {},
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ test_repo_location = settings.repositories["test_repo"].location
+ user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
+ metadata_dir = os.path.join(test_repo_location, "metadata")
+ md5_cache_dir = os.path.join(metadata_dir, "md5-cache")
+ pms_cache_dir = os.path.join(metadata_dir, "cache")
+ layout_conf_path = os.path.join(metadata_dir, "layout.conf")
+
+ portage_python = portage._python_interpreter
+ egencache_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(PORTAGE_BIN_PATH, "egencache"),
+ "--repo", "test_repo",
+ "--repositories-configuration", settings.repositories.config_string())
+ python_cmd = (portage_python, "-b", "-Wd", "-c")
+
+ test_commands = (
+ (lambda: not os.path.exists(pms_cache_dir),),
+ (lambda: not os.path.exists(md5_cache_dir),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.porttree_root in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+
+ egencache_cmd + ("--update",),
+ (lambda: not os.path.exists(pms_cache_dir),),
+ (lambda: os.path.exists(md5_cache_dir),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.porttree_root not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.flat_hash import md5_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.porttree_root], md5_database):
+ sys.exit(1)
+ """),),
+
+ (BASH_BINARY, "-c", "echo %s > %s" %
+ tuple(map(portage._shell_quote,
+ ("cache-formats = md5-dict pms", layout_conf_path,)))),
+ egencache_cmd + ("--update",),
+ (lambda: os.path.exists(md5_cache_dir),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.porttree_root not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.flat_hash import md5_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.porttree_root], md5_database):
+ sys.exit(1)
+ """),),
+
+ # Disable DeprecationWarnings, since the pms format triggers them
+ # in portdbapi._create_pregen_cache().
+ (BASH_BINARY, "-c", "echo %s > %s" %
+ tuple(map(portage._shell_quote,
+ ("cache-formats = pms md5-dict", layout_conf_path,)))),
+ (portage_python, "-b", "-Wd", "-Wi::DeprecationWarning", "-c") + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.porttree_root not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ (portage_python, "-b", "-Wd", "-Wi::DeprecationWarning", "-c") + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.metadata import database as pms_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.porttree_root], pms_database):
+ sys.exit(1)
+ """),),
+
+ # Test auto-detection and preference for md5-cache when both
+ # cache formats are available but layout.conf is absent.
+ (BASH_BINARY, "-c", "rm %s" % portage._shell_quote(layout_conf_path)),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.porttree_root not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.flat_hash import md5_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.porttree_root], md5_database):
+ sys.exit(1)
+ """),),
+ )
+
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is not None and \
+ pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PATH" : os.environ.get("PATH", ""),
+ "PORTAGE_OVERRIDE_EPREFIX" : eprefix,
+ "PORTAGE_PYTHON" : portage_python,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
+ "PYTHONPATH" : pythonpath,
+ }
+
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+
+ dirs = [user_config_dir]
+
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
+ for i, args in enumerate(test_commands):
+
+ if hasattr(args[0], '__call__'):
+ self.assertTrue(args[0](),
+ "callable at index %s failed" % (i,))
+ continue
+
+ proc = subprocess.Popen(args,
+ env=env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(os.EX_OK, proc.returncode,
+ "command %d failed with args %s" % (i, args,))
+ finally:
+ playground.cleanup()
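
Outside the test harness the same egencache invocation can be reproduced directly; every API used here also appears in the test above, with 'gentoo' assumed as the repository name:

import subprocess

import portage
from portage import os
from portage.const import PORTAGE_BIN_PATH

# Regenerate the md5-cache for a repository whose configuration is
# passed inline instead of being read from repos.conf.
subprocess.check_call((
    portage._python_interpreter,
    os.path.join(PORTAGE_BIN_PATH, 'egencache'),
    '--repo', 'gentoo', '--update',
    '--repositories-configuration',
    portage.settings.repositories.config_string(),
))
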
diff --git a/pym/portage/tests/dep/testAtom.py b/pym/portage/tests/dep/testAtom.py
index f5a7d3749..da58be27c 100644
--- a/pym/portage/tests/dep/testAtom.py
+++ b/pym/portage/tests/dep/testAtom.py
@@ -1,4 +1,4 @@
-# Copyright 2006, 2010 Gentoo Foundation
+# Copyright 2006-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -10,154 +10,157 @@ class TestAtom(TestCase):
def testAtom(self):
tests = (
- ( "=sys-apps/portage-2.1-r1:0[doc,a=,!b=,c?,!d?,-e]",
- ('=', 'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', None), False, False ),
- ( "=sys-apps/portage-2.1-r1*:0[doc]",
- ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', None), False, False ),
- ( "sys-apps/portage:0[doc]",
- (None, 'sys-apps/portage', None, '0', '[doc]', None), False, False ),
- ( "sys-apps/portage:0[doc]",
- (None, 'sys-apps/portage', None, '0', '[doc]', None), False, False ),
- ( "*/*",
- (None, '*/*', None, None, None, None), True, False ),
- ( "=*/*-*9999*",
- ('=*', '*/*', '*9999*', None, None, None), True, False ),
- ( "=*/*-*9999*:0::repo_name",
- ('=*', '*/*', '*9999*', '0', None, 'repo_name'), True, True ),
- ( "sys-apps/*",
- (None, 'sys-apps/*', None, None, None, None), True, False ),
- ( "*/portage",
- (None, '*/portage', None, None, None, None), True, False ),
- ( "s*s-*/portage:1",
- (None, 's*s-*/portage', None, '1', None, None), True, False ),
- ( "*/po*ge:2",
- (None, '*/po*ge', None, '2', None, None), True, False ),
- ( "!dev-libs/A",
- (None, 'dev-libs/A', None, None, None, None), True, True ),
- ( "!!dev-libs/A",
- (None, 'dev-libs/A', None, None, None, None), True, True ),
- ( "!!dev-libs/A",
- (None, 'dev-libs/A', None, None, None, None), True, True ),
- ( "dev-libs/A[foo(+)]",
- (None, 'dev-libs/A', None, None, "[foo(+)]", None), True, True ),
- ( "dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
- (None, 'dev-libs/A', None, None, "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True ),
- ( "dev-libs/A:2[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
- (None, 'dev-libs/A', None, "2", "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True ),
-
- ( "=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]",
- ('=', 'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', 'repo_name'), False, True ),
- ( "=sys-apps/portage-2.1-r1*:0::repo_name[doc]",
- ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', 'repo_name'), False, True ),
- ( "sys-apps/portage:0::repo_name[doc]",
- (None, 'sys-apps/portage', None, '0', '[doc]', 'repo_name'), False, True ),
-
- ( "*/*::repo_name",
- (None, '*/*', None, None, None, 'repo_name'), True, True ),
- ( "sys-apps/*::repo_name",
- (None, 'sys-apps/*', None, None, None, 'repo_name'), True, True ),
- ( "*/portage::repo_name",
- (None, '*/portage', None, None, None, 'repo_name'), True, True ),
- ( "s*s-*/portage:1::repo_name",
- (None, 's*s-*/portage', None, '1', None, 'repo_name'), True, True ),
+ ("=sys-apps/portage-2.1-r1:0[doc,a=,!b=,c?,!d?,-e]",
+ ('=', 'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', None), False, False),
+ ("=sys-apps/portage-2.1-r1*:0[doc]",
+ ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', None), False, False),
+ ("sys-apps/portage:0[doc]",
+ (None, 'sys-apps/portage', None, '0', '[doc]', None), False, False),
+ ("sys-apps/portage:0[doc]",
+ (None, 'sys-apps/portage', None, '0', '[doc]', None), False, False),
+ ("*/*",
+ (None, '*/*', None, None, None, None), True, False),
+ ("=*/*-*9999*",
+ ('=*', '*/*', '*9999*', None, None, None), True, False),
+ ("=*/*-*9999*:0::repo_name",
+ ('=*', '*/*', '*9999*', '0', None, 'repo_name'), True, True),
+ ("=*/*-*_beta*",
+ ('=*', '*/*', '*_beta*', None, None, None), True, False),
+ ("=*/*-*_beta*:0::repo_name",
+ ('=*', '*/*', '*_beta*', '0', None, 'repo_name'), True, True),
+ ("sys-apps/*",
+ (None, 'sys-apps/*', None, None, None, None), True, False),
+ ("*/portage",
+ (None, '*/portage', None, None, None, None), True, False),
+ ("s*s-*/portage:1",
+ (None, 's*s-*/portage', None, '1', None, None), True, False),
+ ("*/po*ge:2",
+ (None, '*/po*ge', None, '2', None, None), True, False),
+ ("!dev-libs/A",
+ (None, 'dev-libs/A', None, None, None, None), True, True),
+ ("!!dev-libs/A",
+ (None, 'dev-libs/A', None, None, None, None), True, True),
+ ("!!dev-libs/A",
+ (None, 'dev-libs/A', None, None, None, None), True, True),
+ ("dev-libs/A[foo(+)]",
+ (None, 'dev-libs/A', None, None, "[foo(+)]", None), True, True),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
+ (None, 'dev-libs/A', None, None, "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True),
+ ("dev-libs/A:2[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
+ (None, 'dev-libs/A', None, "2", "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True),
+
+ ("=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]",
+ ('=', 'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', 'repo_name'), False, True),
+ ("=sys-apps/portage-2.1-r1*:0::repo_name[doc]",
+ ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', 'repo_name'), False, True),
+ ("sys-apps/portage:0::repo_name[doc]",
+ (None, 'sys-apps/portage', None, '0', '[doc]', 'repo_name'), False, True),
+
+ ("*/*::repo_name",
+ (None, '*/*', None, None, None, 'repo_name'), True, True),
+ ("sys-apps/*::repo_name",
+ (None, 'sys-apps/*', None, None, None, 'repo_name'), True, True),
+ ("*/portage::repo_name",
+ (None, '*/portage', None, None, None, 'repo_name'), True, True),
+ ("s*s-*/portage:1::repo_name",
+ (None, 's*s-*/portage', None, '1', None, 'repo_name'), True, True),
)
-
+
tests_xfail = (
- ( Atom("sys-apps/portage"), False, False ),
- ( "cat/pkg[a!]", False, False ),
- ( "cat/pkg[!a]", False, False ),
- ( "cat/pkg[!a!]", False, False ),
- ( "cat/pkg[!a-]", False, False ),
- ( "cat/pkg[-a=]", False, False ),
- ( "cat/pkg[-a?]", False, False ),
- ( "cat/pkg[-a!]", False, False ),
- ( "cat/pkg[=a]", False, False ),
- ( "cat/pkg[=a=]", False, False ),
- ( "cat/pkg[=a?]", False, False ),
- ( "cat/pkg[=a!]", False, False ),
- ( "cat/pkg[=a-]", False, False ),
- ( "cat/pkg[?a]", False, False ),
- ( "cat/pkg[?a=]", False, False ),
- ( "cat/pkg[?a?]", False, False ),
- ( "cat/pkg[?a!]", False, False ),
- ( "cat/pkg[?a-]", False, False ),
- ( "sys-apps/portage[doc]:0", False, False ),
- ( "*/*", False, False ),
- ( "sys-apps/*", False, False ),
- ( "*/portage", False, False ),
- ( "*/**", True, False ),
- ( "*/portage[use]", True, False ),
- ( "cat/pkg[a()]", False, False ),
- ( "cat/pkg[a(]", False, False ),
- ( "cat/pkg[a)]", False, False ),
- ( "cat/pkg[a(,b]", False, False ),
- ( "cat/pkg[a),b]", False, False ),
- ( "cat/pkg[a(*)]", False, False ),
- ( "cat/pkg[a(*)]", True, False ),
- ( "cat/pkg[a(+-)]", False, False ),
- ( "cat/pkg[a()]", False, False ),
- ( "cat/pkg[(+)a]", False, False ),
- ( "cat/pkg[a=(+)]", False, False ),
- ( "cat/pkg[!(+)a=]", False, False ),
- ( "cat/pkg[!a=(+)]", False, False ),
- ( "cat/pkg[a?(+)]", False, False ),
- ( "cat/pkg[!a?(+)]", False, False ),
- ( "cat/pkg[!(+)a?]", False, False ),
- ( "cat/pkg[-(+)a]", False, False ),
- ( "cat/pkg[a(+),-a]", False, False ),
- ( "cat/pkg[a(-),-a]", False, False ),
- ( "cat/pkg[-a,a(+)]", False, False ),
- ( "cat/pkg[-a,a(-)]", False, False ),
- ( "cat/pkg[-a(+),a(-)]", False, False ),
- ( "cat/pkg[-a(-),a(+)]", False, False ),
- ( "sys-apps/portage[doc]::repo_name", False, False ),
- ( "sys-apps/portage:0[doc]::repo_name", False, False ),
- ( "sys-apps/portage[doc]:0::repo_name", False, False ),
- ( "=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]", False, False ),
- ( "=sys-apps/portage-2.1-r1*:0::repo_name[doc]", False, False ),
- ( "sys-apps/portage:0::repo_name[doc]", False, False ),
- ( "*/*::repo_name", True, False ),
+ (Atom("sys-apps/portage"), False, False),
+ ("cat/pkg[a!]", False, False),
+ ("cat/pkg[!a]", False, False),
+ ("cat/pkg[!a!]", False, False),
+ ("cat/pkg[!a-]", False, False),
+ ("cat/pkg[-a=]", False, False),
+ ("cat/pkg[-a?]", False, False),
+ ("cat/pkg[-a!]", False, False),
+ ("cat/pkg[=a]", False, False),
+ ("cat/pkg[=a=]", False, False),
+ ("cat/pkg[=a?]", False, False),
+ ("cat/pkg[=a!]", False, False),
+ ("cat/pkg[=a-]", False, False),
+ ("cat/pkg[?a]", False, False),
+ ("cat/pkg[?a=]", False, False),
+ ("cat/pkg[?a?]", False, False),
+ ("cat/pkg[?a!]", False, False),
+ ("cat/pkg[?a-]", False, False),
+ ("sys-apps/portage[doc]:0", False, False),
+ ("*/*", False, False),
+ ("sys-apps/*", False, False),
+ ("*/portage", False, False),
+ ("*/**", True, False),
+ ("*/portage[use]", True, False),
+ ("cat/pkg[a()]", False, False),
+ ("cat/pkg[a(]", False, False),
+ ("cat/pkg[a)]", False, False),
+ ("cat/pkg[a(,b]", False, False),
+ ("cat/pkg[a),b]", False, False),
+ ("cat/pkg[a(*)]", False, False),
+ ("cat/pkg[a(*)]", True, False),
+ ("cat/pkg[a(+-)]", False, False),
+ ("cat/pkg[a()]", False, False),
+ ("cat/pkg[(+)a]", False, False),
+ ("cat/pkg[a=(+)]", False, False),
+ ("cat/pkg[!(+)a=]", False, False),
+ ("cat/pkg[!a=(+)]", False, False),
+ ("cat/pkg[a?(+)]", False, False),
+ ("cat/pkg[!a?(+)]", False, False),
+ ("cat/pkg[!(+)a?]", False, False),
+ ("cat/pkg[-(+)a]", False, False),
+ ("cat/pkg[a(+),-a]", False, False),
+ ("cat/pkg[a(-),-a]", False, False),
+ ("cat/pkg[-a,a(+)]", False, False),
+ ("cat/pkg[-a,a(-)]", False, False),
+ ("cat/pkg[-a(+),a(-)]", False, False),
+ ("cat/pkg[-a(-),a(+)]", False, False),
+ ("sys-apps/portage[doc]::repo_name", False, False),
+ ("sys-apps/portage:0[doc]::repo_name", False, False),
+ ("sys-apps/portage[doc]:0::repo_name", False, False),
+ ("=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]", False, False),
+ ("=sys-apps/portage-2.1-r1*:0::repo_name[doc]", False, False),
+ ("sys-apps/portage:0::repo_name[doc]", False, False),
+ ("*/*::repo_name", True, False),
)
for atom, parts, allow_wildcard, allow_repo in tests:
a = Atom(atom, allow_wildcard=allow_wildcard, allow_repo=allow_repo)
op, cp, ver, slot, use, repo = parts
- self.assertEqual( op, a.operator,
- msg="Atom('%s').operator = %s == '%s'" % ( atom, a.operator, op ) )
- self.assertEqual( cp, a.cp,
- msg="Atom('%s').cp = %s == '%s'" % ( atom, a.cp, cp ) )
+ self.assertEqual(op, a.operator,
+ msg="Atom('%s').operator = %s == '%s'" % (atom, a.operator, op))
+ self.assertEqual(cp, a.cp,
+ msg="Atom('%s').cp = %s == '%s'" % (atom, a.cp, cp))
if ver is not None:
cpv = "%s-%s" % (cp, ver)
else:
cpv = cp
- self.assertEqual( cpv, a.cpv,
- msg="Atom('%s').cpv = %s == '%s'" % ( atom, a.cpv, cpv ) )
- self.assertEqual( slot, a.slot,
- msg="Atom('%s').slot = %s == '%s'" % ( atom, a.slot, slot ) )
- self.assertEqual( repo, a.repo,
- msg="Atom('%s').repo == %s == '%s'" % ( atom, a.repo, repo ) )
+ self.assertEqual(cpv, a.cpv,
+ msg="Atom('%s').cpv = %s == '%s'" % (atom, a.cpv, cpv))
+ self.assertEqual(slot, a.slot,
+ msg="Atom('%s').slot = %s == '%s'" % (atom, a.slot, slot))
+ self.assertEqual(repo, a.repo,
+ msg="Atom('%s').repo == %s == '%s'" % (atom, a.repo, repo))
if a.use:
returned_use = str(a.use)
else:
returned_use = None
- self.assertEqual( use, returned_use,
- msg="Atom('%s').use = %s == '%s'" % ( atom, returned_use, use ) )
+ self.assertEqual(use, returned_use,
+ msg="Atom('%s').use = %s == '%s'" % (atom, returned_use, use))
for atom, allow_wildcard, allow_repo in tests_xfail:
- self.assertRaisesMsg(atom, (InvalidAtom, TypeError), Atom, atom, \
+ self.assertRaisesMsg(atom, (InvalidAtom, TypeError), Atom, atom,
allow_wildcard=allow_wildcard, allow_repo=allow_repo)
def testSlotAbiAtom(self):
tests = (
- ("virtual/ffmpeg:0/53", "4-slot-abi", {"slot": "0", "slot_abi": "53", "slot_abi_op": None}),
- ("virtual/ffmpeg:0/53=", "4-slot-abi", {"slot": "0", "slot_abi": "53", "slot_abi_op": "="}),
- ("virtual/ffmpeg:=", "4-slot-abi", {"slot": None, "slot_abi": None, "slot_abi_op": "="}),
- ("virtual/ffmpeg:0=", "4-slot-abi", {"slot": "0", "slot_abi": None, "slot_abi_op": "="}),
- ("virtual/ffmpeg:*", "4-slot-abi", {"slot": None, "slot_abi": None, "slot_abi_op": "*"}),
- ("virtual/ffmpeg:0*", "4-slot-abi", {"slot": "0", "slot_abi": None, "slot_abi_op": "*"}),
- ("virtual/ffmpeg:0", "4-slot-abi", {"slot": "0", "slot_abi": None, "slot_abi_op": None}),
- ("virtual/ffmpeg", "4-slot-abi", {"slot": None, "slot_abi": None, "slot_abi_op": None}),
+ ("virtual/ffmpeg:0/53", "4-slot-abi", {"slot": "0", "sub_slot": "53", "slot_operator": None}),
+ ("virtual/ffmpeg:0/53=", "4-slot-abi", {"slot": "0", "sub_slot": "53", "slot_operator": "="}),
+ ("virtual/ffmpeg:=", "4-slot-abi", {"slot": None, "sub_slot": None, "slot_operator": "="}),
+ ("virtual/ffmpeg:0=", "4-slot-abi", {"slot": "0", "sub_slot": None, "slot_operator": "="}),
+ ("virtual/ffmpeg:*", "4-slot-abi", {"slot": None, "sub_slot": None, "slot_operator": "*"}),
+ ("virtual/ffmpeg:0", "4-slot-abi", {"slot": "0", "sub_slot": None, "slot_operator": None}),
+ ("virtual/ffmpeg", "4-slot-abi", {"slot": None, "sub_slot": None, "slot_operator": None}),
)
for atom, eapi, parts in tests:
@@ -165,7 +168,7 @@ class TestAtom(TestCase):
for k, v in parts.items():
self.assertEqual(v, getattr(a, k),
msg="Atom('%s').%s = %s == '%s'" %
- (atom, k, getattr(a, k), v ))
+ (atom, k, getattr(a, k), v))
def test_intersects(self):
test_cases = (
@@ -182,7 +185,7 @@ class TestAtom(TestCase):
)
for atom, other, expected_result in test_cases:
- self.assertEqual(Atom(atom).intersects(Atom(other)), expected_result, \
+ self.assertEqual(Atom(atom).intersects(Atom(other)), expected_result,
"%s and %s should intersect: %s" % (atom, other, expected_result))
def test_violated_conditionals(self):
@@ -276,7 +279,7 @@ class TestAtom(TestCase):
for atom, other_use, iuse, parent_use in test_cases_xfail:
a = Atom(atom)
validator = use_flag_validator(iuse)
- self.assertRaisesMsg(atom, InvalidAtom, \
+ self.assertRaisesMsg(atom, InvalidAtom,
a.violated_conditionals, other_use, validator.is_valid_flag, parent_use)
def test_evaluate_conditionals(self):
@@ -325,9 +328,9 @@ class TestAtom(TestCase):
("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d", "e", "f"], [], "dev-libs/A[a,b,-b,c,-c,-e,-f]"),
("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["d", "e", "f"], "dev-libs/A[a,b,-b,c,-c,d,-f]"),
- ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", \
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]",
["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a(-),-b(+),c(-),-e(-),-f(+)]"),
- ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", \
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
[], ["a", "b", "c", "d", "e", "f"], "dev-libs/A[a(+),b(-),-c(+),d(-),-f(-)]"),
)
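
The rename from slot_abi/slot_abi_op to sub_slot/slot_operator is visible directly on Atom instances; a quick illustration under the 4-slot-abi EAPI used by the test:

from portage.dep import Atom

a = Atom('virtual/ffmpeg:0/53=', eapi='4-slot-abi')
print(a.slot, a.sub_slot, a.slot_operator)  # 0 53 =
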
diff --git a/pym/portage/tests/dep/testCheckRequiredUse.py b/pym/portage/tests/dep/testCheckRequiredUse.py
index 54791e016..63330b5cb 100644
--- a/pym/portage/tests/dep/testCheckRequiredUse.py
+++ b/pym/portage/tests/dep/testCheckRequiredUse.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -9,97 +9,106 @@ class TestCheckRequiredUse(TestCase):
def testCheckRequiredUse(self):
test_cases = (
- ( "|| ( a b )", [], ["a", "b"], False),
- ( "|| ( a b )", ["a"], ["a", "b"], True),
- ( "|| ( a b )", ["b"], ["a", "b"], True),
- ( "|| ( a b )", ["a", "b"], ["a", "b"], True),
-
- ( "^^ ( a b )", [], ["a", "b"], False),
- ( "^^ ( a b )", ["a"], ["a", "b"], True),
- ( "^^ ( a b )", ["b"], ["a", "b"], True),
- ( "^^ ( a b )", ["a", "b"], ["a", "b"], False),
-
- ( "^^ ( || ( a b ) c )", [], ["a", "b", "c"], False),
- ( "^^ ( || ( a b ) c )", ["a"], ["a", "b", "c"], True),
-
- ( "^^ ( || ( ( a b ) ) ( c ) )", [], ["a", "b", "c"], False),
- ( "( ^^ ( ( || ( ( a ) ( b ) ) ) ( ( c ) ) ) )", ["a"], ["a", "b", "c"], True),
-
- ( "a || ( b c )", ["a"], ["a", "b", "c"], False),
- ( "|| ( b c ) a", ["a"], ["a", "b", "c"], False),
-
- ( "|| ( a b c )", ["a"], ["a", "b", "c"], True),
- ( "|| ( a b c )", ["b"], ["a", "b", "c"], True),
- ( "|| ( a b c )", ["c"], ["a", "b", "c"], True),
-
- ( "^^ ( a b c )", ["a"], ["a", "b", "c"], True),
- ( "^^ ( a b c )", ["b"], ["a", "b", "c"], True),
- ( "^^ ( a b c )", ["c"], ["a", "b", "c"], True),
- ( "^^ ( a b c )", ["a", "b"], ["a", "b", "c"], False),
- ( "^^ ( a b c )", ["b", "c"], ["a", "b", "c"], False),
- ( "^^ ( a b c )", ["a", "c"], ["a", "b", "c"], False),
- ( "^^ ( a b c )", ["a", "b", "c"], ["a", "b", "c"], False),
-
- ( "a? ( ^^ ( b c ) )", [], ["a", "b", "c"], True),
- ( "a? ( ^^ ( b c ) )", ["a"], ["a", "b", "c"], False),
- ( "a? ( ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
- ( "a? ( ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
- ( "a? ( ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
- ( "a? ( ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
-
- ( "^^ ( a? ( !b ) !c? ( d ) )", [], ["a", "b", "c", "d"], False),
- ( "^^ ( a? ( !b ) !c? ( d ) )", ["a"], ["a", "b", "c", "d"], True),
- ( "^^ ( a? ( !b ) !c? ( d ) )", ["c"], ["a", "b", "c", "d"], True),
- ( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "c"], ["a", "b", "c", "d"], True),
- ( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "c"], ["a", "b", "c", "d"], False),
- ( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
- ( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
- ( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "d"], ["a", "b", "c", "d"], False),
-
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], True),
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
- ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
-
- ( "^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
- ( "^^ ( || ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
- ( "^^ ( || ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], False),
- ( "^^ ( || ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
- ( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], False),
- ( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], False),
- ( "^^ ( || ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
- ( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], True),
-
- ( "|| ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], True),
- ( "|| ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
- ( "|| ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
- ( "|| ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
- ( "|| ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
- ( "|| ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
- ( "|| ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
- ( "|| ( ( a b ) c )", [], ["a", "b", "c"], False),
-
- ( "^^ ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], False),
- ( "^^ ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
- ( "^^ ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
- ( "^^ ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
- ( "^^ ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
- ( "^^ ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
- ( "^^ ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
- ( "^^ ( ( a b ) c )", [], ["a", "b", "c"], False),
+ ("|| ( a b )", [], ["a", "b"], False),
+ ("|| ( a b )", ["a"], ["a", "b"], True),
+ ("|| ( a b )", ["b"], ["a", "b"], True),
+ ("|| ( a b )", ["a", "b"], ["a", "b"], True),
+
+ ("^^ ( a b )", [], ["a", "b"], False),
+ ("^^ ( a b )", ["a"], ["a", "b"], True),
+ ("^^ ( a b )", ["b"], ["a", "b"], True),
+ ("^^ ( a b )", ["a", "b"], ["a", "b"], False),
+ ("?? ( a b )", ["a", "b"], ["a", "b"], False),
+ ("?? ( a b )", ["a"], ["a", "b"], True),
+ ("?? ( a b )", ["b"], ["a", "b"], True),
+ ("?? ( a b )", [], ["a", "b"], True),
+ ("?? ( )", [], [], True),
+
+ ("^^ ( || ( a b ) c )", [], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) c )", ["a"], ["a", "b", "c"], True),
+
+ ("^^ ( || ( ( a b ) ) ( c ) )", [], ["a", "b", "c"], False),
+ ("( ^^ ( ( || ( ( a ) ( b ) ) ) ( ( c ) ) ) )", ["a"], ["a", "b", "c"], True),
+
+ ("a || ( b c )", ["a"], ["a", "b", "c"], False),
+ ("|| ( b c ) a", ["a"], ["a", "b", "c"], False),
+
+ ("|| ( a b c )", ["a"], ["a", "b", "c"], True),
+ ("|| ( a b c )", ["b"], ["a", "b", "c"], True),
+ ("|| ( a b c )", ["c"], ["a", "b", "c"], True),
+
+ ("^^ ( a b c )", ["a"], ["a", "b", "c"], True),
+ ("^^ ( a b c )", ["b"], ["a", "b", "c"], True),
+ ("^^ ( a b c )", ["c"], ["a", "b", "c"], True),
+ ("^^ ( a b c )", ["a", "b"], ["a", "b", "c"], False),
+ ("^^ ( a b c )", ["b", "c"], ["a", "b", "c"], False),
+ ("^^ ( a b c )", ["a", "c"], ["a", "b", "c"], False),
+ ("^^ ( a b c )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ("a? ( ^^ ( b c ) )", [], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["a"], ["a", "b", "c"], False),
+ ("a? ( ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ("^^ ( a? ( !b ) !c? ( d ) )", [], ["a", "b", "c", "d"], False),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["c"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "c"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "c"], ["a", "b", "c", "d"], False),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "d"], ["a", "b", "c", "d"], False),
+
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ("^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], True),
+
+ ("|| ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
+ ("|| ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
+ ("|| ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", [], ["a", "b", "c"], False),
+
+ ("^^ ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], False),
+ ("^^ ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
+ ("^^ ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
+ ("^^ ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", [], ["a", "b", "c"], False),
)
test_cases_xfail = (
- ( "^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b"]),
- ( "^^ ( || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
- ( "^^( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
- ( "^^ || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
- ( "^^ ( ( || ) ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
- ( "^^ ( || ( a b ) ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b"]),
+ ("^^ ( || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
+ ("^^( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ ("^^ || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
+ ("^^ ( ( || ) ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ ("^^ ( || ( a b ) ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ )
+
+ test_cases_xfail_eapi = (
+ ("?? ( a b )", [], ["a", "b"], "4"),
)
for required_use, use, iuse, expected in test_cases:
@@ -110,6 +119,11 @@ class TestCheckRequiredUse(TestCase):
self.assertRaisesMsg(required_use + ", USE = " + " ".join(use), \
InvalidDependString, check_required_use, required_use, use, iuse.__contains__)
+ for required_use, use, iuse, eapi in test_cases_xfail_eapi:
+ self.assertRaisesMsg(required_use + ", USE = " + " ".join(use), \
+ InvalidDependString, check_required_use, required_use, use,
+ iuse.__contains__, eapi=eapi)
+
def testCheckRequiredUseFilterSatisfied(self):
"""
Test filtering of satisfied parts of REQUIRED_USE,
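The new cases above exercise the "??" (at-most-one-of) REQUIRED_USE operator: the group is satisfied when zero or one of its flags is enabled, and an EAPI that predates the operator must reject it outright, which is what the new test_cases_xfail_eapi tuple verifies with eapi="4". A minimal sketch of that behavior, using only the check_required_use call shape already shown in this file:

    from portage.dep import check_required_use
    from portage.exception import InvalidDependString

    iuse = ["a", "b"]

    # Satisfied: zero or one of the flags is enabled.
    assert check_required_use("?? ( a b )", ["a"], iuse.__contains__)
    assert check_required_use("?? ( a b )", [], iuse.__contains__)

    # Rejected under EAPI 4, which has no "??" operator.
    try:
        check_required_use("?? ( a b )", [], iuse.__contains__, eapi="4")
    except InvalidDependString:
        pass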
diff --git a/pym/portage/tests/dep/testStandalone.py b/pym/portage/tests/dep/testStandalone.py
index f03f2d508..88e3f39f8 100644
--- a/pym/portage/tests/dep/testStandalone.py
+++ b/pym/portage/tests/dep/testStandalone.py
@@ -12,20 +12,20 @@ class TestStandalone(TestCase):
def testCPVequal(self):
test_cases = (
- ( "sys-apps/portage-2.1","sys-apps/portage-2.1", True ),
- ( "sys-apps/portage-2.1","sys-apps/portage-2.0", False ),
- ( "sys-apps/portage-2.1","sys-apps/portage-2.1-r1", False ),
- ( "sys-apps/portage-2.1-r1","sys-apps/portage-2.1", False ),
- ( "sys-apps/portage-2.1_alpha3","sys-apps/portage-2.1", False ),
- ( "sys-apps/portage-2.1_alpha3_p6","sys-apps/portage-2.1_alpha3", False ),
- ( "sys-apps/portage-2.1_alpha3","sys-apps/portage-2.1", False ),
- ( "sys-apps/portage-2.1","sys-apps/X-2.1", False ),
- ( "sys-apps/portage-2.1","portage-2.1", False ),
+ ("sys-apps/portage-2.1", "sys-apps/portage-2.1", True),
+ ("sys-apps/portage-2.1", "sys-apps/portage-2.0", False),
+ ("sys-apps/portage-2.1", "sys-apps/portage-2.1-r1", False),
+ ("sys-apps/portage-2.1-r1", "sys-apps/portage-2.1", False),
+ ("sys-apps/portage-2.1_alpha3", "sys-apps/portage-2.1", False),
+ ("sys-apps/portage-2.1_alpha3_p6", "sys-apps/portage-2.1_alpha3", False),
+ ("sys-apps/portage-2.1_alpha3", "sys-apps/portage-2.1", False),
+ ("sys-apps/portage-2.1", "sys-apps/X-2.1", False),
+ ("sys-apps/portage-2.1", "portage-2.1", False),
)
-
+
test_cases_xfail = (
- ( "sys-apps/portage","sys-apps/portage" ),
- ( "sys-apps/portage-2.1-6","sys-apps/portage-2.1-6" ),
+ ("sys-apps/portage", "sys-apps/portage"),
+ ("sys-apps/portage-2.1-6", "sys-apps/portage-2.1-6"),
)
for cpv1, cpv2, expected_result in test_cases:
@@ -33,5 +33,5 @@ class TestStandalone(TestCase):
"cpvequal('%s', '%s') != %s" % (cpv1, cpv2, expected_result))
for cpv1, cpv2 in test_cases_xfail:
- self.assertRaisesMsg("cpvequal("+cpv1+", "+cpv2+")", \
+ self.assertRaisesMsg("cpvequal(%s, %s)" % (cpv1, cpv2),
PortageException, cpvequal, cpv1, cpv2)
diff --git a/pym/portage/tests/dep/test_best_match_to_list.py b/pym/portage/tests/dep/test_best_match_to_list.py
index 8a1403828..586c8bc50 100644
--- a/pym/portage/tests/dep/test_best_match_to_list.py
+++ b/pym/portage/tests/dep/test_best_match_to_list.py
@@ -1,5 +1,5 @@
# test_best_match_to_list.py -- Portage Unit Testing Functionality
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from itertools import permutations
@@ -28,25 +28,29 @@ class Test_best_match_to_list(TestCase):
def testBest_match_to_list(self):
tests = [
- ("dev-libs/A-4", [Atom(">=dev-libs/A-3"), Atom(">=dev-libs/A-2")], \
- [Atom(">=dev-libs/A-3"), Atom(">=dev-libs/A-2")], True),
- ("dev-libs/A-4", [Atom("<=dev-libs/A-5"), Atom("<=dev-libs/A-6")], \
- [Atom("<=dev-libs/A-5"), Atom("<=dev-libs/A-6")], True),
- ("dev-libs/A-1", [Atom("dev-libs/A"), Atom("=dev-libs/A-1")], \
- [Atom("=dev-libs/A-1"), Atom("dev-libs/A")], True),
- ("dev-libs/A-1", [Atom("dev-libs/B"), Atom("=dev-libs/A-1:0")], \
- [Atom("=dev-libs/A-1:0")], True),
- ("dev-libs/A-1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=dev-libs/A-1:0")], \
- [Atom("=dev-libs/A-1:0"), Atom("dev-libs/*", allow_wildcard=True)], True),
- ("dev-libs/A-4.9999-r1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=*/*-*9999*", allow_wildcard=True)], \
- [Atom("=*/*-*9999*", allow_wildcard=True), Atom("dev-libs/*", allow_wildcard=True)], True),
- ("dev-libs/A-1:0", [Atom("dev-*/*", allow_wildcard=True), Atom("dev-*/*:0", allow_wildcard=True),\
- Atom("dev-libs/A"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A:0"), \
- Atom("=dev-libs/A-1*"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1")], \
- [Atom("=dev-libs/A-1"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1*"), \
- Atom("dev-libs/A:0"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A"), \
- Atom("dev-*/*:0", allow_wildcard=True), Atom("dev-*/*", allow_wildcard=True)], False)
- ]
+ ("dev-libs/A-4", [Atom(">=dev-libs/A-3"), Atom(">=dev-libs/A-2")],
+ [Atom(">=dev-libs/A-3"), Atom(">=dev-libs/A-2")], True),
+ ("dev-libs/A-4", [Atom("<=dev-libs/A-5"), Atom("<=dev-libs/A-6")],
+ [Atom("<=dev-libs/A-5"), Atom("<=dev-libs/A-6")], True),
+ ("dev-libs/A-1", [Atom("dev-libs/A"), Atom("=dev-libs/A-1")],
+ [Atom("=dev-libs/A-1"), Atom("dev-libs/A")], True),
+ ("dev-libs/A-1", [Atom("dev-libs/B"), Atom("=dev-libs/A-1:0")],
+ [Atom("=dev-libs/A-1:0")], True),
+ ("dev-libs/A-1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=dev-libs/A-1:0")],
+ [Atom("=dev-libs/A-1:0"), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-4.9999-r1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=*/*-*9999*", allow_wildcard=True)],
+ [Atom("=*/*-*9999*", allow_wildcard=True), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-4_beta-r1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=*/*-*_beta*", allow_wildcard=True)],
+ [Atom("=*/*-*_beta*", allow_wildcard=True), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-4_beta1-r1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=*/*-*_beta*", allow_wildcard=True)],
+ [Atom("=*/*-*_beta*", allow_wildcard=True), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-1:0", [Atom("dev-*/*", allow_wildcard=True), Atom("dev-*/*:0", allow_wildcard=True),
+ Atom("dev-libs/A"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A:0"),
+ Atom("=dev-libs/A-1*"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1")],
+ [Atom("=dev-libs/A-1"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1*"),
+ Atom("dev-libs/A:0"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A"),
+ Atom("dev-*/*:0", allow_wildcard=True), Atom("dev-*/*", allow_wildcard=True)], False)
+ ]
for pkg, atom_list, result, all_permutations in tests:
if all_permutations:
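In the table above, each expected list is ordered from best to worst match, and the added _beta cases confirm that a versioned wildcard such as "=*/*-*_beta*" outranks a bare category wildcard. A sketch of a single best_match_to_list call, under that reading of the table (the function and Atom come from this module's own imports):

    from portage.dep import Atom, best_match_to_list

    # The fully versioned atom is the more specific match.
    best = best_match_to_list("dev-libs/A-1",
        [Atom("dev-libs/A"), Atom("=dev-libs/A-1")])
    # best == Atom("=dev-libs/A-1")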
diff --git a/pym/portage/tests/dep/test_dep_getcpv.py b/pym/portage/tests/dep/test_dep_getcpv.py
index 8a0a8aa2f..79c1514a1 100644
--- a/pym/portage/tests/dep/test_dep_getcpv.py
+++ b/pym/portage/tests/dep/test_dep_getcpv.py
@@ -10,12 +10,14 @@ class DepGetCPV(TestCase):
"""
def testDepGetCPV(self):
-
- prefix_ops = ["<", ">", "=", "~", "<=",
- ">=", "!=", "!<", "!>", "!~"]
- bad_prefix_ops = [ ">~", "<~", "~>", "~<" ]
- postfix_ops = [ ("=", "*"), ]
+ prefix_ops = [
+ "<", ">", "=", "~", "<=",
+ ">=", "!=", "!<", "!>", "!~"
+ ]
+
+ bad_prefix_ops = [">~", "<~", "~>", "~<"]
+ postfix_ops = [("=", "*"),]
cpvs = ["sys-apps/portage-2.1", "sys-apps/portage-2.1",
"sys-apps/portage-2.1"]
@@ -26,10 +28,10 @@ class DepGetCPV(TestCase):
mycpv = prefix + cpv
if slot:
mycpv += slot
- self.assertEqual( dep_getcpv( mycpv ), cpv )
+ self.assertEqual(dep_getcpv(mycpv), cpv)
for prefix, postfix in postfix_ops:
mycpv = prefix + cpv + postfix
if slot:
mycpv += slot
- self.assertEqual( dep_getcpv( mycpv ), cpv )
+ self.assertEqual(dep_getcpv(mycpv), cpv)
diff --git a/pym/portage/tests/dep/test_dep_getrepo.py b/pym/portage/tests/dep/test_dep_getrepo.py
index 78ead8cee..6c17d3cf7 100644
--- a/pym/portage/tests/dep/test_dep_getrepo.py
+++ b/pym/portage/tests/dep/test_dep_getrepo.py
@@ -11,9 +11,9 @@ class DepGetRepo(TestCase):
def testDepGetRepo(self):
repo_char = "::"
- repos = ( "a", "repo-name", "repo_name", "repo123", None )
+ repos = ("a", "repo-name", "repo_name", "repo123", None)
cpvs = ["sys-apps/portage"]
- versions = ["2.1.1","2.1-r1", None]
+ versions = ["2.1.1", "2.1-r1", None]
uses = ["[use]", None]
for cpv in cpvs:
for version in versions:
@@ -26,4 +26,4 @@ class DepGetRepo(TestCase):
pkg = pkg + repo_char + repo
if use:
pkg = pkg + use
- self.assertEqual( dep_getrepo( pkg ), repo )
+ self.assertEqual(dep_getrepo(pkg), repo)
diff --git a/pym/portage/tests/dep/test_dep_getslot.py b/pym/portage/tests/dep/test_dep_getslot.py
index 206cecc8c..84828648b 100644
--- a/pym/portage/tests/dep/test_dep_getslot.py
+++ b/pym/portage/tests/dep/test_dep_getslot.py
@@ -12,9 +12,9 @@ class DepGetSlot(TestCase):
def testDepGetSlot(self):
slot_char = ":"
- slots = ( "a", "1.2", "1", "IloveVapier", None )
+ slots = ("a", "1.2", "1", "IloveVapier", None)
cpvs = ["sys-apps/portage"]
- versions = ["2.1.1","2.1-r1"]
+ versions = ["2.1.1", "2.1-r1"]
for cpv in cpvs:
for version in versions:
for slot in slots:
@@ -22,7 +22,7 @@ class DepGetSlot(TestCase):
if version:
mycpv = '=' + mycpv + '-' + version
if slot is not None:
- self.assertEqual( dep_getslot(
- mycpv + slot_char + slot ), slot )
+ self.assertEqual(dep_getslot(
+ mycpv + slot_char + slot), slot)
else:
- self.assertEqual( dep_getslot( mycpv ), slot )
+ self.assertEqual(dep_getslot(mycpv), slot)
diff --git a/pym/portage/tests/dep/test_dep_getusedeps.py b/pym/portage/tests/dep/test_dep_getusedeps.py
index d2494f7b3..cd58eab35 100644
--- a/pym/portage/tests/dep/test_dep_getusedeps.py
+++ b/pym/portage/tests/dep/test_dep_getusedeps.py
@@ -24,12 +24,12 @@ class DepGetUseDeps(TestCase):
cpv += ":" + slot
if isinstance(use, tuple):
cpv += "[%s]" % (",".join(use),)
- self.assertEqual( dep_getusedeps(
- cpv ), use )
+ self.assertEqual(dep_getusedeps(
+ cpv), use)
else:
if len(use):
- self.assertEqual( dep_getusedeps(
- cpv + "[" + use + "]" ), (use,) )
+ self.assertEqual(dep_getusedeps(
+ cpv + "[" + use + "]"), (use,))
else:
- self.assertEqual( dep_getusedeps(
- cpv + "[" + use + "]" ), () )
+ self.assertEqual(dep_getusedeps(
+ cpv + "[" + use + "]"), ())
diff --git a/pym/portage/tests/dep/test_get_operator.py b/pym/portage/tests/dep/test_get_operator.py
index 4f9848f5d..5076e2107 100644
--- a/pym/portage/tests/dep/test_get_operator.py
+++ b/pym/portage/tests/dep/test_get_operator.py
@@ -10,24 +10,28 @@ class GetOperator(TestCase):
def testGetOperator(self):
# get_operator does not validate operators
- tests = [ ( "~", "~" ), ( "=", "=" ), ( ">", ">" ),
- ( ">=", ">=" ), ( "<=", "<=" ),
+ tests = [
+ ("~", "~"),
+ ("=", "="),
+ (">", ">"),
+ (">=", ">="),
+ ("<=", "<="),
]
test_cpvs = ["sys-apps/portage-2.1"]
- slots = [ None,"1","linux-2.5.6" ]
+ slots = [None, "1", "linux-2.5.6"]
for cpv in test_cpvs:
for test in tests:
for slot in slots:
atom = cpv[:]
if slot:
atom += ":" + slot
- result = get_operator( test[0] + atom )
- self.assertEqual( result, test[1],
- msg="get_operator(%s) != %s" % (test[0] + atom, test[1]) )
+ result = get_operator(test[0] + atom)
+ self.assertEqual(result, test[1],
+ msg="get_operator(%s) != %s" % (test[0] + atom, test[1]))
- result = get_operator( "sys-apps/portage" )
- self.assertEqual( result, None )
+ result = get_operator("sys-apps/portage")
+ self.assertEqual(result, None)
- result = get_operator( "=sys-apps/portage-2.1*" )
- self.assertEqual( result , "=*" )
+ result = get_operator("=sys-apps/portage-2.1*")
+ self.assertEqual(result, "=*")
diff --git a/pym/portage/tests/dep/test_get_required_use_flags.py b/pym/portage/tests/dep/test_get_required_use_flags.py
index 06f81106a..90e096c78 100644
--- a/pym/portage/tests/dep/test_get_required_use_flags.py
+++ b/pym/portage/tests/dep/test_get_required_use_flags.py
@@ -1,4 +1,4 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -13,6 +13,8 @@ class TestCheckRequiredUse(TestCase):
("|| ( a b c )", ["a", "b", "c"]),
("^^ ( a b c )", ["a", "b", "c"]),
+ ("?? ( a b c )", ["a", "b", "c"]),
+ ("?? ( )", []),
("|| ( a b ^^ ( d e f ) )", ["a", "b", "d", "e", "f"]),
("^^ ( a b || ( d e f ) )", ["a", "b", "d", "e", "f"]),
diff --git a/pym/portage/tests/dep/test_isjustname.py b/pym/portage/tests/dep/test_isjustname.py
index c16fb5493..9b95bcd0f 100644
--- a/pym/portage/tests/dep/test_isjustname.py
+++ b/pym/portage/tests/dep/test_isjustname.py
@@ -9,16 +9,16 @@ class IsJustName(TestCase):
def testIsJustName(self):
- cats = ( "", "sys-apps/", "foo/", "virtual/" )
- pkgs = ( "portage", "paludis", "pkgcore", "notARealPkg" )
- vers = ( "", "-2.0-r3", "-1.0_pre2", "-3.1b" )
+ cats = ("", "sys-apps/", "foo/", "virtual/")
+ pkgs = ("portage", "paludis", "pkgcore", "notARealPkg")
+ vers = ("", "-2.0-r3", "-1.0_pre2", "-3.1b")
for pkg in pkgs:
for cat in cats:
for ver in vers:
if len(ver):
- self.assertFalse( isjustname( cat + pkg + ver ),
- msg="isjustname(%s) is True!" % (cat + pkg + ver) )
+ self.assertFalse(isjustname(cat + pkg + ver),
+ msg="isjustname(%s) is True!" % (cat + pkg + ver))
else:
- self.assertTrue( isjustname( cat + pkg + ver ),
- msg="isjustname(%s) is False!" % (cat + pkg + ver) )
+ self.assertTrue(isjustname(cat + pkg + ver),
+ msg="isjustname(%s) is False!" % (cat + pkg + ver))
diff --git a/pym/portage/tests/dep/test_isvalidatom.py b/pym/portage/tests/dep/test_isvalidatom.py
index abcec755e..67ba60398 100644
--- a/pym/portage/tests/dep/test_isvalidatom.py
+++ b/pym/portage/tests/dep/test_isvalidatom.py
@@ -1,4 +1,4 @@
-# Copyright 2006-2010 Gentoo Foundation
+# Copyright 2006-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -26,7 +26,7 @@ class IsValidAtom(TestCase):
IsValidAtomTestCase("~sys-apps/portage-2.1", True),
IsValidAtomTestCase("sys-apps/portage:foo", True),
IsValidAtomTestCase("sys-apps/portage-2.1:foo", False),
- IsValidAtomTestCase( "sys-apps/portage-2.1:", False),
+ IsValidAtomTestCase("sys-apps/portage-2.1:", False),
IsValidAtomTestCase("sys-apps/portage-2.1:", False),
IsValidAtomTestCase("sys-apps/portage-2.1:[foo]", False),
IsValidAtomTestCase("sys-apps/portage", True),
@@ -141,8 +141,11 @@ class IsValidAtom(TestCase):
IsValidAtomTestCase("virtual/ffmpeg:=", True),
IsValidAtomTestCase("virtual/ffmpeg:0=", True),
IsValidAtomTestCase("virtual/ffmpeg:*", True),
- IsValidAtomTestCase("virtual/ffmpeg:0*", True),
+ IsValidAtomTestCase("virtual/ffmpeg:0*", False),
IsValidAtomTestCase("virtual/ffmpeg:0", True),
+
+ # Wildcard atoms
+ IsValidAtomTestCase("*/portage-2.1", False, allow_wildcard=True),
)
for test_case in test_cases:
@@ -150,6 +153,6 @@ class IsValidAtom(TestCase):
atom_type = "valid"
else:
atom_type = "invalid"
- self.assertEqual( bool(isvalidatom(test_case.atom, allow_wildcard=test_case.allow_wildcard, \
+ self.assertEqual(bool(isvalidatom(test_case.atom, allow_wildcard=test_case.allow_wildcard,
allow_repo=test_case.allow_repo)), test_case.expected,
- msg="isvalidatom(%s) != %s" % ( test_case.atom, test_case.expected ) )
+ msg="isvalidatom(%s) != %s" % (test_case.atom, test_case.expected))
diff --git a/pym/portage/tests/dep/test_match_from_list.py b/pym/portage/tests/dep/test_match_from_list.py
index d5d718f74..75ac8fd80 100644
--- a/pym/portage/tests/dep/test_match_from_list.py
+++ b/pym/portage/tests/dep/test_match_from_list.py
@@ -1,4 +1,4 @@
-# Copyright 2006-2012 Gentoo Foundation
+# Copyright 2006-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -7,6 +7,7 @@ from portage.dep import Atom, match_from_list, _repo_separator
from portage.versions import catpkgsplit, _pkg_str
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
class Package(object):
@@ -17,14 +18,14 @@ class Package(object):
atom = Atom(atom, allow_repo=True)
self.cp = atom.cp
slot = atom.slot
- if atom.slot_abi:
- slot = "%s/%s" % (slot, atom.slot_abi)
+ if atom.sub_slot:
+ slot = "%s/%s" % (slot, atom.sub_slot)
if not slot:
slot = '0'
self.cpv = _pkg_str(atom.cpv, slot=slot, repo=atom.repo)
self.cpv_split = catpkgsplit(self.cpv)
self.slot = self.cpv.slot
- self.slot_abi = self.cpv.slot_abi
+ self.sub_slot = self.cpv.sub_slot
self.repo = atom.repo
if atom.use:
self.use = self._use_class(atom.use.enabled)
@@ -53,76 +54,79 @@ class Test_match_from_list(TestCase):
def testMatch_from_list(self):
tests = (
- ("=sys-apps/portage-45*", [], [] ),
- ("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- ("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- ("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- ("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- ("=sys-apps/portage-045", ["sys-apps/portage-046"], [] ),
- ("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"] ),
- ("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], [] ),
- ("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- ("<=sys-apps/portage-045", ["sys-apps/portage-046"], [] ),
- ("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- ("<sys-apps/portage-046", ["sys-apps/portage-046"], [] ),
- (">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- (">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], [] ),
- (">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
- (">sys-apps/portage-047", ["sys-apps/portage-046-r1"], [] ),
- ("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"] ),
- ("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], [] ),
+ ("=sys-apps/portage-45*", [], []),
+ ("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("=sys-apps/portage-045", ["sys-apps/portage-046"], []),
+ ("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"]),
+ ("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], []),
+ ("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("<=sys-apps/portage-045", ["sys-apps/portage-046"], []),
+ ("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("<sys-apps/portage-046", ["sys-apps/portage-046"], []),
+ (">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ (">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
+ (">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ (">sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
+ ("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"]),
+ ("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], []),
+ ("=cat/pkg-1-r1*", ["cat/pkg-1_alpha1"], []),
+ ("=cat/pkg-1-r1*", ["cat/pkg-1-r11"], ["cat/pkg-1-r11"]),
+ ("=cat/pkg-1-r1*", ["cat/pkg-01-r11"], ["cat/pkg-01-r11"]),
+ ("=cat/pkg-01-r1*", ["cat/pkg-1-r11"], ["cat/pkg-1-r11"]),
+ ("=cat/pkg-01-r1*", ["cat/pkg-001-r11"], ["cat/pkg-001-r11"]),
("=sys-fs/udev-1*", ["sys-fs/udev-123"], ["sys-fs/udev-123"]),
- ("=sys-fs/udev-4*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
- ("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
- ("*/*:0", ["sys-fs/udev-456:0"], ["sys-fs/udev-456:0"] ),
- ("*/*:1", ["sys-fs/udev-456:0"], [] ),
- ("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
- ("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
- ("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"] ),
- ("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"] ),
- ("dev-libs/*", ["sys-apps/portage-2.1.2"], [] ),
- ("*/tar", ["sys-apps/portage-2.1.2"], [] ),
- ("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"] ),
- ("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"] ),
+ ("=sys-fs/udev-4*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("*/*:0", ["sys-fs/udev-456:0"], ["sys-fs/udev-456:0"]),
+ ("*/*:1", ["sys-fs/udev-456:0"], []),
+ ("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"]),
+ ("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"]),
+ ("dev-libs/*", ["sys-apps/portage-2.1.2"], []),
+ ("*/tar", ["sys-apps/portage-2.1.2"], []),
+ ("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"]),
+ ("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"]),
- ("dev-libs/A[foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-1"] ),
- ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-2"] ),
- ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")], [] ),
- ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], [] ),
- ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")], [] ),
- ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")], ["dev-libs/A-2"] ),
- ("dev-libs/A[foo,bar(+)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"] ),
- ("dev-libs/A[foo,bar(-)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], [] ),
- ("dev-libs/A[foo,-bar(-)]", [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"] ),
+ ("dev-libs/A[foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-1"]),
+ ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-2"]),
+ ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")], []),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], []),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")], []),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")], ["dev-libs/A-2"]),
+ ("dev-libs/A[foo,bar(+)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"]),
+ ("dev-libs/A[foo,bar(-)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], []),
+ ("dev-libs/A[foo,-bar(-)]", [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"]),
- ("dev-libs/A::repo1", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo1"] ),
- ("dev-libs/A::repo2", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo2"] ),
- ("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[foo]"), Package("=dev-libs/A-1::repo2[-foo]")], [] ),
- ("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[-foo]"), Package("=dev-libs/A-1::repo2[foo]")], ["dev-libs/A-1::repo2"] ),
- ("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:1::repo1"), Package("=dev-libs/A-1:2::repo2")], [] ),
- ("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:2::repo1"), Package("=dev-libs/A-1:1::repo2[foo]")], ["dev-libs/A-1::repo2"] ),
+ ("dev-libs/A::repo1", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo1"]),
+ ("dev-libs/A::repo2", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo2"]),
+ ("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[foo]"), Package("=dev-libs/A-1::repo2[-foo]")], []),
+ ("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[-foo]"), Package("=dev-libs/A-1::repo2[foo]")], ["dev-libs/A-1::repo2"]),
+ ("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:1::repo1"), Package("=dev-libs/A-1:2::repo2")], []),
+ ("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:2::repo1"), Package("=dev-libs/A-1:1::repo2[foo]")], ["dev-libs/A-1::repo2"]),
- ("virtual/ffmpeg:0/53", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"] ),
- ("virtual/ffmpeg:0/53=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"] ),
- ("virtual/ffmpeg:0/52", [Package("=virtual/ffmpeg-0.10.3:0/53")], [] ),
- ("virtual/ffmpeg:=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"] ),
- ("virtual/ffmpeg:0=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"] ),
- ("virtual/ffmpeg:*", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"] ),
- ("virtual/ffmpeg:0*", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"] ),
- ("virtual/ffmpeg:0", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"] ),
+ ("virtual/ffmpeg:0/53", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0/53=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0/52", [Package("=virtual/ffmpeg-0.10.3:0/53")], []),
+ ("virtual/ffmpeg:=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:*", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
- ("sys-libs/db:4.8/4.8", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"] ),
- ("sys-libs/db:4.8/4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"] ),
- ("sys-libs/db:4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"] ),
- ("sys-libs/db:4.8*", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"] ),
- ("sys-libs/db:*", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"] ),
- ("sys-libs/db:4.8/0", [Package("=sys-libs/db-4.8.30:4.8")], [] ),
- ("sys-libs/db:4.8/0=", [Package("=sys-libs/db-4.8.30:4.8")], [] ),
+ ("sys-libs/db:4.8/4.8", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:4.8/4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:*", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:4.8/0", [Package("=sys-libs/db-4.8.30:4.8")], []),
+ ("sys-libs/db:4.8/0=", [Package("=sys-libs/db-4.8.30:4.8")], []),
)
for atom, cpv_list, expected_result in tests:
result = []
- for pkg in match_from_list( atom, cpv_list ):
+ for pkg in match_from_list(atom, cpv_list):
if isinstance(pkg, Package):
if pkg.repo:
result.append(pkg.cpv + _repo_separator + pkg.repo)
@@ -130,4 +134,4 @@ class Test_match_from_list(TestCase):
result.append(pkg.cpv)
else:
result.append(pkg)
- self.assertEqual( result, expected_result )
+ self.assertEqual(result, expected_result)
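Besides the reformatting, this file tracks the slot_abi -> sub_slot rename: a slot specification such as "0/53" now splits into slot "0" and sub_slot "53" on both Atom and _pkg_str, which is exactly what the Package helper above relies on. A short sketch with the same constructors this module imports (the "gentoo" repo name is only illustrative):

    from portage.dep import Atom
    from portage.versions import _pkg_str

    atom = Atom("virtual/ffmpeg:0/53")
    # atom.slot == "0", atom.sub_slot == "53"

    pkg = _pkg_str("virtual/ffmpeg-0.10.3", slot="0/53", repo="gentoo")
    # pkg.slot == "0", pkg.sub_slot == "53"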
diff --git a/pym/portage/tests/dep/test_paren_reduce.py b/pym/portage/tests/dep/test_paren_reduce.py
index 9a147a02e..324465289 100644
--- a/pym/portage/tests/dep/test_paren_reduce.py
+++ b/pym/portage/tests/dep/test_paren_reduce.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -10,30 +10,30 @@ class TestParenReduce(TestCase):
def testParenReduce(self):
test_cases = (
- ( "A", ["A"]),
- ( "( A )", ["A"]),
- ( "|| ( A B )", [ "||", ["A", "B"] ]),
- ( "|| ( A || ( B C ) )", [ "||", ["A", "||", ["B", "C"]]]),
- ( "|| ( A || ( B C D ) )", [ "||", ["A", "||", ["B", "C", "D"]] ]),
- ( "|| ( A || ( B || ( C D ) E ) )", [ "||", ["A", "||", ["B", "||", ["C", "D"], "E"]] ]),
- ( "a? ( A )", ["a?", ["A"]]),
-
- ( "( || ( ( ( A ) B ) ) )", ["A", "B"]),
- ( "( || ( || ( ( A ) B ) ) )", [ "||", ["A", "B"] ]),
- ( "|| ( A )", ["A"]),
- ( "( || ( || ( || ( A ) foo? ( B ) ) ) )", [ "||", ["A", "foo?", ["B"] ]]),
- ( "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )", [ "||", ["bar?", ["A"], "foo?", ["B"] ]]),
- ( "A || ( ) foo? ( ) B", ["A", "B"]),
+ ("A", ["A"]),
+ ("( A )", ["A"]),
+ ("|| ( A B )", ["||", ["A", "B"]]),
+ ("|| ( A || ( B C ) )", ["||", ["A", "||", ["B", "C"]]]),
+ ("|| ( A || ( B C D ) )", ["||", ["A", "||", ["B", "C", "D"]]]),
+ ("|| ( A || ( B || ( C D ) E ) )", ["||", ["A", "||", ["B", "||", ["C", "D"], "E"]]]),
+ ("a? ( A )", ["a?", ["A"]]),
- ( "|| ( A ) || ( B )", ["A", "B"]),
- ( "foo? ( A ) foo? ( B )", ["foo?", ["A"], "foo?", ["B"]]),
+ ("( || ( ( ( A ) B ) ) )", ["A", "B"]),
+ ("( || ( || ( ( A ) B ) ) )", ["||", ["A", "B"]]),
+ ("|| ( A )", ["A"]),
+ ("( || ( || ( || ( A ) foo? ( B ) ) ) )", ["||", ["A", "foo?", ["B"]]]),
+ ("( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )", ["||", ["bar?", ["A"], "foo?", ["B"]]]),
+ ("A || ( ) foo? ( ) B", ["A", "B"]),
- ( "|| ( ( A B ) C )", [ "||", [ ["A", "B"], "C"] ]),
- ( "|| ( ( A B ) ( C ) )", [ "||", [ ["A", "B"], "C"] ]),
+ ("|| ( A ) || ( B )", ["A", "B"]),
+ ("foo? ( A ) foo? ( B )", ["foo?", ["A"], "foo?", ["B"]]),
+
+ ("|| ( ( A B ) C )", ["||", [["A", "B"], "C"]]),
+ ("|| ( ( A B ) ( C ) )", ["||", [["A", "B"], "C"]]),
# test USE dep defaults for bug #354003
- ( ">=dev-lang/php-5.2[pcre(+)]", [ ">=dev-lang/php-5.2[pcre(+)]" ]),
+ (">=dev-lang/php-5.2[pcre(+)]", [">=dev-lang/php-5.2[pcre(+)]"]),
)
-
+
test_cases_xfail = (
"( A",
"A )",
@@ -47,20 +47,23 @@ class TestParenReduce(TestCase):
"|| A B",
"|| ( A B ) )",
"|| || B C",
-
+
"|| ( A B || )",
-
+
"a? A",
-
- ( "( || ( || || ( A ) foo? ( B ) ) )"),
- ( "( || ( || bar? ( A ) foo? ( B ) ) )"),
+
+ "( || ( || || ( A ) foo? ( B ) ) )",
+ "( || ( || bar? ( A ) foo? ( B ) ) )",
)
for dep_str, expected_result in test_cases:
- self.assertEqual(paren_reduce(dep_str), expected_result,
+ self.assertEqual(paren_reduce(dep_str, _deprecation_warn=False),
+ expected_result,
"input: '%s' result: %s != %s" % (dep_str,
- paren_reduce(dep_str), expected_result))
+ paren_reduce(dep_str, _deprecation_warn=False),
+ expected_result))
for dep_str in test_cases_xfail:
self.assertRaisesMsg(dep_str,
- InvalidDependString, paren_reduce, dep_str)
+ InvalidDependString, paren_reduce, dep_str,
+ _deprecation_warn=False)
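paren_reduce() has grown a private _deprecation_warn keyword, so the tests pass _deprecation_warn=False to keep the suite quiet; the parse results themselves are unchanged. The keyword name suggests use_reduce() is the intended replacement, though the diff does not say so explicitly. For example:

    from portage.dep import paren_reduce

    paren_reduce("|| ( A B )", _deprecation_warn=False)
    # -> ["||", ["A", "B"]]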
diff --git a/pym/portage/tests/dep/test_use_reduce.py b/pym/portage/tests/dep/test_use_reduce.py
index 1618430c5..4f65567cf 100644
--- a/pym/portage/tests/dep/test_use_reduce.py
+++ b/pym/portage/tests/dep/test_use_reduce.py
@@ -6,10 +6,10 @@ from portage.exception import InvalidDependString
from portage.dep import Atom, use_reduce
class UseReduceTestCase(object):
- def __init__(self, deparray, uselist=[], masklist=[], \
- matchall=0, excludeall=[], is_src_uri=False, \
- eapi="0", opconvert=False, flat=False, expected_result=None, \
- is_valid_flag=None, token_class=None):
+ def __init__(self, deparray, uselist=[], masklist=[],
+ matchall=0, excludeall=[], is_src_uri=False,
+ eapi='0', opconvert=False, flat=False, expected_result=None,
+ is_valid_flag=None, token_class=None):
self.deparray = deparray
self.uselist = uselist
self.masklist = masklist
@@ -25,8 +25,8 @@ class UseReduceTestCase(object):
def run(self):
try:
- return use_reduce(self.deparray, self.uselist, self.masklist, \
- self.matchall, self.excludeall, self.is_src_uri, self.eapi, \
+ return use_reduce(self.deparray, self.uselist, self.masklist,
+ self.matchall, self.excludeall, self.is_src_uri, self.eapi,
self.opconvert, self.flat, self.is_valid_flag, self.token_class)
except InvalidDependString as e:
raise InvalidDependString("%s: %s" % (e, self.deparray))
@@ -47,508 +47,507 @@ class UseReduce(TestCase):
test_cases = (
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- uselist = ["a", "b", "c", "d"],
- expected_result = ["A", "B"]
+ uselist=["a", "b", "c", "d"],
+ expected_result=["A", "B"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- uselist = ["a", "b", "c"],
- expected_result = ["A", "B", "D"]
+ uselist=["a", "b", "c"],
+ expected_result=["A", "B", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- uselist = ["b", "c"],
- expected_result = ["B", "D"]
+ uselist=["b", "c"],
+ expected_result=["B", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- matchall = True,
- expected_result = ["A", "B", "C", "D"]
+ matchall=True,
+ expected_result=["A", "B", "C", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- masklist = ["a", "c"],
- expected_result = ["C", "D"]
+ masklist=["a", "c"],
+ expected_result=["C", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- matchall = True,
- masklist = ["a", "c"],
- expected_result = ["B", "C", "D"]
+ matchall=True,
+ masklist=["a", "c"],
+ expected_result=["B", "C", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- uselist = ["a", "b"],
- masklist = ["a", "c"],
- expected_result = ["B", "C", "D"]
+ uselist=["a", "b"],
+ masklist=["a", "c"],
+ expected_result=["B", "C", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- excludeall = ["a", "c"],
- expected_result = ["D"]
+ excludeall=["a", "c"],
+ expected_result=["D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- uselist = ["b"],
- excludeall = ["a", "c"],
- expected_result = ["B", "D"]
+ uselist=["b"],
+ excludeall=["a", "c"],
+ expected_result=["B", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- matchall = True,
- excludeall = ["a", "c"],
- expected_result = ["A", "B", "D"]
+ matchall=True,
+ excludeall=["a", "c"],
+ expected_result=["A", "B", "D"]
),
UseReduceTestCase(
"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
- matchall = True,
- excludeall = ["a", "c"],
- masklist = ["b"],
- expected_result = ["A", "D"]
+ matchall=True,
+ excludeall=["a", "c"],
+ masklist=["b"],
+ expected_result=["A", "D"]
),
-
UseReduceTestCase(
"a? ( b? ( AB ) )",
- uselist = ["a", "b"],
- expected_result = ["AB"]
+ uselist=["a", "b"],
+ expected_result=["AB"]
),
UseReduceTestCase(
"a? ( b? ( AB ) C )",
- uselist = ["a"],
- expected_result = ["C"]
+ uselist=["a"],
+ expected_result=["C"]
),
UseReduceTestCase(
"a? ( b? ( || ( AB CD ) ) )",
- uselist = ["a", "b"],
- expected_result = ["||", ["AB", "CD"]]
+ uselist=["a", "b"],
+ expected_result=["||", ["AB", "CD"]]
),
UseReduceTestCase(
"|| ( || ( a? ( A ) b? ( B ) ) )",
- uselist = ["a", "b"],
- expected_result = ["||", ["A", "B"]]
+ uselist=["a", "b"],
+ expected_result=["||", ["A", "B"]]
),
UseReduceTestCase(
"|| ( || ( a? ( A ) b? ( B ) ) )",
- uselist = ["a"],
- expected_result = ["A"]
+ uselist=["a"],
+ expected_result=["A"]
),
UseReduceTestCase(
"|| ( || ( a? ( A ) b? ( B ) ) )",
- uselist = [],
- expected_result = []
+ uselist=[],
+ expected_result=[]
),
UseReduceTestCase(
"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
- uselist = [],
- expected_result = []
+ uselist=[],
+ expected_result=[]
),
UseReduceTestCase(
"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
- uselist = ["a"],
- expected_result = ["A"]
+ uselist=["a"],
+ expected_result=["A"]
),
UseReduceTestCase(
"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
- uselist = ["b"],
- expected_result = ["B"]
+ uselist=["b"],
+ expected_result=["B"]
),
UseReduceTestCase(
"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
- uselist = ["c"],
- expected_result = []
+ uselist=["c"],
+ expected_result=[]
),
UseReduceTestCase(
"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
- uselist = ["a", "c"],
- expected_result = ["||", [ "A", "C"]]
+ uselist=["a", "c"],
+ expected_result=["||", ["A", "C"]]
),
-
- #paren_reduce tests
+
+ # paren_reduce tests
UseReduceTestCase(
"A",
- expected_result = ["A"]),
+ expected_result=["A"]),
UseReduceTestCase(
"( A )",
- expected_result = ["A"]),
+ expected_result=["A"]),
UseReduceTestCase(
"|| ( A B )",
- expected_result = [ "||", ["A", "B"] ]),
+ expected_result=["||", ["A", "B"]]),
UseReduceTestCase(
"|| ( ( A B ) C )",
- expected_result = [ "||", [ ["A", "B"], "C"] ]),
+ expected_result=["||", [["A", "B"], "C"]]),
UseReduceTestCase(
"|| ( ( A B ) ( C ) )",
- expected_result = [ "||", [ ["A", "B"], "C"] ]),
+ expected_result=["||", [["A", "B"], "C"]]),
UseReduceTestCase(
"|| ( A || ( B C ) )",
- expected_result = [ "||", ["A", "B", "C"]]),
+ expected_result=["||", ["A", "B", "C"]]),
UseReduceTestCase(
"|| ( A || ( B C D ) )",
- expected_result = [ "||", ["A", "B", "C", "D"] ]),
+ expected_result=["||", ["A", "B", "C", "D"]]),
UseReduceTestCase(
"|| ( A || ( B || ( C D ) E ) )",
- expected_result = [ "||", ["A", "B", "C", "D", "E"] ]),
+ expected_result=["||", ["A", "B", "C", "D", "E"]]),
UseReduceTestCase(
"( || ( ( ( A ) B ) ) )",
- expected_result = ["A", "B"] ),
+ expected_result=["A", "B"]),
UseReduceTestCase(
"( || ( || ( ( A ) B ) ) )",
- expected_result = [ "||", ["A", "B"] ]),
+ expected_result=["||", ["A", "B"]]),
UseReduceTestCase(
"( || ( || ( ( A ) B ) ) )",
- expected_result = [ "||", ["A", "B"] ]),
+ expected_result=["||", ["A", "B"]]),
UseReduceTestCase(
"|| ( A )",
- expected_result = ["A"]),
+ expected_result=["A"]),
UseReduceTestCase(
"( || ( || ( || ( A ) foo? ( B ) ) ) )",
- expected_result = ["A"]),
+ expected_result=["A"]),
UseReduceTestCase(
"( || ( || ( || ( A ) foo? ( B ) ) ) )",
- uselist = ["foo"],
- expected_result = [ "||", ["A", "B"] ]),
+ uselist=["foo"],
+ expected_result=["||", ["A", "B"]]),
UseReduceTestCase(
"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
- expected_result = []),
+ expected_result=[]),
UseReduceTestCase(
"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
- uselist = ["foo", "bar"],
- expected_result = [ "||", [ "A", "B" ] ]),
+ uselist=["foo", "bar"],
+ expected_result=["||", ["A", "B"]]),
UseReduceTestCase(
"A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
- expected_result = ["A", "B"]),
+ expected_result=["A", "B"]),
UseReduceTestCase(
"|| ( A ) || ( B )",
- expected_result = ["A", "B"]),
+ expected_result=["A", "B"]),
UseReduceTestCase(
"foo? ( A ) foo? ( B )",
- expected_result = []),
+ expected_result=[]),
UseReduceTestCase(
"foo? ( A ) foo? ( B )",
- uselist = ["foo"],
- expected_result = ["A", "B"]),
+ uselist=["foo"],
+ expected_result=["A", "B"]),
UseReduceTestCase(
"|| ( A B ) C",
- expected_result = ['||', ['A', 'B'], 'C']),
+ expected_result=['||', ['A', 'B'], 'C']),
UseReduceTestCase(
"A || ( B C )",
- expected_result = ['A', '||', ['B', 'C']]),
+ expected_result=['A', '||', ['B', 'C']]),
- #SRC_URI stuff
+ # SRC_URI stuff
UseReduceTestCase(
"http://foo/bar -> blah.tbz2",
- is_src_uri = True,
- eapi = EAPI_WITH_SRC_URI_ARROWS,
- expected_result = ["http://foo/bar", "->", "blah.tbz2"]),
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "blah.tbz2"]),
UseReduceTestCase(
"foo? ( http://foo/bar -> blah.tbz2 )",
- uselist = [],
- is_src_uri = True,
- eapi = EAPI_WITH_SRC_URI_ARROWS,
- expected_result = []),
+ uselist=[],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=[]),
UseReduceTestCase(
"foo? ( http://foo/bar -> blah.tbz2 )",
- uselist = ["foo"],
- is_src_uri = True,
- eapi = EAPI_WITH_SRC_URI_ARROWS,
- expected_result = ["http://foo/bar", "->", "blah.tbz2"]),
+ uselist=["foo"],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "blah.tbz2"]),
UseReduceTestCase(
"http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )",
- uselist = [],
- is_src_uri = True,
- eapi = EAPI_WITH_SRC_URI_ARROWS,
- expected_result = ["http://foo/bar", "->", "bar.tbz2"]),
+ uselist=[],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "bar.tbz2"]),
UseReduceTestCase(
"http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )",
- uselist = ["foo"],
- is_src_uri = True,
- eapi = EAPI_WITH_SRC_URI_ARROWS,
- expected_result = ["http://foo/bar", "->", "bar.tbz2", "ftp://foo/a"]),
+ uselist=["foo"],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "bar.tbz2", "ftp://foo/a"]),
UseReduceTestCase(
"http://foo.com/foo http://foo/bar -> blah.tbz2",
- uselist = ["foo"],
- is_src_uri = True,
- eapi = EAPI_WITH_SRC_URI_ARROWS,
- expected_result = ["http://foo.com/foo", "http://foo/bar", "->", "blah.tbz2"]),
+ uselist=["foo"],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo.com/foo", "http://foo/bar", "->", "blah.tbz2"]),
- #opconvert tests
+ # opconvert tests
UseReduceTestCase(
"A",
- opconvert = True,
- expected_result = ["A"]),
+ opconvert=True,
+ expected_result=["A"]),
UseReduceTestCase(
"( A )",
- opconvert = True,
- expected_result = ["A"]),
+ opconvert=True,
+ expected_result=["A"]),
UseReduceTestCase(
"|| ( A B )",
- opconvert = True,
- expected_result = [['||', 'A', 'B']]),
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
UseReduceTestCase(
"|| ( ( A B ) C )",
- opconvert = True,
- expected_result = [['||', ['A', 'B'], 'C']]),
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], 'C']]),
UseReduceTestCase(
"|| ( A || ( B C ) )",
- opconvert = True,
- expected_result = [['||', 'A', 'B', 'C']]),
+ opconvert=True,
+ expected_result=[['||', 'A', 'B', 'C']]),
UseReduceTestCase(
"|| ( A || ( B C D ) )",
- opconvert = True,
- expected_result = [['||', 'A', 'B', 'C', 'D']]),
+ opconvert=True,
+ expected_result=[['||', 'A', 'B', 'C', 'D']]),
UseReduceTestCase(
"|| ( A || ( B || ( C D ) E ) )",
- expected_result = [ "||", ["A", "B", "C", "D", "E"] ]),
+ expected_result=["||", ["A", "B", "C", "D", "E"]]),
UseReduceTestCase(
"( || ( ( ( A ) B ) ) )",
- opconvert = True,
- expected_result = [ "A", "B" ] ),
+ opconvert=True,
+ expected_result=['A', 'B']),
UseReduceTestCase(
"( || ( || ( ( A ) B ) ) )",
- opconvert = True,
- expected_result = [['||', 'A', 'B']]),
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
UseReduceTestCase(
"|| ( A B ) C",
- opconvert = True,
- expected_result = [['||', 'A', 'B'], 'C']),
+ opconvert=True,
+ expected_result=[['||', 'A', 'B'], 'C']),
UseReduceTestCase(
"A || ( B C )",
- opconvert = True,
- expected_result = ['A', ['||', 'B', 'C']]),
+ opconvert=True,
+ expected_result=['A', ['||', 'B', 'C']]),
UseReduceTestCase(
"A foo? ( || ( B || ( bar? ( || ( C D E ) ) !bar? ( F ) ) ) ) G",
- uselist = ["foo", "bar"],
- opconvert = True,
- expected_result = ['A', ['||', 'B', 'C', 'D', 'E'], 'G']),
+ uselist=["foo", "bar"],
+ opconvert=True,
+ expected_result=['A', ['||', 'B', 'C', 'D', 'E'], 'G']),
UseReduceTestCase(
"A foo? ( || ( B || ( bar? ( || ( C D E ) ) !bar? ( F ) ) ) ) G",
- uselist = ["foo", "bar"],
- opconvert = False,
- expected_result = ['A', '||', ['B', 'C', 'D', 'E'], 'G']),
+ uselist=["foo", "bar"],
+ opconvert=False,
+ expected_result=['A', '||', ['B', 'C', 'D', 'E'], 'G']),
UseReduceTestCase(
"|| ( A )",
- opconvert = True,
- expected_result = ["A"]),
+ opconvert=True,
+ expected_result=["A"]),
UseReduceTestCase(
"( || ( || ( || ( A ) foo? ( B ) ) ) )",
- expected_result = ["A"]),
+ expected_result=["A"]),
UseReduceTestCase(
"( || ( || ( || ( A ) foo? ( B ) ) ) )",
- uselist = ["foo"],
- opconvert = True,
- expected_result = [['||', 'A', 'B']]),
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
UseReduceTestCase(
"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
- opconvert = True,
- expected_result = []),
+ opconvert=True,
+ expected_result=[]),
UseReduceTestCase(
"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
- uselist = ["foo", "bar"],
- opconvert = True,
- expected_result = [['||', 'A', 'B']]),
+ uselist=["foo", "bar"],
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
UseReduceTestCase(
"A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
- opconvert = True,
- expected_result = ["A", "B"]),
+ opconvert=True,
+ expected_result=["A", "B"]),
UseReduceTestCase(
"|| ( A ) || ( B )",
- opconvert = True,
- expected_result = ["A", "B"]),
+ opconvert=True,
+ expected_result=["A", "B"]),
UseReduceTestCase(
"foo? ( A ) foo? ( B )",
- opconvert = True,
- expected_result = []),
+ opconvert=True,
+ expected_result=[]),
UseReduceTestCase(
"foo? ( A ) foo? ( B )",
- uselist = ["foo"],
- opconvert = True,
- expected_result = ["A", "B"]),
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=["A", "B"]),
UseReduceTestCase(
"|| ( foo? ( || ( A B ) ) )",
- uselist = ["foo"],
- opconvert = True,
- expected_result = [['||', 'A', 'B']]),
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
UseReduceTestCase(
"|| ( ( A B ) foo? ( || ( C D ) ) )",
- uselist = ["foo"],
- opconvert = True,
- expected_result = [['||', ['A', 'B'], 'C', 'D']]),
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], 'C', 'D']]),
UseReduceTestCase(
"|| ( ( A B ) foo? ( || ( C D ) ) )",
- uselist = ["foo"],
- opconvert = False,
- expected_result = ['||', [['A', 'B'], 'C', 'D']]),
+ uselist=["foo"],
+ opconvert=False,
+ expected_result=['||', [['A', 'B'], 'C', 'D']]),
UseReduceTestCase(
"|| ( ( A B ) || ( C D ) )",
- expected_result = ['||', [['A', 'B'], 'C', 'D']]),
+ expected_result=['||', [['A', 'B'], 'C', 'D']]),
UseReduceTestCase(
"|| ( ( A B ) || ( C D || ( E ( F G ) || ( H ) ) ) )",
- expected_result = ['||', [['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
+ expected_result=['||', [['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
UseReduceTestCase(
"|| ( ( A B ) || ( C D || ( E ( F G ) || ( H ) ) ) )",
- opconvert = True,
- expected_result = [['||', ['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
UseReduceTestCase(
"|| ( foo? ( A B ) )",
- uselist = ["foo"],
- expected_result = ['A', 'B']),
+ uselist=["foo"],
+ expected_result=['A', 'B']),
UseReduceTestCase(
"|| ( || ( foo? ( A B ) ) )",
- uselist = ["foo"],
- expected_result = ['A', 'B']),
+ uselist=["foo"],
+ expected_result=['A', 'B']),
UseReduceTestCase(
"|| ( || ( || ( a? ( b? ( c? ( || ( || ( || ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) )",
- uselist = ["a", "b", "c", "d", "e", "f"],
- expected_result = ['A', 'B']),
+ uselist=["a", "b", "c", "d", "e", "f"],
+ expected_result=['A', 'B']),
UseReduceTestCase(
"|| ( || ( ( || ( a? ( ( b? ( c? ( || ( || ( || ( ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) ) ) ) )",
- uselist = ["a", "b", "c", "d", "e", "f"],
- expected_result = ['A', 'B']),
+ uselist=["a", "b", "c", "d", "e", "f"],
+ expected_result=['A', 'B']),
UseReduceTestCase(
"|| ( ( A ( || ( B ) ) ) )",
- expected_result = ['A', 'B']),
+ expected_result=['A', 'B']),
UseReduceTestCase(
"|| ( ( A B ) || ( foo? ( bar? ( ( C D || ( baz? ( E ) ( F G ) || ( H ) ) ) ) ) ) )",
- uselist = ["foo", "bar", "baz"],
- expected_result = ['||', [['A', 'B'], ['C', 'D', '||', ['E', ['F', 'G'], 'H']]]]),
+ uselist=["foo", "bar", "baz"],
+ expected_result=['||', [['A', 'B'], ['C', 'D', '||', ['E', ['F', 'G'], 'H']]]]),
UseReduceTestCase(
"|| ( ( A B ) || ( foo? ( bar? ( ( C D || ( baz? ( E ) ( F G ) || ( H ) ) ) ) ) ) )",
- uselist = ["foo", "bar", "baz"],
- opconvert = True,
- expected_result = [['||', ['A', 'B'], ['C', 'D', ['||', 'E', ['F', 'G'], 'H']]]]),
+ uselist=["foo", "bar", "baz"],
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], ['C', 'D', ['||', 'E', ['F', 'G'], 'H']]]]),
UseReduceTestCase(
"|| ( foo? ( A B ) )",
- uselist = ["foo"],
+ uselist=["foo"],
opconvert=True,
- expected_result = ['A', 'B']),
+ expected_result=['A', 'B']),
UseReduceTestCase(
"|| ( || ( foo? ( A B ) ) )",
- uselist = ["foo"],
+ uselist=["foo"],
opconvert=True,
- expected_result = ['A', 'B']),
+ expected_result=['A', 'B']),
UseReduceTestCase(
"|| ( || ( || ( a? ( b? ( c? ( || ( || ( || ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) )",
- uselist = ["a", "b", "c", "d", "e", "f"],
+ uselist=["a", "b", "c", "d", "e", "f"],
opconvert=True,
- expected_result = ['A', 'B']),
+ expected_result=['A', 'B']),
- #flat test
+ # flat test
UseReduceTestCase(
"A",
- flat = True,
- expected_result = ["A"]),
+ flat=True,
+ expected_result=["A"]),
UseReduceTestCase(
"( A )",
- flat = True,
- expected_result = ["A"]),
+ flat=True,
+ expected_result=["A"]),
UseReduceTestCase(
"|| ( A B )",
- flat = True,
- expected_result = [ "||", "A", "B" ] ),
+ flat=True,
+ expected_result=["||", "A", "B"]),
UseReduceTestCase(
"|| ( A || ( B C ) )",
- flat = True,
- expected_result = [ "||", "A", "||", "B", "C" ]),
+ flat=True,
+ expected_result=["||", "A", "||", "B", "C"]),
UseReduceTestCase(
"|| ( A || ( B C D ) )",
- flat = True,
- expected_result = [ "||", "A", "||", "B", "C", "D" ]),
+ flat=True,
+ expected_result=["||", "A", "||", "B", "C", "D"]),
UseReduceTestCase(
"|| ( A || ( B || ( C D ) E ) )",
- flat = True,
- expected_result = [ "||", "A", "||", "B", "||", "C", "D", "E" ]),
+ flat=True,
+ expected_result=["||", "A", "||", "B", "||", "C", "D", "E"]),
UseReduceTestCase(
"( || ( ( ( A ) B ) ) )",
- flat = True,
- expected_result = [ "||", "A", "B"] ),
+ flat=True,
+ expected_result=["||", "A", "B"]),
UseReduceTestCase(
"( || ( || ( ( A ) B ) ) )",
- flat = True,
- expected_result = [ "||", "||", "A", "B" ]),
+ flat=True,
+ expected_result=["||", "||", "A", "B"]),
UseReduceTestCase(
"( || ( || ( ( A ) B ) ) )",
- flat = True,
- expected_result = [ "||", "||", "A", "B" ]),
+ flat=True,
+ expected_result=["||", "||", "A", "B"]),
UseReduceTestCase(
"|| ( A )",
- flat = True,
- expected_result = ["||", "A"]),
+ flat=True,
+ expected_result=["||", "A"]),
UseReduceTestCase(
"( || ( || ( || ( A ) foo? ( B ) ) ) )",
- expected_result = ["A"]),
+ expected_result=["A"]),
UseReduceTestCase(
"( || ( || ( || ( A ) foo? ( B ) ) ) )",
- uselist = ["foo"],
- flat = True,
- expected_result = [ "||", "||","||", "A", "B" ]),
+ uselist=["foo"],
+ flat=True,
+ expected_result=["||", "||", "||", "A", "B"]),
UseReduceTestCase(
"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
- flat = True,
- expected_result = ["||", "||","||"]),
+ flat=True,
+ expected_result=["||", "||", "||"]),
UseReduceTestCase(
"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
- uselist = ["foo", "bar"],
- flat = True,
- expected_result = [ "||", "||", "A", "||", "B" ]),
+ uselist=["foo", "bar"],
+ flat=True,
+ expected_result=["||", "||", "A", "||", "B"]),
UseReduceTestCase(
"A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
- flat = True,
- expected_result = ["A", "||", "B"]),
+ flat=True,
+ expected_result=["A", "||", "B"]),
UseReduceTestCase(
"|| ( A ) || ( B )",
- flat = True,
- expected_result = ["||", "A", "||", "B"]),
+ flat=True,
+ expected_result=["||", "A", "||", "B"]),
UseReduceTestCase(
"foo? ( A ) foo? ( B )",
- flat = True,
- expected_result = []),
+ flat=True,
+ expected_result=[]),
UseReduceTestCase(
"foo? ( A ) foo? ( B )",
- uselist = ["foo"],
- flat = True,
- expected_result = ["A", "B"]),
+ uselist=["foo"],
+ flat=True,
+ expected_result=["A", "B"]),
- #use flag validation
+ # use flag validation
UseReduceTestCase(
"foo? ( A )",
- uselist = ["foo"],
- is_valid_flag = self.always_true,
- expected_result = ["A"]),
+ uselist=["foo"],
+ is_valid_flag=self.always_true,
+ expected_result=["A"]),
UseReduceTestCase(
"foo? ( A )",
- is_valid_flag = self.always_true,
- expected_result = []),
+ is_valid_flag=self.always_true,
+ expected_result=[]),
- #token_class
+ # token_class
UseReduceTestCase(
"foo? ( dev-libs/A )",
- uselist = ["foo"],
+ uselist=["foo"],
token_class=Atom,
- expected_result = ["dev-libs/A"]),
+ expected_result=["dev-libs/A"]),
UseReduceTestCase(
"foo? ( dev-libs/A )",
token_class=Atom,
- expected_result = []),
+ expected_result=[]),
)
-
+
test_cases_xfail = (
UseReduceTestCase("? ( A )"),
UseReduceTestCase("!? ( A )"),
@@ -571,44 +570,44 @@ class UseReduce(TestCase):
UseReduceTestCase("|| ( )"),
UseReduceTestCase("foo? ( )"),
- #SRC_URI stuff
- UseReduceTestCase("http://foo/bar -> blah.tbz2", is_src_uri = True, eapi = EAPI_WITHOUT_SRC_URI_ARROWS),
- UseReduceTestCase("|| ( http://foo/bar -> blah.tbz2 )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("http://foo/bar -> foo? ( ftp://foo/a )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("http://foo/bar blah.tbz2 ->", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("-> http://foo/bar blah.tbz2 )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("http://foo/bar ->", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("http://foo/bar -> foo? ( http://foo.com/foo )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("foo? ( http://foo/bar -> ) blah.tbz2", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("http://foo/bar -> foo/blah.tbz2", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
- UseReduceTestCase("http://foo/bar -> -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
-
- UseReduceTestCase("http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri = False, eapi = EAPI_WITH_SRC_URI_ARROWS),
+ # SRC_URI stuff
+ UseReduceTestCase("http://foo/bar -> blah.tbz2", is_src_uri=True, eapi=EAPI_WITHOUT_SRC_URI_ARROWS),
+ UseReduceTestCase("|| ( http://foo/bar -> blah.tbz2 )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo? ( ftp://foo/a )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar blah.tbz2 ->", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("-> http://foo/bar blah.tbz2 )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar ->", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo? ( http://foo.com/foo )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("foo? ( http://foo/bar -> ) blah.tbz2", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo/blah.tbz2", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+
+ UseReduceTestCase("http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri=False, eapi=EAPI_WITH_SRC_URI_ARROWS),
UseReduceTestCase(
"A",
- opconvert = True,
- flat = True),
+ opconvert=True,
+ flat=True),
- #use flag validation
+ # use flag validation
UseReduceTestCase("1.0? ( A )"),
UseReduceTestCase("!1.0? ( A )"),
UseReduceTestCase("!? ( A )"),
UseReduceTestCase("!?? ( A )"),
UseReduceTestCase(
"foo? ( A )",
- is_valid_flag = self.always_false,
+ is_valid_flag=self.always_false,
),
UseReduceTestCase(
"foo? ( A )",
- uselist = ["foo"],
- is_valid_flag = self.always_false,
+ uselist=["foo"],
+ is_valid_flag=self.always_false,
),
- #token_class
+ # token_class
UseReduceTestCase(
"foo? ( A )",
- uselist = ["foo"],
+ uselist=["foo"],
token_class=Atom),
UseReduceTestCase(
"A(B",
diff --git a/pym/portage/tests/ebuild/test_config.py b/pym/portage/tests/ebuild/test_config.py
index 63cb99d41..08e0a5dcf 100644
--- a/pym/portage/tests/ebuild/test_config.py
+++ b/pym/portage/tests/ebuild/test_config.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
@@ -46,7 +46,7 @@ class ConfigTestCase(TestCase):
settings.features.add('noclean')
self.assertEqual('noclean' in settings['FEATURES'].split(), True)
settings.regenerate()
- self.assertEqual('noclean' in settings['FEATURES'].split(),True)
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
settings.features.discard('noclean')
self.assertEqual('noclean' in settings['FEATURES'].split(), False)
@@ -56,7 +56,7 @@ class ConfigTestCase(TestCase):
settings.features.add('noclean')
self.assertEqual('noclean' in settings['FEATURES'].split(), True)
settings.regenerate()
- self.assertEqual('noclean' in settings['FEATURES'].split(),True)
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
# before: ['noclean', '-noclean', 'noclean']
settings.features._prune_overrides()
@@ -92,7 +92,7 @@ class ConfigTestCase(TestCase):
try:
portage.util.noiselimit = -2
- license_group_locations = (os.path.join(playground.portdir, "profiles"),)
+ license_group_locations = (os.path.join(playground.settings.repositories["test_repo"].location, "profiles"),)
pkg_license = os.path.join(playground.eroot, "etc", "portage")
lic_man = LicenseManager(license_group_locations, pkg_license)
@@ -221,6 +221,7 @@ class ConfigTestCase(TestCase):
"profile-formats = pms",
"thin-manifests = true",
"manifest-hashes = SHA256 SHA512 WHIRLPOOL",
+ "# use implicit masters"
),
}
}
@@ -239,28 +240,30 @@ class ConfigTestCase(TestCase):
playground = ResolverPlayground(ebuilds=ebuilds,
repo_configs=repo_configs, distfiles=distfiles)
+ settings = playground.settings
- new_repo_config = playground.settings.repositories.prepos['new_repo']
+ new_repo_config = settings.repositories["new_repo"]
+ old_repo_config = settings.repositories["old_repo"]
self.assertTrue(len(new_repo_config.masters) > 0, "new_repo has no default master")
- self.assertEqual(new_repo_config.masters[0].user_location, playground.portdir,
- "new_repo default master is not PORTDIR")
+ self.assertEqual(new_repo_config.masters[0].user_location, playground.settings.repositories["test_repo"].location,
+ "new_repo default master is not test_repo")
self.assertEqual(new_repo_config.thin_manifest, True,
"new_repo_config.thin_manifest != True")
- new_manifest_file = os.path.join(playground.repo_dirs["new_repo"], "dev-libs", "A", "Manifest")
- self.assertEqual(os.path.exists(new_manifest_file), False)
+ new_manifest_file = os.path.join(new_repo_config.location, "dev-libs", "A", "Manifest")
+ self.assertNotExists(new_manifest_file)
- new_manifest_file = os.path.join(playground.repo_dirs["new_repo"], "dev-libs", "B", "Manifest")
+ new_manifest_file = os.path.join(new_repo_config.location, "dev-libs", "B", "Manifest")
f = open(new_manifest_file)
self.assertEqual(len(list(f)), 1)
f.close()
- new_manifest_file = os.path.join(playground.repo_dirs["new_repo"], "dev-libs", "C", "Manifest")
+ new_manifest_file = os.path.join(new_repo_config.location, "dev-libs", "C", "Manifest")
f = open(new_manifest_file)
self.assertEqual(len(list(f)), 2)
f.close()
- old_manifest_file = os.path.join(playground.repo_dirs["old_repo"], "dev-libs", "A", "Manifest")
+ old_manifest_file = os.path.join(old_repo_config.location, "dev-libs", "A", "Manifest")
f = open(old_manifest_file)
self.assertEqual(len(list(f)), 1)
f.close()
diff --git a/pym/portage/tests/ebuild/test_doebuild_fd_pipes.py b/pym/portage/tests/ebuild/test_doebuild_fd_pipes.py
new file mode 100644
index 000000000..61392dd54
--- /dev/null
+++ b/pym/portage/tests/ebuild/test_doebuild_fd_pipes.py
@@ -0,0 +1,137 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import textwrap
+
+import portage
+from portage import os
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage.util._async.ForkProcess import ForkProcess
+from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.Package import Package
+from _emerge.PipeReader import PipeReader
+
+class DoebuildProcess(ForkProcess):
+
+ __slots__ = ('doebuild_kwargs', 'doebuild_pargs')
+
+ def _run(self):
+ return portage.doebuild(*self.doebuild_pargs, **self.doebuild_kwargs)
+
+class DoebuildFdPipesTestCase(TestCase):
+
+ def testDoebuild(self):
+ """
+ Invoke portage.doebuild() with the fd_pipes parameter, and
+ check that the expected output appears in the pipe. This
+ functionality is not used by portage internally, but it is
+ supported for API consumers (see bug #475812).
+ """
+
+ ebuild_body = textwrap.dedent("""
+ S=${WORKDIR}
+ pkg_info() { echo info ; }
+ pkg_nofetch() { echo nofetch ; }
+ pkg_pretend() { echo pretend ; }
+ pkg_setup() { echo setup ; }
+ src_unpack() { echo unpack ; }
+ src_prepare() { echo prepare ; }
+ src_configure() { echo configure ; }
+ src_compile() { echo compile ; }
+ src_test() { echo test ; }
+ src_install() { echo install ; }
+ """)
+
+ ebuilds = {
+ 'app-misct/foo-1': {
+ 'EAPI' : '5',
+ "MISC_CONTENT": ebuild_body,
+ }
+ }
+
+ # Override things that may be unavailable, or may have portability
+ # issues when running tests in exotic environments.
+ # prepstrip - bug #447810 (bash read builtin EINTR problem)
+ true_symlinks = ("find", "prepstrip", "sed", "scanelf")
+ true_binary = portage.process.find_binary("true")
+ self.assertEqual(true_binary is None, False,
+ "true command not found")
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ QueryCommand._db = playground.trees
+ root_config = playground.trees[playground.eroot]['root_config']
+ portdb = root_config.trees["porttree"].dbapi
+ settings = portage.config(clone=playground.settings)
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+ settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")
+
+ settings.features.add("noauto")
+ settings.features.add("test")
+ settings['PORTAGE_PYTHON'] = portage._python_interpreter
+ settings['PORTAGE_QUIET'] = "1"
+
+ fake_bin = os.path.join(settings["EPREFIX"], "bin")
+ portage.util.ensure_dirs(fake_bin)
+ for x in true_symlinks:
+ os.symlink(true_binary, os.path.join(fake_bin, x))
+
+ settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin
+ settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE")
+
+ cpv = 'app-misct/foo-1'
+ metadata = dict(zip(Package.metadata_keys,
+ portdb.aux_get(cpv, Package.metadata_keys)))
+
+ pkg = Package(built=False, cpv=cpv, installed=False,
+ metadata=metadata, root_config=root_config,
+ type_name='ebuild')
+ settings.setcpv(pkg)
+ ebuildpath = portdb.findname(cpv)
+ self.assertNotEqual(ebuildpath, None)
+
+ for phase in ('info', 'nofetch',
+ 'pretend', 'setup', 'unpack', 'prepare', 'configure',
+ 'compile', 'test', 'install', 'qmerge', 'clean', 'merge'):
+
+ pr, pw = os.pipe()
+
+ producer = DoebuildProcess(doebuild_pargs=(ebuildpath, phase),
+ doebuild_kwargs={"settings" : settings,
+ "mydbapi": portdb, "tree": "porttree",
+ "vartree": root_config.trees["vartree"],
+ "fd_pipes": {1: pw, 2: pw},
+ "prev_mtimes": {}})
+
+ consumer = PipeReader(
+ input_files={"producer" : pr})
+
+ task_scheduler = TaskScheduler(iter([producer, consumer]),
+ max_jobs=2)
+
+ try:
+ task_scheduler.start()
+ finally:
+ # PipeReader closes pr
+ os.close(pw)
+
+ task_scheduler.wait()
+ output = portage._unicode_decode(
+ consumer.getvalue()).rstrip("\n")
+
+ if task_scheduler.returncode != os.EX_OK:
+ portage.writemsg(output, noiselevel=-1)
+
+ self.assertEqual(task_scheduler.returncode, os.EX_OK)
+
+ if phase not in ('clean', 'merge', 'qmerge'):
+ self.assertEqual(phase, output)
+
+ finally:
+ playground.cleanup()
+ QueryCommand._db = None
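For reference, a rough sketch of the fd_pipes usage this new test covers, from the API-consumer side (bug #475812). The cpv is hypothetical, the legacy globals portage.settings/portage.portdb stand in for a configured session, and the phase runs in a child process to avoid pipe deadlock, just as the test does with ForkProcess and PipeReader (the test also passes vartree and prev_mtimes):

import os
import portage

settings = portage.config(clone=portage.settings)
portdb = portage.portdb
ebuild_path = portdb.findname("app-misc/foo-1")  # hypothetical cpv

pr, pw = os.pipe()
pid = os.fork()
if pid == 0:
	# Child: run the phase with both stdout and stderr on our pipe.
	os.close(pr)
	rval = portage.doebuild(ebuild_path, "info",
		settings=settings, mydbapi=portdb, tree="porttree",
		fd_pipes={1: pw, 2: pw})
	os._exit(rval)
os.close(pw)
with os.fdopen(pr, "rb") as f:
	output = f.read()
os.waitpid(pid, 0)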
diff --git a/pym/portage/tests/ebuild/test_doebuild_spawn.py b/pym/portage/tests/ebuild/test_doebuild_spawn.py
index 89e27a331..ae9a5c504 100644
--- a/pym/portage/tests/ebuild/test_doebuild_spawn.py
+++ b/pym/portage/tests/ebuild/test_doebuild_spawn.py
@@ -1,18 +1,22 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import textwrap
+
from portage import os
from portage import _python_interpreter
from portage import _shell_quote
from portage.const import EBUILD_SH_BINARY
from portage.package.ebuild.config import config
from portage.package.ebuild.doebuild import spawn as doebuild_spawn
+from portage.package.ebuild._spawn_nofetch import spawn_nofetch
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.EbuildPhase import EbuildPhase
from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
from _emerge.Package import Package
-from _emerge.PollScheduler import PollScheduler
class DoebuildSpawnTestCase(TestCase):
"""
@@ -23,25 +27,37 @@ class DoebuildSpawnTestCase(TestCase):
"""
def testDoebuildSpawn(self):
- playground = ResolverPlayground()
- try:
- settings = config(clone=playground.settings)
- if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
- settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
- os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
- settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")
- cpv = 'sys-apps/portage-2.1'
- metadata = {
+ ebuild_body = textwrap.dedent("""
+ pkg_nofetch() { : ; }
+ """)
+
+ ebuilds = {
+ 'sys-apps/portage-2.1': {
'EAPI' : '2',
- 'INHERITED' : 'python eutils',
'IUSE' : 'build doc epydoc python3 selinux',
+ 'KEYWORDS' : 'x86',
'LICENSE' : 'GPL-2',
- 'PROVIDE' : 'virtual/portage',
'RDEPEND' : '>=app-shells/bash-3.2_p17 >=dev-lang/python-2.6',
'SLOT' : '0',
+ "MISC_CONTENT": ebuild_body,
}
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
root_config = playground.trees[playground.eroot]['root_config']
+ portdb = root_config.trees["porttree"].dbapi
+ settings = config(clone=playground.settings)
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+ settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")
+
+ cpv = 'sys-apps/portage-2.1'
+ metadata = dict(zip(Package.metadata_keys,
+ portdb.aux_get(cpv, Package.metadata_keys)))
+
pkg = Package(built=False, cpv=cpv, installed=False,
metadata=metadata, root_config=root_config,
type_name='ebuild')
@@ -57,7 +73,7 @@ class DoebuildSpawnTestCase(TestCase):
# has been sourced already.
open(os.path.join(settings['T'], 'environment'), 'wb').close()
- scheduler = PollScheduler().sched_iface
+ scheduler = SchedulerInterface(global_event_loop())
for phase in ('_internal_test',):
# Test EbuildSpawnProcess by calling doebuild.spawn() with
@@ -83,5 +99,7 @@ class DoebuildSpawnTestCase(TestCase):
ebuild_phase.start()
ebuild_phase.wait()
self.assertEqual(ebuild_phase.returncode, os.EX_OK)
+
+ spawn_nofetch(portdb, portdb.findname(cpv), settings=settings)
finally:
playground.cleanup()
diff --git a/pym/portage/tests/ebuild/test_ipc_daemon.py b/pym/portage/tests/ebuild/test_ipc_daemon.py
index 0efab6584..a87107625 100644
--- a/pym/portage/tests/ebuild/test_ipc_daemon.py
+++ b/pym/portage/tests/ebuild/test_ipc_daemon.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import tempfile
@@ -13,16 +13,28 @@ from portage.const import BASH_BINARY
from portage.locks import hardlock_cleanup
from portage.package.ebuild._ipc.ExitCommand import ExitCommand
from portage.util import ensure_dirs
+from portage.util._async.ForkProcess import ForkProcess
+from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.SpawnProcess import SpawnProcess
from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.EbuildIpcDaemon import EbuildIpcDaemon
-from _emerge.TaskScheduler import TaskScheduler
+
+class SleepProcess(ForkProcess):
+ """
+ Emulate the sleep command, in order to ensure a consistent
+ return code when it is killed by SIGTERM (see bug #437180).
+ """
+ __slots__ = ('seconds',)
+ def _run(self):
+ time.sleep(self.seconds)
class IpcDaemonTestCase(TestCase):
_SCHEDULE_TIMEOUT = 40000 # 40 seconds
def testIpcDaemon(self):
+ event_loop = global_event_loop()
tmpdir = tempfile.mkdtemp()
build_dir = None
try:
@@ -44,9 +56,8 @@ class IpcDaemonTestCase(TestCase):
env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
- task_scheduler = TaskScheduler(max_jobs=2)
build_dir = EbuildBuildDir(
- scheduler=task_scheduler.sched_iface,
+ scheduler=event_loop,
settings=env)
build_dir.lock()
ensure_dirs(env['PORTAGE_BUILDDIR'])
@@ -61,26 +72,23 @@ class IpcDaemonTestCase(TestCase):
commands = {'exit' : exit_command}
daemon = EbuildIpcDaemon(commands=commands,
input_fifo=input_fifo,
- output_fifo=output_fifo,
- scheduler=task_scheduler.sched_iface)
+ output_fifo=output_fifo)
proc = SpawnProcess(
args=[BASH_BINARY, "-c",
'"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode],
- env=env, scheduler=task_scheduler.sched_iface)
+ env=env)
+ task_scheduler = TaskScheduler(iter([daemon, proc]),
+ max_jobs=2, event_loop=event_loop)
self.received_command = False
def exit_command_callback():
self.received_command = True
- task_scheduler.clear()
- task_scheduler.wait()
+ task_scheduler.cancel()
exit_command.reply_hook = exit_command_callback
start_time = time.time()
- task_scheduler.add(daemon)
- task_scheduler.add(proc)
- task_scheduler.run(timeout=self._SCHEDULE_TIMEOUT)
- task_scheduler.clear()
- task_scheduler.wait()
+ self._run(event_loop, task_scheduler, self._SCHEDULE_TIMEOUT)
+
hardlock_cleanup(env['PORTAGE_BUILDDIR'],
remove_all_locks=True)
@@ -91,8 +99,10 @@ class IpcDaemonTestCase(TestCase):
self.assertEqual(daemon.isAlive(), False)
self.assertEqual(exit_command.exitcode, exitcode)
- # Intentionally short timeout test for QueueScheduler.run()
- sleep_time_s = 10 # 10.000 seconds
+ # Intentionally short timeout test for EventLoop/AsyncScheduler.
+ # Use a ridiculously long sleep_time_s in case the user's
+ # system is heavily loaded (see bug #436334).
+ sleep_time_s = 600 # 600.000 seconds
short_timeout_ms = 10 # 0.010 seconds
for i in range(3):
@@ -100,25 +110,20 @@ class IpcDaemonTestCase(TestCase):
commands = {'exit' : exit_command}
daemon = EbuildIpcDaemon(commands=commands,
input_fifo=input_fifo,
- output_fifo=output_fifo,
- scheduler=task_scheduler.sched_iface)
- proc = SpawnProcess(
- args=[BASH_BINARY, "-c", 'exec sleep %d' % sleep_time_s],
- env=env, scheduler=task_scheduler.sched_iface)
+ output_fifo=output_fifo)
+ proc = SleepProcess(seconds=sleep_time_s)
+ task_scheduler = TaskScheduler(iter([daemon, proc]),
+ max_jobs=2, event_loop=event_loop)
self.received_command = False
def exit_command_callback():
self.received_command = True
- task_scheduler.clear()
- task_scheduler.wait()
+ task_scheduler.cancel()
exit_command.reply_hook = exit_command_callback
start_time = time.time()
- task_scheduler.add(daemon)
- task_scheduler.add(proc)
- task_scheduler.run(timeout=short_timeout_ms)
- task_scheduler.clear()
- task_scheduler.wait()
+ self._run(event_loop, task_scheduler, short_timeout_ms)
+
hardlock_cleanup(env['PORTAGE_BUILDDIR'],
remove_all_locks=True)
@@ -133,3 +138,20 @@ class IpcDaemonTestCase(TestCase):
if build_dir is not None:
build_dir.unlock()
shutil.rmtree(tmpdir)
+
+ def _timeout_callback(self):
+ self._timed_out = True
+
+ def _run(self, event_loop, task_scheduler, timeout):
+ self._timed_out = False
+ timeout_id = event_loop.timeout_add(timeout, self._timeout_callback)
+
+ try:
+ task_scheduler.start()
+ while not self._timed_out and task_scheduler.poll() is None:
+ event_loop.iteration()
+ if self._timed_out:
+ task_scheduler.cancel()
+ task_scheduler.wait()
+ finally:
+ event_loop.source_remove(timeout_id)
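The new _run() helper above is a general pattern for bounding an AsyncScheduler with a deadline. A standalone sketch of the same pattern, assuming only the task list varies (timeout_add() takes milliseconds and source_remove() unregisters the callback, as in the test):

from portage.util._async.TaskScheduler import TaskScheduler
from portage.util._eventloop.global_event_loop import global_event_loop

def run_with_timeout(tasks, timeout_ms):
	event_loop = global_event_loop()
	scheduler = TaskScheduler(iter(tasks), max_jobs=2,
		event_loop=event_loop)
	state = {"timed_out": False}

	def on_timeout():
		state["timed_out"] = True
		return False  # one-shot timeout, do not reschedule

	timeout_id = event_loop.timeout_add(timeout_ms, on_timeout)
	try:
		scheduler.start()
		# Iterate the loop until the scheduler finishes or time runs out.
		while not state["timed_out"] and scheduler.poll() is None:
			event_loop.iteration()
		if state["timed_out"]:
			scheduler.cancel()
		scheduler.wait()
	finally:
		event_loop.source_remove(timeout_id)
	return scheduler.returncode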
diff --git a/pym/portage/tests/ebuild/test_spawn.py b/pym/portage/tests/ebuild/test_spawn.py
index fea4738d4..a38e10972 100644
--- a/pym/portage/tests/ebuild/test_spawn.py
+++ b/pym/portage/tests/ebuild/test_spawn.py
@@ -1,17 +1,18 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
import io
import sys
import tempfile
+import portage
from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage.const import BASH_BINARY
from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.SpawnProcess import SpawnProcess
-from _emerge.PollScheduler import PollScheduler
class SpawnTestCase(TestCase):
@@ -22,12 +23,16 @@ class SpawnTestCase(TestCase):
os.close(fd)
null_fd = os.open('/dev/null', os.O_RDWR)
test_string = 2 * "blah blah blah\n"
- scheduler = PollScheduler().sched_iface
proc = SpawnProcess(
args=[BASH_BINARY, "-c",
"echo -n '%s'" % test_string],
- env={}, fd_pipes={0:sys.stdin.fileno(), 1:null_fd, 2:null_fd},
- scheduler=scheduler,
+ env={},
+ fd_pipes={
+ 0: portage._get_stdin().fileno(),
+ 1: null_fd,
+ 2: null_fd
+ },
+ scheduler=global_event_loop(),
logfile=logfile)
proc.start()
os.close(null_fd)
diff --git a/pym/portage/tests/emerge/test_emerge_slot_abi.py b/pym/portage/tests/emerge/test_emerge_slot_abi.py
index f18bd123b..fd7ec0e6a 100644
--- a/pym/portage/tests/emerge/test_emerge_slot_abi.py
+++ b/pym/portage/tests/emerge/test_emerge_slot_abi.py
@@ -1,4 +1,4 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import subprocess
@@ -64,15 +64,14 @@ class SlotAbiEmergeTestCase(TestCase):
trees = playground.trees
portdb = trees[eroot]["porttree"].dbapi
vardb = trees[eroot]["vartree"].dbapi
- portdir = settings["PORTDIR"]
var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
package_mask_path = os.path.join(user_config_dir, "package.mask")
portage_python = portage._python_interpreter
- ebuild_cmd = (portage_python, "-Wd",
+ ebuild_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "ebuild"))
- emerge_cmd = (portage_python, "-Wd",
+ emerge_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "emerge"))
test_ebuild = portdb.findname("dev-libs/dbus-glib-0.98")
@@ -94,25 +93,6 @@ class SlotAbiEmergeTestCase(TestCase):
portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
profile_path = settings.profile_path
- features = []
- if not portage.process.sandbox_capable or \
- os.environ.get("SANDBOX_ON") == "1":
- features.append("-sandbox")
-
- make_conf = (
- "FEATURES=\"%s\"\n" % (" ".join(features),),
- "PORTDIR=\"%s\"\n" % (portdir,),
- "PORTAGE_GRPNAME=\"%s\"\n" % (os.environ["PORTAGE_GRPNAME"],),
- "PORTAGE_USERNAME=\"%s\"\n" % (os.environ["PORTAGE_USERNAME"],),
- "PKGDIR=\"%s\"\n" % (pkgdir,),
- "PORTAGE_INST_GID=%s\n" % (portage.data.portage_gid,),
- "PORTAGE_INST_UID=%s\n" % (portage.data.portage_uid,),
- "PORTAGE_TMPDIR=\"%s\"\n" % (portage_tmpdir,),
- "CLEAN_DELAY=0\n",
- "DISTDIR=\"%s\"\n" % (distdir,),
- "EMERGE_WARNING_DELAY=0\n",
- )
-
path = os.environ.get("PATH")
if path is not None and not path.strip():
path = None
@@ -139,6 +119,7 @@ class SlotAbiEmergeTestCase(TestCase):
"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
"PATH" : path,
"PORTAGE_PYTHON" : portage_python,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
"PYTHONPATH" : pythonpath,
}
@@ -155,9 +136,6 @@ class SlotAbiEmergeTestCase(TestCase):
try:
for d in dirs:
ensure_dirs(d)
- with open(os.path.join(user_config_dir, "make.conf"), 'w') as f:
- for line in make_conf:
- f.write(line)
for x in true_symlinks:
os.symlink(true_binary, os.path.join(fake_bin, x))
with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
diff --git a/pym/portage/tests/emerge/test_simple.py b/pym/portage/tests/emerge/test_simple.py
index f87170a71..bf0af8bc8 100644
--- a/pym/portage/tests/emerge/test_simple.py
+++ b/pym/portage/tests/emerge/test_simple.py
@@ -1,4 +1,4 @@
-# Copyright 2011-2012 Gentoo Foundation
+# Copyright 2011-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import subprocess
@@ -7,7 +7,8 @@ import sys
import portage
from portage import os
from portage import _unicode_decode
-from portage.const import PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, USER_CONFIG_PATH
+from portage.const import (BASH_BINARY, PORTAGE_BASE_PATH,
+ PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, USER_CONFIG_PATH)
from portage.process import find_binary
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import ResolverPlayground
@@ -75,13 +76,21 @@ pkg_preinst() {
else
einfo "has_version does not detect an installed instance of $CATEGORY/$PN:$SLOT"
fi
+ if [[ ${EPREFIX} != ${PORTAGE_OVERRIDE_EPREFIX} ]] ; then
+ if has_version --host-root $CATEGORY/$PN:$SLOT ; then
+ einfo "has_version --host-root detects an installed instance of $CATEGORY/$PN:$SLOT"
+ einfo "best_version --host-root reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
+ else
+ einfo "has_version --host-root does not detect an installed instance of $CATEGORY/$PN:$SLOT"
+ fi
+ fi
}
"""
ebuilds = {
"dev-libs/A-1": {
- "EAPI" : "4",
+ "EAPI" : "5",
"IUSE" : "+flag",
"KEYWORDS": "x86",
"LICENSE": "GPL-2",
@@ -89,14 +98,14 @@ pkg_preinst() {
"RDEPEND": "flag? ( dev-libs/B[flag] )",
},
"dev-libs/B-1": {
- "EAPI" : "4",
+ "EAPI" : "5",
"IUSE" : "+flag",
"KEYWORDS": "x86",
"LICENSE": "GPL-2",
"MISC_CONTENT": install_something,
},
"virtual/foo-0": {
- "EAPI" : "4",
+ "EAPI" : "5",
"KEYWORDS": "x86",
"LICENSE": "GPL-2",
},
@@ -104,7 +113,7 @@ pkg_preinst() {
installed = {
"dev-libs/A-1": {
- "EAPI" : "4",
+ "EAPI" : "5",
"IUSE" : "+flag",
"KEYWORDS": "x86",
"LICENSE": "GPL-2",
@@ -112,21 +121,21 @@ pkg_preinst() {
"USE": "flag",
},
"dev-libs/B-1": {
- "EAPI" : "4",
+ "EAPI" : "5",
"IUSE" : "+flag",
"KEYWORDS": "x86",
"LICENSE": "GPL-2",
"USE": "flag",
},
"dev-libs/depclean-me-1": {
- "EAPI" : "4",
+ "EAPI" : "5",
"IUSE" : "",
"KEYWORDS": "x86",
"LICENSE": "GPL-2",
"USE": "",
},
"app-misc/depclean-me-1": {
- "EAPI" : "4",
+ "EAPI" : "5",
"IUSE" : "",
"KEYWORDS": "x86",
"LICENSE": "GPL-2",
@@ -159,29 +168,35 @@ pkg_preinst() {
eroot = settings["EROOT"]
trees = playground.trees
portdb = trees[eroot]["porttree"].dbapi
- portdir = settings["PORTDIR"]
+ test_repo_location = settings.repositories["test_repo"].location
var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
cachedir = os.path.join(var_cache_edb, "dep")
- cachedir_pregen = os.path.join(portdir, "metadata", "cache")
+ cachedir_pregen = os.path.join(test_repo_location, "metadata", "md5-cache")
portage_python = portage._python_interpreter
- ebuild_cmd = (portage_python, "-Wd",
+ dispatch_conf_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(PORTAGE_BIN_PATH, "dispatch-conf"))
+ ebuild_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "ebuild"))
- egencache_cmd = (portage_python, "-Wd",
- os.path.join(PORTAGE_BIN_PATH, "egencache"))
- emerge_cmd = (portage_python, "-Wd",
+ egencache_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(PORTAGE_BIN_PATH, "egencache"),
+ "--repo", "test_repo",
+ "--repositories-configuration", settings.repositories.config_string())
+ emerge_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "emerge"))
- emaint_cmd = (portage_python, "-Wd",
+ emaint_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "emaint"))
- env_update_cmd = (portage_python, "-Wd",
+ env_update_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "env-update"))
- fixpackages_cmd = (portage_python, "-Wd",
+ etc_update_cmd = (BASH_BINARY,
+ os.path.join(PORTAGE_BIN_PATH, "etc-update"))
+ fixpackages_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "fixpackages"))
- portageq_cmd = (portage_python, "-Wd",
+ portageq_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "portageq"))
- quickpkg_cmd = (portage_python, "-Wd",
+ quickpkg_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "quickpkg"))
- regenworld_cmd = (portage_python, "-Wd",
+ regenworld_cmd = (portage_python, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "regenworld"))
rm_binary = find_binary("rm")
@@ -196,8 +211,14 @@ pkg_preinst() {
test_ebuild = portdb.findname("dev-libs/A-1")
self.assertFalse(test_ebuild is None)
+ cross_prefix = os.path.join(eprefix, "cross_prefix")
+
test_commands = (
env_update_cmd,
+ portageq_cmd + ("envvar", "-v", "CONFIG_PROTECT", "EROOT",
+ "PORTAGE_CONFIGROOT", "PORTAGE_TMPDIR", "USERLAND"),
+ etc_update_cmd,
+ dispatch_conf_cmd,
emerge_cmd + ("--version",),
emerge_cmd + ("--info",),
emerge_cmd + ("--info", "--verbose"),
@@ -210,7 +231,7 @@ pkg_preinst() {
({"FEATURES" : "metadata-transfer"},) + \
emerge_cmd + ("--regen",),
rm_cmd + ("-rf", cachedir),
- ({"FEATURES" : "metadata-transfer parse-eapi-ebuild-head"},) + \
+ ({"FEATURES" : "metadata-transfer"},) + \
emerge_cmd + ("--regen",),
rm_cmd + ("-rf", cachedir),
egencache_cmd + ("--update",) + tuple(egencache_extra_args),
@@ -226,6 +247,7 @@ pkg_preinst() {
ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"),
emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"),
emerge_cmd + ("-p", "dev-libs/B"),
+ emerge_cmd + ("-p", "--newrepo", "dev-libs/B"),
emerge_cmd + ("-B", "dev-libs/B",),
emerge_cmd + ("--oneshot", "--usepkg", "dev-libs/B",),
@@ -257,6 +279,24 @@ pkg_preinst() {
emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"),
emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"),
emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
+
+ # Test cross-prefix usage, including chpathtool for binpkgs.
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("--usepkgonly", "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("-C", "--quiet", "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("dev-libs/A",),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
)
distdir = playground.distdir
@@ -266,20 +306,6 @@ pkg_preinst() {
profile_path = settings.profile_path
user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH)
- features = []
- if not portage.process.sandbox_capable or \
- os.environ.get("SANDBOX_ON") == "1":
- features.append("-sandbox")
-
- # Since egencache ignores settings from the calling environment,
- # configure it via make.conf.
- make_conf = (
- "FEATURES=\"%s\"\n" % (" ".join(features),),
- "PORTDIR=\"%s\"\n" % (portdir,),
- "PORTAGE_GRPNAME=\"%s\"\n" % (os.environ["PORTAGE_GRPNAME"],),
- "PORTAGE_USERNAME=\"%s\"\n" % (os.environ["PORTAGE_USERNAME"],),
- )
-
path = os.environ.get("PATH")
if path is not None and not path.strip():
path = None
@@ -314,37 +340,43 @@ pkg_preinst() {
"PORTAGE_INST_GID" : str(portage.data.portage_gid),
"PORTAGE_INST_UID" : str(portage.data.portage_uid),
"PORTAGE_PYTHON" : portage_python,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
"PORTAGE_TMPDIR" : portage_tmpdir,
"PYTHONPATH" : pythonpath,
+ "__PORTAGE_TEST_PATH_OVERRIDE" : fake_bin,
}
if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
- updates_dir = os.path.join(portdir, "profiles", "updates")
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
dirs = [cachedir, cachedir_pregen, distdir, fake_bin,
portage_tmpdir, updates_dir,
user_config_dir, var_cache_edb]
- true_symlinks = ["chown", "chgrp"]
+ etc_symlinks = ("dispatch-conf.conf", "etc-update.conf")
+ # Override things that may be unavailable, or may have portability
+ # issues when running tests in exotic environments.
+ # prepstrip - bug #447810 (bash read builtin EINTR problem)
+ true_symlinks = ["find", "prepstrip", "sed", "scanelf"]
true_binary = find_binary("true")
self.assertEqual(true_binary is None, False,
"true command not found")
try:
for d in dirs:
ensure_dirs(d)
- with open(os.path.join(user_config_dir, "make.conf"), 'w') as f:
- for line in make_conf:
- f.write(line)
for x in true_symlinks:
os.symlink(true_binary, os.path.join(fake_bin, x))
+ for x in etc_symlinks:
+ os.symlink(os.path.join(PORTAGE_BASE_PATH, "cnf", x),
+ os.path.join(eprefix, "etc", x))
with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
f.write(b"100")
# non-empty system set keeps --depclean quiet
with open(os.path.join(profile_path, "packages"), 'w') as f:
f.write("*dev-libs/token-system-pkg")
for cp, xml_data in metadata_xml_files:
- with open(os.path.join(portdir, cp, "metadata.xml"), 'w') as f:
+ with open(os.path.join(test_repo_location, cp, "metadata.xml"), 'w') as f:
f.write(playground.metadata_xml_template % xml_data)
with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
f.write("""
diff --git a/pym/portage/tests/env/config/test_PackageKeywordsFile.py b/pym/portage/tests/env/config/test_PackageKeywordsFile.py
index f1e9e98f0..609c0fda2 100644
--- a/pym/portage/tests/env/config/test_PackageKeywordsFile.py
+++ b/pym/portage/tests/env/config/test_PackageKeywordsFile.py
@@ -11,7 +11,7 @@ class PackageKeywordsFileTestCase(TestCase):
cpv = ['sys-apps/portage']
keywords = ['~x86', 'amd64', '-mips']
-
+
def testPackageKeywordsFile(self):
"""
A simple test to ensure the load works properly
@@ -23,17 +23,17 @@ class PackageKeywordsFileTestCase(TestCase):
f.load()
i = 0
for cpv, keyword in f.items():
- self.assertEqual( cpv, self.cpv[i] )
+ self.assertEqual(cpv, self.cpv[i])
[k for k in keyword if self.assertTrue(k in self.keywords)]
i = i + 1
finally:
self.NukeFile()
-
+
def BuildFile(self):
fd, self.fname = mkstemp()
f = os.fdopen(fd, 'w')
for c in self.cpv:
- f.write("%s %s\n" % (c,' '.join(self.keywords)))
+ f.write("%s %s\n" % (c, ' '.join(self.keywords)))
f.close()
def NukeFile(self):
diff --git a/pym/portage/tests/env/config/test_PackageUseFile.py b/pym/portage/tests/env/config/test_PackageUseFile.py
index 7a3806792..b1a6ccbde 100644
--- a/pym/portage/tests/env/config/test_PackageUseFile.py
+++ b/pym/portage/tests/env/config/test_PackageUseFile.py
@@ -12,7 +12,7 @@ class PackageUseFileTestCase(TestCase):
cpv = 'sys-apps/portage'
useflags = ['cdrom', 'far', 'boo', 'flag', 'blat']
-
+
def testPackageUseFile(self):
"""
A simple test to ensure the load works properly
@@ -22,7 +22,7 @@ class PackageUseFileTestCase(TestCase):
f = PackageUseFile(self.fname)
f.load()
for cpv, use in f.items():
- self.assertEqual( cpv, self.cpv )
+ self.assertEqual(cpv, self.cpv)
[flag for flag in use if self.assertTrue(flag in self.useflags)]
finally:
self.NukeFile()
@@ -32,6 +32,6 @@ class PackageUseFileTestCase(TestCase):
f = os.fdopen(fd, 'w')
f.write("%s %s" % (self.cpv, ' '.join(self.useflags)))
f.close()
-
+
def NukeFile(self):
os.unlink(self.fname)
diff --git a/pym/portage/tests/env/config/test_PortageModulesFile.py b/pym/portage/tests/env/config/test_PortageModulesFile.py
index 2cd1a8ab1..05584a5f8 100644
--- a/pym/portage/tests/env/config/test_PortageModulesFile.py
+++ b/pym/portage/tests/env/config/test_PortageModulesFile.py
@@ -8,14 +8,13 @@ from tempfile import mkstemp
class PortageModulesFileTestCase(TestCase):
- keys = ['foo.bar','baz','bob','extra_key']
- invalid_keys = ['',""]
- modules = ['spanky','zmedico','antarus','ricer','5','6']
+ keys = ['foo.bar', 'baz', 'bob', 'extra_key']
+ invalid_keys = ['', ""]
+ modules = ['spanky', 'zmedico', 'antarus', 'ricer', '5', '6']
def setUp(self):
self.items = {}
- for k, v in zip(self.keys + self.invalid_keys,
- self.modules):
+ for k, v in zip(self.keys + self.invalid_keys, self.modules):
self.items[k] = v
def testPortageModulesFile(self):
@@ -32,7 +31,7 @@ class PortageModulesFileTestCase(TestCase):
fd, self.fname = mkstemp()
f = os.fdopen(fd, 'w')
for k, v in self.items.items():
- f.write('%s=%s\n' % (k,v))
+ f.write('%s=%s\n' % (k, v))
f.close()
def NukeFile(self):
diff --git a/pym/portage/tests/glsa/__init__.py b/pym/portage/tests/glsa/__init__.py
new file mode 100644
index 000000000..6cde9320b
--- /dev/null
+++ b/pym/portage/tests/glsa/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/pym/portage/tests/glsa/__test__ b/pym/portage/tests/glsa/__test__
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pym/portage/tests/glsa/__test__
diff --git a/pym/portage/tests/glsa/test_security_set.py b/pym/portage/tests/glsa/test_security_set.py
new file mode 100644
index 000000000..edf567809
--- /dev/null
+++ b/pym/portage/tests/glsa/test_security_set.py
@@ -0,0 +1,144 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+
+import portage
+from portage import os, _encodings
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SecuritySetTestCase(TestCase):
+
+ glsa_template = """\
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet href="/xsl/glsa.xsl" type="text/xsl"?>
+<?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl"?>
+<!DOCTYPE glsa SYSTEM "http://www.gentoo.org/dtd/glsa.dtd">
+<glsa id="%(glsa_id)s">
+ <title>%(pkgname)s: Multiple vulnerabilities</title>
+ <synopsis>Multiple vulnerabilities have been found in %(pkgname)s.
+ </synopsis>
+ <product type="ebuild">%(pkgname)s</product>
+ <announced>January 18, 2013</announced>
+ <revised>January 18, 2013: 1</revised>
+ <bug>55555</bug>
+ <access>remote</access>
+ <affected>
+ <package name="%(cp)s" auto="yes" arch="*">
+ <unaffected range="ge">%(unaffected_version)s</unaffected>
+ <vulnerable range="lt">%(unaffected_version)s</vulnerable>
+ </package>
+ </affected>
+ <background>
+ <p>%(pkgname)s is a software package.</p>
+ </background>
+ <description>
+ <p>Multiple vulnerabilities have been discovered in %(pkgname)s.
+ </p>
+ </description>
+ <impact type="normal">
+ <p>A remote attacker could exploit these vulnerabilities.</p>
+ </impact>
+ <workaround>
+ <p>There is no known workaround at this time.</p>
+ </workaround>
+ <resolution>
+ <p>All %(pkgname)s users should upgrade to the latest version:</p>
+ <code>
+ # emerge --sync
+ # emerge --ask --oneshot --verbose "&gt;=%(cp)s-%(unaffected_version)s"
+ </code>
+ </resolution>
+ <references>
+ </references>
+</glsa>
+"""
+
+ def _must_skip(self):
+ try:
+ __import__("xml.etree.ElementTree")
+ __import__("xml.parsers.expat").parsers.expat.ExpatError
+ except (AttributeError, ImportError):
+ return "python is missing xml support"
+
+ def testSecuritySet(self):
+
+ skip_reason = self._must_skip()
+ if skip_reason:
+ self.portage_skip = skip_reason
+ self.assertFalse(True, skip_reason)
+ return
+
+ ebuilds = {
+ "cat/A-vulnerable-2.2": {
+ "KEYWORDS": "x86"
+ },
+ "cat/B-not-vulnerable-4.5": {
+ "KEYWORDS": "x86"
+ },
+ }
+
+ installed = {
+ "cat/A-vulnerable-2.1": {
+ "KEYWORDS": "x86"
+ },
+ "cat/B-not-vulnerable-4.4": {
+ "KEYWORDS": "x86"
+ },
+ }
+
+ glsas = (
+ {
+ "glsa_id": "201301-01",
+ "pkgname": "A-vulnerable",
+ "cp": "cat/A-vulnerable",
+ "unaffected_version": "2.2"
+ },
+ {
+ "glsa_id": "201301-02",
+ "pkgname": "B-not-vulnerable",
+ "cp": "cat/B-not-vulnerable",
+ "unaffected_version": "4.4"
+ },
+ {
+ "glsa_id": "201301-03",
+ "pkgname": "NotInstalled",
+ "cp": "cat/NotInstalled",
+ "unaffected_version": "3.5"
+ },
+ )
+
+ world = ["cat/A"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["@security"],
+ options = {},
+ success = True,
+ mergelist = ["cat/A-vulnerable-2.2"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+
+ try:
+
+ portdb = playground.trees[playground.eroot]["porttree"].dbapi
+ glsa_dir = os.path.join(portdb.porttree_root, 'metadata', 'glsa')
+ portage.util.ensure_dirs(glsa_dir)
+ for glsa in glsas:
+ with io.open(os.path.join(glsa_dir,
+ 'glsa-' + glsa["glsa_id"] + '.xml'),
+ encoding=_encodings['repo.content'], mode='w') as f:
+ f.write(self.glsa_template % glsa)
+
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
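For reference, the repository layout this test constructs: the @security set resolves advisories from GLSA XML files under the repository's metadata/glsa directory, one file per advisory. A small sketch with a hypothetical repository path:

import os

repo_root = "/var/db/repos/gentoo"  # hypothetical repository location
glsa = {"glsa_id": "201301-01", "pkgname": "A-vulnerable",
	"cp": "cat/A-vulnerable", "unaffected_version": "2.2"}
# Files are named glsa-<id>.xml; rendering the template above with
# this mapping produces the XML body.
glsa_path = os.path.join(repo_root, "metadata", "glsa",
	"glsa-%s.xml" % glsa["glsa_id"])
print(glsa_path)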
diff --git a/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py b/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
index c7ebbaff9..080cf3f98 100644
--- a/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
+++ b/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
@@ -6,8 +6,8 @@ import portage
from portage import os
from portage.const import PORTAGE_PYM_PATH
from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
-from _emerge.PollScheduler import PollScheduler
from _emerge.PipeReader import PipeReader
from _emerge.SpawnProcess import SpawnProcess
@@ -52,7 +52,7 @@ sys.stdout.write(" ".join(k for k in sys.modules
# then the above PYTHONPATH override doesn't help.
env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
- scheduler = PollScheduler().sched_iface
+ scheduler = global_event_loop()
master_fd, slave_fd = os.pipe()
master_file = os.fdopen(master_fd, 'rb', 0)
slave_file = os.fdopen(slave_fd, 'wb')
diff --git a/pym/portage/tests/lint/test_bash_syntax.py b/pym/portage/tests/lint/test_bash_syntax.py
index aef8d74f1..fdbb6fe88 100644
--- a/pym/portage/tests/lint/test_bash_syntax.py
+++ b/pym/portage/tests/lint/test_bash_syntax.py
@@ -1,20 +1,26 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from itertools import chain
import stat
+import subprocess
+import sys
-from portage.const import BASH_BINARY, PORTAGE_BIN_PATH
+from portage.const import BASH_BINARY, PORTAGE_BASE_PATH, PORTAGE_BIN_PATH
from portage.tests import TestCase
from portage import os
-from portage import subprocess_getstatusoutput
from portage import _encodings
-from portage import _shell_quote
from portage import _unicode_decode, _unicode_encode
class BashSyntaxTestCase(TestCase):
def testBashSyntax(self):
- for parent, dirs, files in os.walk(PORTAGE_BIN_PATH):
+ locations = [PORTAGE_BIN_PATH]
+ misc_dir = os.path.join(PORTAGE_BASE_PATH, "misc")
+ if os.path.isdir(misc_dir):
+ locations.append(misc_dir)
+ for parent, dirs, files in \
+ chain.from_iterable(os.walk(x) for x in locations):
parent = _unicode_decode(parent,
encoding=_encodings['fs'], errors='strict')
for x in files:
@@ -36,7 +42,13 @@ class BashSyntaxTestCase(TestCase):
f.close()
if line[:2] == '#!' and \
'bash' in line:
- cmd = "%s -n %s" % (_shell_quote(BASH_BINARY), _shell_quote(x))
- status, output = subprocess_getstatusoutput(cmd)
+ cmd = [BASH_BINARY, "-n", x]
+ cmd = [_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict') for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = _unicode_decode(proc.communicate()[0],
+ encoding=_encodings['fs'])
+ status = proc.wait()
self.assertEqual(os.WIFEXITED(status) and \
os.WEXITSTATUS(status) == os.EX_OK, True, msg=output)
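The rewrite above replaces a shell-quoted command string with an argv list, so file names no longer need _shell_quote(). A standalone sketch of the same check; "bash" on PATH is an assumption here, where the test uses portage.const.BASH_BINARY:

import subprocess

def bash_syntax_ok(script_path):
	# bash -n parses the script without executing it.
	proc = subprocess.Popen(["bash", "-n", script_path],
		stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
	output = proc.communicate()[0]
	return proc.returncode == 0, output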
diff --git a/pym/portage/tests/lint/test_compile_modules.py b/pym/portage/tests/lint/test_compile_modules.py
index f90a6665a..ce7e3fb90 100644
--- a/pym/portage/tests/lint/test_compile_modules.py
+++ b/pym/portage/tests/lint/test_compile_modules.py
@@ -1,6 +1,7 @@
-# Copyright 2009-2010 Gentoo Foundation
+# Copyright 2009-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import errno
import itertools
import stat
@@ -10,12 +11,10 @@ from portage import os
from portage import _encodings
from portage import _unicode_decode, _unicode_encode
-import py_compile
-
class CompileModulesTestCase(TestCase):
def testCompileModules(self):
- for parent, dirs, files in itertools.chain(
+ for parent, _dirs, files in itertools.chain(
os.walk(PORTAGE_BIN_PATH),
os.walk(PORTAGE_PYM_PATH)):
parent = _unicode_decode(parent,
@@ -33,14 +32,21 @@ class CompileModulesTestCase(TestCase):
if x[-3:] == '.py':
do_compile = True
else:
- # Check for python shebang
- f = open(_unicode_encode(x,
- encoding=_encodings['fs'], errors='strict'), 'rb')
- line = _unicode_decode(f.readline(),
- encoding=_encodings['content'], errors='replace')
- f.close()
- if line[:2] == '#!' and \
- 'python' in line:
+ # Check for python shebang.
+ try:
+ with open(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ line = _unicode_decode(f.readline(),
+ encoding=_encodings['content'], errors='replace')
+ except IOError as e:
+ # Some tests create files that are unreadable by the
+ # user (by design), so ignore EACCES issues.
+ if e.errno != errno.EACCES:
+ raise
+ continue
+ if line[:2] == '#!' and 'python' in line:
do_compile = True
if do_compile:
- py_compile.compile(x, cfile='/dev/null', doraise=True)
+ with open(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ compile(f.read(), x, 'exec')
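The switch above from py_compile.compile() to the compile() builtin syntax-checks a source file without writing bytecode anywhere. A standalone sketch; the path is a hypothetical example:

path = "/usr/lib/portage/pym/portage/exception.py"
with open(path, "rb") as f:
	source = f.read()
try:
	# Parses and compiles only; raises SyntaxError on bad input and
	# never emits a .pyc, unlike py_compile.compile().
	compile(source, path, "exec")
except SyntaxError as e:
	print("syntax error: %s" % e)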
diff --git a/pym/portage/tests/lint/test_import_modules.py b/pym/portage/tests/lint/test_import_modules.py
index 8d257c5a6..34261f464 100644
--- a/pym/portage/tests/lint/test_import_modules.py
+++ b/pym/portage/tests/lint/test_import_modules.py
@@ -1,4 +1,4 @@
-# Copyright 2011 Gentoo Foundation
+# Copyright 2011-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.const import PORTAGE_PYM_PATH
diff --git a/pym/portage/tests/locks/test_asynchronous_lock.py b/pym/portage/tests/locks/test_asynchronous_lock.py
index 49dd10ec4..3a2ccfb84 100644
--- a/pym/portage/tests/locks/test_asynchronous_lock.py
+++ b/pym/portage/tests/locks/test_asynchronous_lock.py
@@ -7,13 +7,13 @@ import tempfile
from portage import os
from portage import shutil
from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.AsynchronousLock import AsynchronousLock
-from _emerge.PollScheduler import PollScheduler
class AsynchronousLockTestCase(TestCase):
def _testAsynchronousLock(self):
- scheduler = PollScheduler().sched_iface
+ scheduler = global_event_loop()
tempdir = tempfile.mkdtemp()
try:
path = os.path.join(tempdir, 'lock_me')
@@ -53,7 +53,7 @@ class AsynchronousLockTestCase(TestCase):
os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
def _testAsynchronousLockWait(self):
- scheduler = PollScheduler().sched_iface
+ scheduler = global_event_loop()
tempdir = tempfile.mkdtemp()
try:
path = os.path.join(tempdir, 'lock_me')
@@ -94,7 +94,7 @@ class AsynchronousLockTestCase(TestCase):
os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
def _testAsynchronousLockWaitCancel(self):
- scheduler = PollScheduler().sched_iface
+ scheduler = global_event_loop()
tempdir = tempfile.mkdtemp()
try:
path = os.path.join(tempdir, 'lock_me')
@@ -132,7 +132,7 @@ class AsynchronousLockTestCase(TestCase):
os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
def _testAsynchronousLockWaitKill(self):
- scheduler = PollScheduler().sched_iface
+ scheduler = global_event_loop()
tempdir = tempfile.mkdtemp()
try:
path = os.path.join(tempdir, 'lock_me')
diff --git a/pym/portage/tests/process/test_PopenProcess.py b/pym/portage/tests/process/test_PopenProcess.py
new file mode 100644
index 000000000..88da0b354
--- /dev/null
+++ b/pym/portage/tests/process/test_PopenProcess.py
@@ -0,0 +1,85 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import tempfile
+
+from portage import os
+from portage.tests import TestCase
+from portage.util._async.PipeLogger import PipeLogger
+from portage.util._async.PopenProcess import PopenProcess
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.PipeReader import PipeReader
+
+class PopenPipeTestCase(TestCase):
+ """
+ Test PopenProcess, which can be useful for Jython support, since it
+ uses subprocess.Popen instead of os.fork().
+ """
+
+ _echo_cmd = "echo -n '%s'"
+
+ def _testPipeReader(self, test_string):
+ """
+ Use a poll loop to read data from a pipe and assert that
+ the data written to the pipe is identical to the data
+ read from the pipe.
+ """
+
+ producer = PopenProcess(proc=subprocess.Popen(
+ ["bash", "-c", self._echo_cmd % test_string],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ pipe_reader=PipeReader(), scheduler=global_event_loop())
+
+ consumer = producer.pipe_reader
+ consumer.input_files = {"producer" : producer.proc.stdout}
+
+ producer.start()
+ producer.wait()
+
+ self.assertEqual(producer.returncode, os.EX_OK)
+ self.assertEqual(consumer.returncode, os.EX_OK)
+
+ return consumer.getvalue().decode('ascii', 'replace')
+
+ def _testPipeLogger(self, test_string):
+
+ producer = PopenProcess(proc=subprocess.Popen(
+ ["bash", "-c", self._echo_cmd % test_string],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ scheduler=global_event_loop())
+
+ fd, log_file_path = tempfile.mkstemp()
+ try:
+
+ consumer = PipeLogger(background=True,
+ input_fd=producer.proc.stdout,
+ log_file_path=log_file_path)
+
+ producer.pipe_reader = consumer
+
+ producer.start()
+ producer.wait()
+
+ self.assertEqual(producer.returncode, os.EX_OK)
+ self.assertEqual(consumer.returncode, os.EX_OK)
+
+ with open(log_file_path, 'rb') as f:
+ content = f.read()
+
+ finally:
+ os.close(fd)
+ os.unlink(log_file_path)
+
+ return content.decode('ascii', 'replace')
+
+ def testPopenPipe(self):
+ for x in (1, 2, 5, 6, 7, 8, 2**5, 2**10, 2**12, 2**13, 2**14):
+ test_string = x * "a"
+ output = self._testPipeReader(test_string)
+ self.assertEqual(test_string, output,
+ "x = %s, len(output) = %s" % (x, len(output)))
+
+ output = self._testPipeLogger(test_string)
+ self.assertEqual(test_string, output,
+ "x = %s, len(output) = %s" % (x, len(output)))
diff --git a/pym/portage/tests/process/test_PopenProcessBlockingIO.py b/pym/portage/tests/process/test_PopenProcessBlockingIO.py
new file mode 100644
index 000000000..9ee291a39
--- /dev/null
+++ b/pym/portage/tests/process/test_PopenProcessBlockingIO.py
@@ -0,0 +1,63 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+
+try:
+ import threading
+except ImportError:
+ # dummy_threading will not suffice
+ threading = None
+
+from portage import os
+from portage.tests import TestCase
+from portage.util._async.PopenProcess import PopenProcess
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.util._async.PipeReaderBlockingIO import PipeReaderBlockingIO
+
+class PopenPipeBlockingIOTestCase(TestCase):
+ """
+ Test PopenProcess, which can be useful for Jython support:
+ * use subprocess.Popen since Jython does not support os.fork()
+ * use blocking IO with threads, since Jython does not support
+ fcntl non-blocking IO
+ """
+
+ _echo_cmd = "echo -n '%s'"
+
+ def _testPipeReader(self, test_string):
+ """
+ Use a poll loop to read data from a pipe and assert that
+ the data written to the pipe is identical to the data
+ read from the pipe.
+ """
+
+ producer = PopenProcess(proc=subprocess.Popen(
+ ["bash", "-c", self._echo_cmd % test_string],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ pipe_reader=PipeReaderBlockingIO(), scheduler=global_event_loop())
+
+ consumer = producer.pipe_reader
+ consumer.input_files = {"producer" : producer.proc.stdout}
+
+ producer.start()
+ producer.wait()
+
+ self.assertEqual(producer.returncode, os.EX_OK)
+ self.assertEqual(consumer.returncode, os.EX_OK)
+
+ return consumer.getvalue().decode('ascii', 'replace')
+
+ def testPopenPipeBlockingIO(self):
+
+ if threading is None:
+ skip_reason = "threading disabled"
+ self.portage_skip = "threading disabled"
+ self.assertFalse(True, skip_reason)
+ return
+
+ for x in (1, 2, 5, 6, 7, 8, 2**5, 2**10, 2**12, 2**13, 2**14):
+ test_string = x * "a"
+ output = self._testPipeReader(test_string)
+ self.assertEqual(test_string, output,
+ "x = %s, len(output) = %s" % (x, len(output)))
diff --git a/pym/portage/tests/process/test_poll.py b/pym/portage/tests/process/test_poll.py
index d6667b4e0..8c57c237a 100644
--- a/pym/portage/tests/process/test_poll.py
+++ b/pym/portage/tests/process/test_poll.py
@@ -1,12 +1,14 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import subprocess
+
from portage import os
from portage.tests import TestCase
from portage.util._pty import _create_pty_or_pipe
-from _emerge.TaskScheduler import TaskScheduler
+from portage.util._async.PopenProcess import PopenProcess
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.PipeReader import PipeReader
-from _emerge.SpawnProcess import SpawnProcess
class PipeReaderTestCase(TestCase):
@@ -36,26 +38,23 @@ class PipeReaderTestCase(TestCase):
# WARNING: It is very important to use unbuffered mode here,
# in order to avoid issue 5380 with python3.
master_file = os.fdopen(master_fd, 'rb', 0)
- slave_file = os.fdopen(slave_fd, 'wb', 0)
- task_scheduler = TaskScheduler(max_jobs=2)
- producer = SpawnProcess(
- args=["bash", "-c", self._echo_cmd % test_string],
- env=os.environ, fd_pipes={1:slave_fd},
- scheduler=task_scheduler.sched_iface)
- task_scheduler.add(producer)
- slave_file.close()
+ scheduler = global_event_loop()
consumer = PipeReader(
input_files={"producer" : master_file},
- scheduler=task_scheduler.sched_iface, _use_array=self._use_array)
+ _use_array=self._use_array,
+ scheduler=scheduler)
+
+ producer = PopenProcess(
+ pipe_reader=consumer,
+ proc=subprocess.Popen(["bash", "-c", self._echo_cmd % test_string],
+ stdout=slave_fd),
+ scheduler=scheduler)
- task_scheduler.add(consumer)
+ producer.start()
+ os.close(slave_fd)
+ producer.wait()
- # This will ensure that both tasks have exited, which
- # is necessary to avoid "ResourceWarning: unclosed file"
- # warnings since Python 3.2 (and also ensures that we
- # don't leave any zombie child processes).
- task_scheduler.run()
self.assertEqual(producer.returncode, os.EX_OK)
self.assertEqual(consumer.returncode, os.EX_OK)
diff --git a/pym/portage/tests/repoman/test_echangelog.py b/pym/portage/tests/repoman/test_echangelog.py
index 502aa7292..1640be268 100644
--- a/pym/portage/tests/repoman/test_echangelog.py
+++ b/pym/portage/tests/repoman/test_echangelog.py
@@ -1,13 +1,9 @@
# Copyright 2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-import datetime
-import subprocess
-import sys
import tempfile
import time
-import portage
from portage import os
from portage import shutil
from portage.tests import TestCase
@@ -35,7 +31,7 @@ class RepomanEchangelogTestCase(TestCase):
self.header_pkg = '# ChangeLog for %s/%s\n' % (self.cat, self.pkg)
self.header_copyright = '# Copyright 1999-%s Gentoo Foundation; Distributed under the GPL v2\n' % \
- datetime.datetime.now().year
+ time.strftime('%Y', time.gmtime())
self.header_cvs = '# $Header: $\n'
self.changelog = os.path.join(self.pkgdir, 'ChangeLog')
diff --git a/pym/portage/tests/repoman/test_simple.py b/pym/portage/tests/repoman/test_simple.py
index eab06d9b8..69eb36de8 100644
--- a/pym/portage/tests/repoman/test_simple.py
+++ b/pym/portage/tests/repoman/test_simple.py
@@ -1,4 +1,4 @@
-# Copyright 2011 Gentoo Foundation
+# Copyright 2011-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import subprocess
@@ -76,9 +76,26 @@ class SimpleRepomanTestCase(TestCase):
profiles = (
("x86", "default/linux/x86/test_profile", "stable"),
+ ("x86", "default/linux/x86/test_dev", "dev"),
+ ("x86", "default/linux/x86/test_exp", "exp"),
)
+ profile = {
+ "eapi": ("5",),
+ "package.use.stable.mask": ("dev-libs/A flag",)
+ }
+
ebuilds = {
+ "dev-libs/A-0": {
+ "COPYRIGHT_HEADER" : copyright_header,
+ "DESCRIPTION" : "Desc goes here",
+ "EAPI" : "5",
+ "HOMEPAGE" : "http://example.com",
+ "IUSE" : "flag",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "RDEPEND": "flag? ( dev-libs/B[flag] )",
+ },
"dev-libs/A-1": {
"COPYRIGHT_HEADER" : copyright_header,
"DESCRIPTION" : "Desc goes here",
@@ -98,6 +115,17 @@ class SimpleRepomanTestCase(TestCase):
"KEYWORDS": "~x86",
"LICENSE": "GPL-2",
},
+ "dev-libs/C-0": {
+ "COPYRIGHT_HEADER" : copyright_header,
+ "DESCRIPTION" : "Desc goes here",
+ "EAPI" : "4",
+ "HOMEPAGE" : "http://example.com",
+ "IUSE" : "flag",
+ # must be unstable, since dev-libs/A[flag] is stable masked
+ "KEYWORDS": "~x86",
+ "LICENSE": "GPL-2",
+ "RDEPEND": "flag? ( dev-libs/A[flag] )",
+ },
}
licenses = ["GPL-2"]
arch_list = ["x86"]
@@ -107,7 +135,7 @@ class SimpleRepomanTestCase(TestCase):
"dev-libs/A",
{
"herd" : "base-system",
- "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
+ "flags" : "<flag name='flag' restrict='&gt;=dev-libs/A-0'>Description of how USE='flag' affects this package</flag>",
},
),
(
@@ -117,6 +145,13 @@ class SimpleRepomanTestCase(TestCase):
"flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
},
),
+ (
+ "dev-libs/C",
+ {
+ "herd" : "no-herd",
+ "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
+ },
+ ),
)
use_desc = (
@@ -124,18 +159,18 @@ class SimpleRepomanTestCase(TestCase):
)
playground = ResolverPlayground(ebuilds=ebuilds,
- repo_configs=repo_configs, debug=debug)
+ profile=profile, repo_configs=repo_configs, debug=debug)
settings = playground.settings
eprefix = settings["EPREFIX"]
eroot = settings["EROOT"]
portdb = playground.trees[playground.eroot]["porttree"].dbapi
homedir = os.path.join(eroot, "home")
distdir = os.path.join(eprefix, "distdir")
- portdir = settings["PORTDIR"]
- profiles_dir = os.path.join(portdir, "profiles")
- license_dir = os.path.join(portdir, "licenses")
+ test_repo_location = settings.repositories["test_repo"].location
+ profiles_dir = os.path.join(test_repo_location, "profiles")
+ license_dir = os.path.join(test_repo_location, "licenses")
- repoman_cmd = (portage._python_interpreter, "-Wd",
+ repoman_cmd = (portage._python_interpreter, "-b", "-Wd",
os.path.join(PORTAGE_BIN_PATH, "repoman"))
git_binary = find_binary("git")
@@ -159,6 +194,7 @@ class SimpleRepomanTestCase(TestCase):
("", git_cmd + ("init-db",)),
("", git_cmd + ("add", ".")),
("", git_cmd + ("commit", "-a", "-m", "add whole repo")),
+ ("", repoman_cmd + ("full", "-d")),
("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "2.ebuild")),
("", git_cmd + ("add", test_ebuild[:-8] + "2.ebuild")),
("", repoman_cmd + ("commit", "-m", "bump to version 2")),
@@ -192,23 +228,35 @@ class SimpleRepomanTestCase(TestCase):
"PATH" : os.environ["PATH"],
"PORTAGE_GRPNAME" : os.environ["PORTAGE_GRPNAME"],
"PORTAGE_USERNAME" : os.environ["PORTAGE_USERNAME"],
- "PORTDIR" : portdir,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
"PYTHONPATH" : pythonpath,
}
if os.environ.get("SANDBOX_ON") == "1":
# avoid problems from nested sandbox instances
- env["FEATURES"] = "-sandbox"
+ env["FEATURES"] = "-sandbox -usersandbox"
dirs = [homedir, license_dir, profiles_dir, distdir]
try:
for d in dirs:
ensure_dirs(d)
- with open(os.path.join(portdir, "skel.ChangeLog"), 'w') as f:
+ with open(os.path.join(test_repo_location, "skel.ChangeLog"), 'w') as f:
f.write(copyright_header)
with open(os.path.join(profiles_dir, "profiles.desc"), 'w') as f:
for x in profiles:
f.write("%s %s %s\n" % x)
+
+ # ResolverPlayground only created the first profile,
+ # so create the remaining ones.
+ for x in profiles[1:]:
+ sub_profile_dir = os.path.join(profiles_dir, x[1])
+ ensure_dirs(sub_profile_dir)
+ for config_file, lines in profile.items():
+ file_name = os.path.join(sub_profile_dir, config_file)
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
+
for x in licenses:
open(os.path.join(license_dir, x), 'wb').close()
with open(os.path.join(profiles_dir, "arch.list"), 'w') as f:
@@ -218,12 +266,12 @@ class SimpleRepomanTestCase(TestCase):
for k, v in use_desc:
f.write("%s - %s\n" % (k, v))
for cp, xml_data in metadata_xml_files:
- with open(os.path.join(portdir, cp, "metadata.xml"), 'w') as f:
+ with open(os.path.join(test_repo_location, cp, "metadata.xml"), 'w') as f:
f.write(playground.metadata_xml_template % xml_data)
- # Use a symlink to portdir, in order to trigger bugs
+ # Use a symlink to test_repo, in order to trigger bugs
# involving canonical vs. non-canonical paths.
- portdir_symlink = os.path.join(eroot, "portdir_symlink")
- os.symlink(portdir, portdir_symlink)
+ test_repo_symlink = os.path.join(eroot, "test_repo_symlink")
+ os.symlink(test_repo_location, test_repo_symlink)
# repoman checks metadata.dtd for recent CTIME, so copy the file in
# order to ensure that the CTIME is current
shutil.copyfile(metadata_dtd, os.path.join(distdir, "metadata.dtd"))
@@ -238,9 +286,8 @@ class SimpleRepomanTestCase(TestCase):
stdout = subprocess.PIPE
for cwd in ("", "dev-libs", "dev-libs/A", "dev-libs/B"):
- abs_cwd = os.path.join(portdir_symlink, cwd)
- proc = subprocess.Popen([portage._python_interpreter, "-Wd",
- os.path.join(PORTAGE_BIN_PATH, "repoman"), "full"],
+ abs_cwd = os.path.join(test_repo_symlink, cwd)
+ proc = subprocess.Popen(repoman_cmd + ("full",),
cwd=abs_cwd, env=env, stdout=stdout)
if debug:
@@ -258,7 +305,7 @@ class SimpleRepomanTestCase(TestCase):
if git_binary is not None:
for cwd, cmd in git_test:
- abs_cwd = os.path.join(portdir_symlink, cwd)
+ abs_cwd = os.path.join(test_repo_symlink, cwd)
proc = subprocess.Popen(cmd,
cwd=abs_cwd, env=env, stdout=stdout)
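
The hunks above move the repoman test off the deprecated PORTDIR variable and onto the repositories API: paths are derived from settings.repositories["test_repo"].location, and the subprocess environment carries the full repository layout. A minimal sketch of that round-trip follows; config_string() is the portage API used in the diff, while the serialized text shown is illustrative (its shape matches what ResolverPlayground._load_config() builds further below):

# The parent test process serializes its repository configuration; the
# repoman subprocess reconstructs the same layout from the environment.
env["PORTAGE_REPOSITORIES"] = settings.repositories.config_string()
# The value is repos.conf-style text along these lines (temp path made up):
#
#   [DEFAULT]
#   main-repo = test_repo
#   [test_repo]
#   location = /tmp/playground/var/repositories/test_repo
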
diff --git a/pym/portage/tests/resolver/ResolverPlayground.py b/pym/portage/tests/resolver/ResolverPlayground.py
index 0ac209761..077e27159 100644
--- a/pym/portage/tests/resolver/ResolverPlayground.py
+++ b/pym/portage/tests/resolver/ResolverPlayground.py
@@ -1,7 +1,8 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from itertools import permutations
+import fnmatch
import sys
import tempfile
import portage
@@ -25,6 +26,7 @@ from _emerge.depgraph import backtrack_depgraph
from _emerge.RootConfig import RootConfig
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
class ResolverPlayground(object):
@@ -34,9 +36,10 @@ class ResolverPlayground(object):
its work.
"""
- config_files = frozenset(("package.accept_keywords", "package.use", "package.mask", "package.keywords", \
- "package.unmask", "package.properties", "package.license", "use.mask", "use.force",
- "layout.conf",))
+ config_files = frozenset(("eapi", "layout.conf", "make.conf", "package.accept_keywords",
+ "package.keywords", "package.license", "package.mask", "package.properties",
+ "package.unmask", "package.use", "package.use.aliases", "package.use.stable.mask",
+ "unpack_dependencies", "use.aliases", "use.force", "use.mask", "layout.conf"))
metadata_xml_template = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
@@ -54,27 +57,32 @@ class ResolverPlayground(object):
"""
def __init__(self, ebuilds={}, binpkgs={}, installed={}, profile={}, repo_configs={}, \
- user_config={}, sets={}, world=[], world_sets=[], distfiles={}, debug=False):
+ user_config={}, sets={}, world=[], world_sets=[], distfiles={},
+ targetroot=False, debug=False):
"""
- ebuilds: cpv -> metadata mapping simulating available ebuilds.
+ ebuilds: cpv -> metadata mapping simulating available ebuilds.
installed: cpv -> metadata mapping simulating installed packages.
If a metadata key is missing, it gets a default value.
profile: settings defined by the profile.
"""
self.debug = debug
self.eprefix = normalize_path(tempfile.mkdtemp())
+ portage.const.EPREFIX = self.eprefix.rstrip(os.sep)
+
self.eroot = self.eprefix + os.sep
+ if targetroot:
+ self.target_root = os.path.join(self.eroot, 'target_root')
+ else:
+ self.target_root = os.sep
self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
self.pkgdir = os.path.join(self.eprefix, "pkgdir")
- self.portdir = os.path.join(self.eroot, "usr/portage")
self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
- os.makedirs(self.portdir)
os.makedirs(self.vdbdir)
if not debug:
portage.util.noiselimit = -2
- self.repo_dirs = {}
+ self._repositories = {}
#Make sure the main repo is always created
self._get_repo_dir("test_repo")
@@ -88,20 +96,19 @@ class ResolverPlayground(object):
self.settings, self.trees = self._load_config()
self._create_ebuild_manifests(ebuilds)
-
+
portage.util.noiselimit = 0
def _get_repo_dir(self, repo):
"""
Create the repo directory if needed.
"""
- if repo not in self.repo_dirs:
+ if repo not in self._repositories:
if repo == "test_repo":
- repo_path = self.portdir
- else:
- repo_path = os.path.join(self.eroot, "usr", "local", repo)
+ self._repositories["DEFAULT"] = {"main-repo": repo}
- self.repo_dirs[repo] = repo_path
+ repo_path = os.path.join(self.eroot, "var", "repositories", repo)
+ self._repositories[repo] = {"location": repo_path}
profile_path = os.path.join(repo_path, "profiles")
try:
@@ -110,11 +117,10 @@ class ResolverPlayground(object):
pass
repo_name_file = os.path.join(profile_path, "repo_name")
- f = open(repo_name_file, "w")
- f.write("%s\n" % repo)
- f.close()
+ with open(repo_name_file, "w") as f:
+ f.write("%s\n" % repo)
- return self.repo_dirs[repo]
+ return self._repositories[repo]["location"]
def _create_distfiles(self, distfiles):
os.makedirs(self.distdir)
@@ -131,24 +137,18 @@ class ResolverPlayground(object):
metadata = ebuilds[cpv].copy()
copyright_header = metadata.pop("COPYRIGHT_HEADER", None)
- desc = metadata.pop("DESCRIPTION", None)
- eapi = metadata.pop("EAPI", 0)
- lic = metadata.pop("LICENSE", "")
- properties = metadata.pop("PROPERTIES", "")
- slot = metadata.pop("SLOT", 0)
- keywords = metadata.pop("KEYWORDS", "x86")
- homepage = metadata.pop("HOMEPAGE", None)
- src_uri = metadata.pop("SRC_URI", None)
- iuse = metadata.pop("IUSE", "")
- provide = metadata.pop("PROVIDE", None)
- depend = metadata.pop("DEPEND", "")
- rdepend = metadata.pop("RDEPEND", None)
- pdepend = metadata.pop("PDEPEND", None)
- required_use = metadata.pop("REQUIRED_USE", None)
+ eapi = metadata.pop("EAPI", "0")
misc_content = metadata.pop("MISC_CONTENT", None)
+ metadata.setdefault("DEPEND", "")
+ metadata.setdefault("SLOT", "0")
+ metadata.setdefault("KEYWORDS", "x86")
+ metadata.setdefault("IUSE", "")
- if metadata:
- raise ValueError("metadata of ebuild '%s' contains unknown keys: %s" % (cpv, metadata.keys()))
+ unknown_keys = set(metadata).difference(
+ portage.dbapi.dbapi._known_keys)
+ if unknown_keys:
+ raise ValueError("metadata of ebuild '%s' contains unknown keys: %s" %
+ (cpv, sorted(unknown_keys)))
repo_dir = self._get_repo_dir(repo)
ebuild_dir = os.path.join(repo_dir, a.cp)
@@ -158,33 +158,14 @@ class ResolverPlayground(object):
except os.error:
pass
- f = open(ebuild_path, "w")
- if copyright_header is not None:
- f.write(copyright_header)
- f.write('EAPI="' + str(eapi) + '"\n')
- if desc is not None:
- f.write('DESCRIPTION="%s"\n' % desc)
- if homepage is not None:
- f.write('HOMEPAGE="%s"\n' % homepage)
- if src_uri is not None:
- f.write('SRC_URI="%s"\n' % src_uri)
- f.write('LICENSE="' + str(lic) + '"\n')
- f.write('PROPERTIES="' + str(properties) + '"\n')
- f.write('SLOT="' + str(slot) + '"\n')
- f.write('KEYWORDS="' + str(keywords) + '"\n')
- f.write('IUSE="' + str(iuse) + '"\n')
- if provide is not None:
- f.write('PROVIDE="%s"\n' % provide)
- f.write('DEPEND="' + str(depend) + '"\n')
- if rdepend is not None:
- f.write('RDEPEND="' + str(rdepend) + '"\n')
- if pdepend is not None:
- f.write('PDEPEND="' + str(pdepend) + '"\n')
- if required_use is not None:
- f.write('REQUIRED_USE="' + str(required_use) + '"\n')
- if misc_content is not None:
- f.write(misc_content)
- f.close()
+ with open(ebuild_path, "w") as f:
+ if copyright_header is not None:
+ f.write(copyright_header)
+ f.write('EAPI="%s"\n' % eapi)
+ for k, v in metadata.items():
+ f.write('%s="%s"\n' % (k, v))
+ if misc_content is not None:
+ f.write(misc_content)
def _create_ebuild_manifests(self, ebuilds):
tmpsettings = config(clone=self.settings)
@@ -241,49 +222,25 @@ class ResolverPlayground(object):
pass
metadata = installed[cpv].copy()
- eapi = metadata.pop("EAPI", 0)
- lic = metadata.pop("LICENSE", "")
- properties = metadata.pop("PROPERTIES", "")
- slot = metadata.pop("SLOT", 0)
- build_time = metadata.pop("BUILD_TIME", "0")
- keywords = metadata.pop("KEYWORDS", "~x86")
- iuse = metadata.pop("IUSE", "")
- use = metadata.pop("USE", "")
- provide = metadata.pop("PROVIDE", None)
- depend = metadata.pop("DEPEND", "")
- rdepend = metadata.pop("RDEPEND", None)
- pdepend = metadata.pop("PDEPEND", None)
- required_use = metadata.pop("REQUIRED_USE", None)
-
- if metadata:
- raise ValueError("metadata of installed '%s' contains unknown keys: %s" % (cpv, metadata.keys()))
-
- def write_key(key, value):
- f = open(os.path.join(vdb_pkg_dir, key), "w")
- f.write(str(value) + "\n")
- f.close()
-
- write_key("EAPI", eapi)
- write_key("BUILD_TIME", build_time)
- write_key("COUNTER", "0")
- write_key("LICENSE", lic)
- write_key("PROPERTIES", properties)
- write_key("SLOT", slot)
- write_key("LICENSE", lic)
- write_key("PROPERTIES", properties)
- write_key("repository", repo)
- write_key("KEYWORDS", keywords)
- write_key("IUSE", iuse)
- write_key("USE", use)
- if provide is not None:
- write_key("PROVIDE", provide)
- write_key("DEPEND", depend)
- if rdepend is not None:
- write_key("RDEPEND", rdepend)
- if pdepend is not None:
- write_key("PDEPEND", pdepend)
- if required_use is not None:
- write_key("REQUIRED_USE", required_use)
+ metadata.setdefault("SLOT", "0")
+ metadata.setdefault("BUILD_TIME", "0")
+ metadata.setdefault("COUNTER", "0")
+ metadata.setdefault("KEYWORDS", "~x86")
+
+ unknown_keys = set(metadata).difference(
+ portage.dbapi.dbapi._known_keys)
+ unknown_keys.discard("BUILD_TIME")
+ unknown_keys.discard("COUNTER")
+ unknown_keys.discard("repository")
+ unknown_keys.discard("USE")
+ if unknown_keys:
+ raise ValueError("metadata of installed '%s' contains unknown keys: %s" %
+ (cpv, sorted(unknown_keys)))
+
+ metadata["repository"] = repo
+ for k, v in metadata.items():
+ with open(os.path.join(vdb_pkg_dir, k), "w") as f:
+ f.write("%s\n" % v)
def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):
@@ -294,9 +251,12 @@ class ResolverPlayground(object):
except os.error:
pass
- for repo in self.repo_dirs:
+ for repo in self._repositories:
+ if repo == "DEFAULT":
+ continue
+
repo_dir = self._get_repo_dir(repo)
- profile_dir = os.path.join(self._get_repo_dir(repo), "profiles")
+ profile_dir = os.path.join(repo_dir, "profiles")
metadata_dir = os.path.join(repo_dir, "metadata")
os.makedirs(metadata_dir)
@@ -310,60 +270,66 @@ class ResolverPlayground(object):
categories.add(catsplit(cpv)[0])
categories_file = os.path.join(profile_dir, "categories")
- f = open(categories_file, "w")
- for cat in categories:
- f.write(cat + "\n")
- f.close()
-
+ with open(categories_file, "w") as f:
+ for cat in categories:
+ f.write(cat + "\n")
+
#Create $REPO/profiles/license_groups
license_file = os.path.join(profile_dir, "license_groups")
- f = open(license_file, "w")
- f.write("EULA TEST\n")
- f.close()
+ with open(license_file, "w") as f:
+ f.write("EULA TEST\n")
- repo_config = repo_configs.get(repo)
+ repo_config = repo_configs.get(repo)
if repo_config:
for config_file, lines in repo_config.items():
- if config_file not in self.config_files:
+ if config_file not in self.config_files and not any(fnmatch.fnmatch(config_file, os.path.join(x, "*")) for x in self.config_files):
raise ValueError("Unknown config file: '%s'" % config_file)
if config_file in ("layout.conf",):
file_name = os.path.join(repo_dir, "metadata", config_file)
else:
file_name = os.path.join(profile_dir, config_file)
- f = open(file_name, "w")
- for line in lines:
- f.write("%s\n" % line)
- f.close()
+ if "/" in config_file and not os.path.isdir(os.path.dirname(file_name)):
+ os.makedirs(os.path.dirname(file_name))
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
+ # Temporarily write empty value of masters until it becomes default.
+ # TODO: Delete all references to "# use implicit masters" when empty value becomes default.
+ if config_file == "layout.conf" and not any(line.startswith(("masters =", "# use implicit masters")) for line in lines):
+ f.write("masters =\n")
#Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
os.makedirs(os.path.join(repo_dir, "eclass"))
+ # Temporarily write empty value of masters until it becomes default.
+ if not repo_config or "layout.conf" not in repo_config:
+ layout_conf_path = os.path.join(repo_dir, "metadata", "layout.conf")
+ with open(layout_conf_path, "w") as f:
+ f.write("masters =\n")
+
if repo == "test_repo":
#Create a minimal profile in /usr/portage
sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
os.makedirs(sub_profile_dir)
- eapi_file = os.path.join(sub_profile_dir, "eapi")
- f = open(eapi_file, "w")
- f.write("0\n")
- f.close()
+ if not (profile and "eapi" in profile):
+ eapi_file = os.path.join(sub_profile_dir, "eapi")
+ with open(eapi_file, "w") as f:
+ f.write("0\n")
make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
- f = open(make_defaults_file, "w")
- f.write("ARCH=\"x86\"\n")
- f.write("ACCEPT_KEYWORDS=\"x86\"\n")
- f.close()
+ with open(make_defaults_file, "w") as f:
+ f.write("ARCH=\"x86\"\n")
+ f.write("ACCEPT_KEYWORDS=\"x86\"\n")
use_force_file = os.path.join(sub_profile_dir, "use.force")
- f = open(use_force_file, "w")
- f.write("x86\n")
- f.close()
+ with open(use_force_file, "w") as f:
+ f.write("x86\n")
parent_file = os.path.join(sub_profile_dir, "parent")
- f = open(parent_file, "w")
- f.write("..\n")
- f.close()
+ with open(parent_file, "w") as f:
+ f.write("..\n")
if profile:
for config_file, lines in profile.items():
@@ -371,10 +337,9 @@ class ResolverPlayground(object):
raise ValueError("Unknown config file: '%s'" % config_file)
file_name = os.path.join(sub_profile_dir, config_file)
- f = open(file_name, "w")
- for line in lines:
- f.write("%s\n" % line)
- f.close()
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
#Create profile symlink
os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile"))
@@ -400,24 +365,50 @@ class ResolverPlayground(object):
with open(os.path.join(metadata_dir, "metadata.xml"), 'w') as f:
f.write(herds_xml)
- # Write empty entries for each repository, in order to exercise
- # RepoConfigLoader's repos.conf processing.
- repos_conf_file = os.path.join(user_config_dir, "repos.conf")
- f = open(repos_conf_file, "w")
- for repo in sorted(self.repo_dirs.keys()):
- f.write("[%s]\n" % repo)
- f.write("\n")
- f.close()
+ make_conf = {
+ "ACCEPT_KEYWORDS": "x86",
+ "CLEAN_DELAY": "0",
+ "DISTDIR" : self.distdir,
+ "EMERGE_WARNING_DELAY": "0",
+ "PKGDIR": self.pkgdir,
+ "PORTAGE_INST_GID": str(portage.data.portage_gid),
+ "PORTAGE_INST_UID": str(portage.data.portage_uid),
+ "PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'),
+ }
+
+ if os.environ.get("NOCOLOR"):
+ make_conf["NOCOLOR"] = os.environ["NOCOLOR"]
+
+ # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
+ # need to be inherited by ebuild subprocesses.
+ if 'PORTAGE_USERNAME' in os.environ:
+ make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
+ if 'PORTAGE_GRPNAME' in os.environ:
+ make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
+
+ make_conf_lines = []
+ for k_v in make_conf.items():
+ make_conf_lines.append('%s="%s"' % k_v)
- for config_file, lines in user_config.items():
+ if "make.conf" in user_config:
+ make_conf_lines.extend(user_config["make.conf"])
+
+ if not portage.process.sandbox_capable or \
+ os.environ.get("SANDBOX_ON") == "1":
+ # avoid problems from nested sandbox instances
+ make_conf_lines.append('FEATURES="${FEATURES} -sandbox -usersandbox"')
+
+ configs = user_config.copy()
+ configs["make.conf"] = make_conf_lines
+
+ for config_file, lines in configs.items():
if config_file not in self.config_files:
raise ValueError("Unknown config file: '%s'" % config_file)
file_name = os.path.join(user_config_dir, config_file)
- f = open(file_name, "w")
- for line in lines:
- f.write("%s\n" % line)
- f.close()
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
#Create /usr/share/portage/config/make.globals
make_globals_path = os.path.join(self.eroot,
@@ -428,7 +419,7 @@ class ResolverPlayground(object):
#Create /usr/share/portage/config/sets/portage.conf
default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")
-
+
try:
os.makedirs(default_sets_conf_dir)
except os.error:
@@ -447,27 +438,9 @@ class ResolverPlayground(object):
for sets_file, lines in sets.items():
file_name = os.path.join(set_config_dir, sets_file)
- f = open(file_name, "w")
- for line in lines:
- f.write("%s\n" % line)
- f.close()
-
- user_config_dir = os.path.join(self.eroot, "etc", "portage")
-
- try:
- os.makedirs(user_config_dir)
- except os.error:
- pass
-
- for config_file, lines in user_config.items():
- if config_file not in self.config_files:
- raise ValueError("Unknown config file: '%s'" % config_file)
-
- file_name = os.path.join(user_config_dir, config_file)
- f = open(file_name, "w")
- for line in lines:
- f.write("%s\n" % line)
- f.close()
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
def _create_world(self, world, world_sets):
#Create /var/lib/portage/world
@@ -477,54 +450,34 @@ class ResolverPlayground(object):
world_file = os.path.join(var_lib_portage, "world")
world_set_file = os.path.join(var_lib_portage, "world_sets")
- f = open(world_file, "w")
- for atom in world:
- f.write("%s\n" % atom)
- f.close()
+ with open(world_file, "w") as f:
+ for atom in world:
+ f.write("%s\n" % atom)
- f = open(world_set_file, "w")
- for atom in world_sets:
- f.write("%s\n" % atom)
- f.close()
+ with open(world_set_file, "w") as f:
+ for atom in world_sets:
+ f.write("%s\n" % atom)
def _load_config(self):
- portdir_overlay = []
- for repo_name in sorted(self.repo_dirs):
- path = self.repo_dirs[repo_name]
- if path != self.portdir:
- portdir_overlay.append(path)
+
+ create_trees_kwargs = {}
+ if self.target_root != os.sep:
+ create_trees_kwargs["target_root"] = self.target_root
env = {
- "ACCEPT_KEYWORDS": "x86",
- "DISTDIR" : self.distdir,
- "PKGDIR": self.pkgdir,
- "PORTDIR": self.portdir,
- "PORTDIR_OVERLAY": " ".join(portdir_overlay),
- 'PORTAGE_TMPDIR' : os.path.join(self.eroot, 'var/tmp'),
+ "PORTAGE_REPOSITORIES": "\n".join("[%s]\n%s" % (repo_name, "\n".join("%s = %s" % (k, v) for k, v in repo_config.items())) for repo_name, repo_config in self._repositories.items())
}
- if os.environ.get("NOCOLOR"):
- env["NOCOLOR"] = os.environ["NOCOLOR"]
-
- if os.environ.get("SANDBOX_ON") == "1":
- # avoid problems from nested sandbox instances
- env["FEATURES"] = "-sandbox"
+ trees = portage.create_trees(env=env, eprefix=self.eprefix,
+ **create_trees_kwargs)
- # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
- # need to be inherited by ebuild subprocesses.
- if 'PORTAGE_USERNAME' in os.environ:
- env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
- if 'PORTAGE_GRPNAME' in os.environ:
- env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
-
- trees = portage.create_trees(env=env, eprefix=self.eprefix)
for root, root_trees in trees.items():
settings = root_trees["vartree"].settings
settings._init_dirs()
setconfig = load_default_config(settings, root_trees)
root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
-
- return settings, trees
+
+ return trees[trees._target_eroot]["vartree"].settings, trees
def run(self, atoms, options={}, action=None):
options = options.copy()
@@ -553,7 +506,7 @@ class ResolverPlayground(object):
rval, cleanlist, ordered, req_pkg_count = \
calc_depclean(self.settings, self.trees, None,
options, action, InternalPackageSet(initial_atoms=atoms, allow_wildcard=True), None)
- result = ResolverPlaygroundDepcleanResult( \
+ result = ResolverPlaygroundDepcleanResult(
atoms, rval, cleanlist, ordered, req_pkg_count)
else:
params = create_depgraph_params(options, action)
@@ -577,9 +530,9 @@ class ResolverPlayground(object):
return
def cleanup(self):
- portdb = self.trees[self.eroot]["porttree"].dbapi
- portdb.close_caches()
- portage.dbapi.porttree.portdbapi.portdbapi_instances.remove(portdb)
+ for eroot in self.trees:
+ portdb = self.trees[eroot]["porttree"].dbapi
+ portdb.close_caches()
if self.debug:
print("\nEROOT=%s" % self.eroot)
else:
@@ -742,13 +695,14 @@ class ResolverPlaygroundResult(object):
if self.depgraph._dynamic_config._serialized_tasks_cache is not None:
self.mergelist = []
+ host_root = self.depgraph._frozen_config._running_root.root
for x in self.depgraph._dynamic_config._serialized_tasks_cache:
if isinstance(x, Blocker):
self.mergelist.append(x.atom)
else:
repo_str = ""
- if x.metadata["repository"] != "test_repo":
- repo_str = _repo_separator + x.metadata["repository"]
+ if x.repo != "test_repo":
+ repo_str = _repo_separator + x.repo
mergelist_str = x.cpv + repo_str
if x.built:
if x.operation == "merge":
@@ -756,6 +710,8 @@ class ResolverPlaygroundResult(object):
else:
desc = x.operation
mergelist_str = "[%s]%s" % (desc, mergelist_str)
+ if x.root != host_root:
+ mergelist_str += "{targetroot}"
self.mergelist.append(mergelist_str)
if self.depgraph._dynamic_config._needed_use_config_changes:
@@ -781,7 +737,7 @@ class ResolverPlaygroundResult(object):
self.license_changes[pkg.cpv] = missing_licenses
if self.depgraph._dynamic_config._slot_conflict_handler is not None:
- self.slot_collision_solutions = []
+ self.slot_collision_solutions = []
handler = self.depgraph._dynamic_config._slot_conflict_handler
for change in handler.changes:
@@ -793,7 +749,7 @@ class ResolverPlaygroundResult(object):
if self.depgraph._dynamic_config._circular_dependency_handler is not None:
handler = self.depgraph._dynamic_config._circular_dependency_handler
sol = handler.solutions
- self.circular_dependency_solutions = dict( zip([x.cpv for x in sol.keys()], sol.values()) )
+ self.circular_dependency_solutions = dict(zip([x.cpv for x in sol.keys()], sol.values()))
class ResolverPlaygroundDepcleanResult(object):
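
The PORTAGE_REPOSITORIES value in _load_config() above is built by a dense one-line generator expression. Unrolled, it amounts to the following sketch (same output; the function name is illustrative):

def serialize_repositories(repositories):
    # repositories is shaped like self._repositories, e.g.:
    #   {"DEFAULT": {"main-repo": "test_repo"},
    #    "test_repo": {"location": "/tmp/playground/var/repositories/test_repo"}}
    sections = []
    for repo_name, repo_config in repositories.items():
        lines = ["[%s]" % repo_name]
        for k, v in repo_config.items():
            lines.append("%s = %s" % (k, v))
        sections.append("\n".join(lines))
    return "\n".join(sections)
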
diff --git a/pym/portage/tests/resolver/test_autounmask.py b/pym/portage/tests/resolver/test_autounmask.py
index 6acac9984..75fb36843 100644
--- a/pym/portage/tests/resolver/test_autounmask.py
+++ b/pym/portage/tests/resolver/test_autounmask.py
@@ -10,9 +10,9 @@ class AutounmaskTestCase(TestCase):
ebuilds = {
#ebuilds to test use changes
- "dev-libs/A-1": { "SLOT": 1, "DEPEND": "dev-libs/B[foo]", "EAPI": 2},
- "dev-libs/A-2": { "SLOT": 2, "DEPEND": "dev-libs/B[bar]", "EAPI": 2},
- "dev-libs/B-1": { "DEPEND": "foo? ( dev-libs/C ) bar? ( dev-libs/D )", "IUSE": "foo bar"},
+ "dev-libs/A-1": { "SLOT": 1, "DEPEND": "dev-libs/B[foo]", "EAPI": 2},
+ "dev-libs/A-2": { "SLOT": 2, "DEPEND": "dev-libs/B[bar]", "EAPI": 2},
+ "dev-libs/B-1": { "DEPEND": "foo? ( dev-libs/C ) bar? ( dev-libs/D )", "IUSE": "foo bar"},
"dev-libs/C-1": {},
"dev-libs/D-1": {},
@@ -56,10 +56,10 @@ class AutounmaskTestCase(TestCase):
"dev-util/R-1": { "IUSE": "bar" },
#ebuilds to test interaction with REQUIRED_USE
- "app-portage/A-1": { "DEPEND": "app-portage/B[foo]", "EAPI": 2 },
- "app-portage/A-2": { "DEPEND": "app-portage/B[foo=]", "IUSE": "+foo", "REQUIRED_USE": "foo", "EAPI": "4" },
+ "app-portage/A-1": { "DEPEND": "app-portage/B[foo]", "EAPI": 2 },
+ "app-portage/A-2": { "DEPEND": "app-portage/B[foo=]", "IUSE": "+foo", "REQUIRED_USE": "foo", "EAPI": "4" },
- "app-portage/B-1": { "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ "app-portage/B-1": { "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
"app-portage/C-1": { "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
}
@@ -69,183 +69,183 @@ class AutounmaskTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/A:1"],
- options = {"--autounmask": "n"},
- success = False),
+ options={"--autounmask": "n"},
+ success=False),
ResolverPlaygroundTestCase(
["dev-libs/A:1"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
- use_changes = { "dev-libs/B-1": {"foo": True} } ),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
+ use_changes={ "dev-libs/B-1": {"foo": True} }),
#Make sure we restart if needed.
ResolverPlaygroundTestCase(
["dev-libs/A:1", "dev-libs/B"],
- options = {"--autounmask": True},
- all_permutations = True,
- success = False,
- mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
- use_changes = { "dev-libs/B-1": {"foo": True} } ),
+ options={"--autounmask": True},
+ all_permutations=True,
+ success=False,
+ mergelist=["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
+ use_changes={ "dev-libs/B-1": {"foo": True} }),
ResolverPlaygroundTestCase(
["dev-libs/A:1", "dev-libs/A:2", "dev-libs/B"],
- options = {"--autounmask": True},
- all_permutations = True,
- success = False,
- mergelist = ["dev-libs/D-1", "dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1", "dev-libs/A-2"],
- ignore_mergelist_order = True,
- use_changes = { "dev-libs/B-1": {"foo": True, "bar": True} } ),
+ options={"--autounmask": True},
+ all_permutations=True,
+ success=False,
+ mergelist=["dev-libs/D-1", "dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1", "dev-libs/A-2"],
+ ignore_mergelist_order=True,
+ use_changes={ "dev-libs/B-1": {"foo": True, "bar": True} }),
#Test keywording.
#The simple case.
ResolverPlaygroundTestCase(
["app-misc/Z"],
- options = {"--autounmask": "n"},
- success = False),
+ options={"--autounmask": "n"},
+ success=False),
ResolverPlaygroundTestCase(
["app-misc/Z"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["app-misc/Y-1", "app-misc/Z-1"],
- unstable_keywords = ["app-misc/Y-1", "app-misc/Z-1"]),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-misc/Y-1", "app-misc/Z-1"],
+ unstable_keywords=["app-misc/Y-1", "app-misc/Z-1"]),
#Make sure that the backtracking for slot conflicts handles our mess.
ResolverPlaygroundTestCase(
["=app-misc/V-1", "app-misc/W"],
- options = {"--autounmask": True},
- all_permutations = True,
- success = False,
- mergelist = ["app-misc/W-2", "app-misc/V-1"],
- unstable_keywords = ["app-misc/W-2", "app-misc/V-1"]),
+ options={"--autounmask": True},
+ all_permutations=True,
+ success=False,
+ mergelist=["app-misc/W-2", "app-misc/V-1"],
+ unstable_keywords=["app-misc/W-2", "app-misc/V-1"]),
#Mixed testing
#Make sure we don't change use for something in a || dep if there is another choice
#that needs no change.
-
+
ResolverPlaygroundTestCase(
["=sci-libs/K-1"],
- options = {"--autounmask": True},
- success = True,
- mergelist = ["sci-libs/P-1", "sci-libs/K-1"]),
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-1"]),
ResolverPlaygroundTestCase(
["=sci-libs/K-2"],
- options = {"--autounmask": True},
- success = True,
- mergelist = ["sci-libs/P-1", "sci-libs/K-2"]),
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-2"]),
ResolverPlaygroundTestCase(
["=sci-libs/K-3"],
- options = {"--autounmask": True},
- success = True,
- mergelist = ["sci-libs/P-1", "sci-libs/K-3"]),
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-3"]),
ResolverPlaygroundTestCase(
["=sci-libs/K-4"],
- options = {"--autounmask": True},
- success = True,
- mergelist = ["sci-libs/P-1", "sci-libs/K-4"]),
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-4"]),
ResolverPlaygroundTestCase(
["=sci-libs/K-5"],
- options = {"--autounmask": True},
- success = True,
- mergelist = ["sci-libs/P-1", "sci-libs/K-5"]),
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-5"]),
ResolverPlaygroundTestCase(
["=sci-libs/K-6"],
- options = {"--autounmask": True},
- success = True,
- mergelist = ["sci-libs/P-1", "sci-libs/K-6"]),
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-6"]),
#Make sure we prefer use changes over keyword changes.
ResolverPlaygroundTestCase(
["=sci-libs/K-7"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["sci-libs/L-1", "sci-libs/K-7"],
- use_changes = { "sci-libs/L-1": { "bar": True } }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["sci-libs/L-1", "sci-libs/K-7"],
+ use_changes={ "sci-libs/L-1": { "bar": True } }),
ResolverPlaygroundTestCase(
["=sci-libs/K-8"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["sci-libs/L-1", "sci-libs/K-8"],
- use_changes = { "sci-libs/L-1": { "bar": True } }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["sci-libs/L-1", "sci-libs/K-8"],
+ use_changes={ "sci-libs/L-1": { "bar": True } }),
#Test these nice "required by cat/pkg[foo]" messages.
ResolverPlaygroundTestCase(
["=dev-util/Q-1"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-util/R-1", "dev-util/Q-1"],
- use_changes = { "dev-util/R-1": { "bar": True } }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-util/R-1", "dev-util/Q-1"],
+ use_changes={ "dev-util/R-1": { "bar": True } }),
ResolverPlaygroundTestCase(
["=dev-util/Q-2"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-util/R-1", "dev-util/Q-2"],
- use_changes = { "dev-util/R-1": { "bar": True } }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-util/R-1", "dev-util/Q-2"],
+ use_changes={ "dev-util/R-1": { "bar": True } }),
#Test interaction with REQUIRED_USE.
ResolverPlaygroundTestCase(
["=app-portage/A-1"],
- options = { "--autounmask": True },
- use_changes = None,
- success = False),
+ options={ "--autounmask": True },
+ use_changes=None,
+ success=False),
ResolverPlaygroundTestCase(
["=app-portage/A-2"],
- options = { "--autounmask": True },
- use_changes = None,
- success = False),
+ options={ "--autounmask": True },
+ use_changes=None,
+ success=False),
ResolverPlaygroundTestCase(
["=app-portage/C-1"],
- options = { "--autounmask": True },
- use_changes = None,
- success = False),
+ options={ "--autounmask": True },
+ use_changes=None,
+ success=False),
#Make sure we don't change masked/forced flags.
ResolverPlaygroundTestCase(
["dev-libs/E:1"],
- options = {"--autounmask": True},
- use_changes = None,
- success = False),
+ options={"--autounmask": True},
+ use_changes=None,
+ success=False),
ResolverPlaygroundTestCase(
["dev-libs/E:2"],
- options = {"--autounmask": True},
- use_changes = None,
- success = False),
+ options={"--autounmask": True},
+ use_changes=None,
+ success=False),
#Test mask and keyword changes.
ResolverPlaygroundTestCase(
["app-text/A"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["app-text/A-1"],
- needed_p_mask_changes = ["app-text/A-1"]),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/A-1"],
+ needed_p_mask_changes=["app-text/A-1"]),
ResolverPlaygroundTestCase(
["app-text/B"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["app-text/B-1"],
- unstable_keywords = ["app-text/B-1"],
- needed_p_mask_changes = ["app-text/B-1"]),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/B-1"],
+ unstable_keywords=["app-text/B-1"],
+ needed_p_mask_changes=["app-text/B-1"]),
ResolverPlaygroundTestCase(
["app-text/C"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["app-text/C-1"],
- unstable_keywords = ["app-text/C-1"],
- needed_p_mask_changes = ["app-text/C-1"]),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/C-1"],
+ unstable_keywords=["app-text/C-1"],
+ needed_p_mask_changes=["app-text/C-1"]),
#Make sure unstable keyword is preferred over missing keyword
ResolverPlaygroundTestCase(
["app-text/D"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["app-text/D-1"],
- unstable_keywords = ["app-text/D-1"]),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/D-1"],
+ unstable_keywords=["app-text/D-1"]),
#Test missing keyword
ResolverPlaygroundTestCase(
["=app-text/D-2"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["app-text/D-2"],
- unstable_keywords = ["app-text/D-2"])
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/D-2"],
+ unstable_keywords=["app-text/D-2"])
)
profile = {
@@ -279,7 +279,7 @@ class AutounmaskTestCase(TestCase):
"dev-libs/A-1": { "LICENSE": "TEST" },
"dev-libs/B-1": { "LICENSE": "TEST", "IUSE": "foo", "KEYWORDS": "~x86"},
"dev-libs/C-1": { "DEPEND": "dev-libs/B[foo]", "EAPI": 2 },
-
+
"dev-libs/D-1": { "DEPEND": "dev-libs/E dev-libs/F", "LICENSE": "TEST" },
"dev-libs/E-1": { "LICENSE": "TEST" },
"dev-libs/E-2": { "LICENSE": "TEST" },
@@ -292,40 +292,40 @@ class AutounmaskTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
["=dev-libs/A-1"],
- options = {"--autounmask": 'n'},
- success = False),
+ options={"--autounmask": 'n'},
+ success=False),
ResolverPlaygroundTestCase(
["=dev-libs/A-1"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/A-1"],
- license_changes = { "dev-libs/A-1": set(["TEST"]) }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/A-1"],
+ license_changes={ "dev-libs/A-1": set(["TEST"]) }),
#Test license+keyword+use change at once.
ResolverPlaygroundTestCase(
["=dev-libs/C-1"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/B-1", "dev-libs/C-1"],
- license_changes = { "dev-libs/B-1": set(["TEST"]) },
- unstable_keywords = ["dev-libs/B-1"],
- use_changes = { "dev-libs/B-1": { "foo": True } }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/B-1", "dev-libs/C-1"],
+ license_changes={ "dev-libs/B-1": set(["TEST"]) },
+ unstable_keywords=["dev-libs/B-1"],
+ use_changes={ "dev-libs/B-1": { "foo": True } }),
#Test license with backtracking.
ResolverPlaygroundTestCase(
["=dev-libs/D-1"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-libs/E-1", "dev-libs/F-1", "dev-libs/D-1"],
- license_changes = { "dev-libs/D-1": set(["TEST"]), "dev-libs/E-1": set(["TEST"]), "dev-libs/E-2": set(["TEST"]), "dev-libs/F-1": set(["TEST"]) }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/E-1", "dev-libs/F-1", "dev-libs/D-1"],
+ license_changes={ "dev-libs/D-1": set(["TEST"]), "dev-libs/E-1": set(["TEST"]), "dev-libs/E-2": set(["TEST"]), "dev-libs/F-1": set(["TEST"]) }),
#Test license only for bug #420847
ResolverPlaygroundTestCase(
["dev-java/sun-jdk"],
- options = {"--autounmask": True},
- success = False,
- mergelist = ["dev-java/sun-jdk-1.6.0.31"],
- license_changes = { "dev-java/sun-jdk-1.6.0.31": set(["TEST"]) }),
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-java/sun-jdk-1.6.0.31"],
+ license_changes={ "dev-java/sun-jdk-1.6.0.31": set(["TEST"]) }),
)
playground = ResolverPlayground(ebuilds=ebuilds)
@@ -348,7 +348,7 @@ class AutounmaskTestCase(TestCase):
"dev-libs/D-1": { "DEPEND": "dev-libs/A" },
}
- world_sets = [ "@test-set" ]
+ world_sets = ["@test-set"]
sets = {
"test-set": (
"dev-libs/A", "dev-libs/B", "dev-libs/C", "dev-libs/D",
@@ -362,29 +362,29 @@ class AutounmaskTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/B", "dev-libs/C", "dev-libs/D"],
all_permutations=True,
- options = {"--autounmask": "y"},
+ options={"--autounmask": "y"},
mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
ignore_mergelist_order=True,
- unstable_keywords = ["dev-libs/A-2"],
- success = False),
+ unstable_keywords=["dev-libs/A-2"],
+ success=False),
ResolverPlaygroundTestCase(
["@test-set"],
all_permutations=True,
- options = {"--autounmask": "y"},
+ options={"--autounmask": "y"},
mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
ignore_mergelist_order=True,
- unstable_keywords = ["dev-libs/A-2"],
- success = False),
+ unstable_keywords=["dev-libs/A-2"],
+ success=False),
ResolverPlaygroundTestCase(
["@world"],
all_permutations=True,
- options = {"--autounmask": "y"},
+ options={"--autounmask": "y"},
mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
ignore_mergelist_order=True,
- unstable_keywords = ["dev-libs/A-2"],
- success = False),
+ unstable_keywords=["dev-libs/A-2"],
+ success=False),
)
@@ -411,16 +411,16 @@ class AutounmaskTestCase(TestCase):
#Test mask and keyword changes.
ResolverPlaygroundTestCase(
["app-text/A"],
- options = {"--autounmask": True,
- "--autounmask-keep-masks": "y"},
- success = False),
+ options={"--autounmask": True,
+ "--autounmask-keep-masks": "y"},
+ success=False),
ResolverPlaygroundTestCase(
["app-text/A"],
- options = {"--autounmask": True,
- "--autounmask-keep-masks": "n"},
- success = False,
- mergelist = ["app-text/A-1"],
- needed_p_mask_changes = ["app-text/A-1"]),
+ options={"--autounmask": True,
+ "--autounmask-keep-masks": "n"},
+ success=False,
+ mergelist=["app-text/A-1"],
+ needed_p_mask_changes=["app-text/A-1"]),
)
profile = {
@@ -460,16 +460,16 @@ class AutounmaskTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
["dev-libs/B"],
- success = False,
- mergelist = ["dev-libs/A-2", "dev-libs/B-1"],
- needed_p_mask_changes = set(["dev-libs/A-2"])),
+ success=False,
+ mergelist=["dev-libs/A-2", "dev-libs/B-1"],
+ needed_p_mask_changes=set(["dev-libs/A-2"])),
ResolverPlaygroundTestCase(
["dev-libs/C"],
- success = False,
- mergelist = ["dev-libs/A-9999", "dev-libs/C-1"],
- unstable_keywords = set(["dev-libs/A-9999"]),
- needed_p_mask_changes = set(["dev-libs/A-9999"])),
+ success=False,
+ mergelist=["dev-libs/A-9999", "dev-libs/C-1"],
+ unstable_keywords=set(["dev-libs/A-9999"]),
+ needed_p_mask_changes=set(["dev-libs/A-9999"])),
)
playground = ResolverPlayground(ebuilds=ebuilds, profile=profile)
diff --git a/pym/portage/tests/resolver/test_autounmask_multilib_use.py b/pym/portage/tests/resolver/test_autounmask_multilib_use.py
new file mode 100644
index 000000000..e160c77ce
--- /dev/null
+++ b/pym/portage/tests/resolver/test_autounmask_multilib_use.py
@@ -0,0 +1,85 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class AutounmaskMultilibUseTestCase(TestCase):
+
+ def testAutounmaskMultilibUse(self):
+
+ self.todo = True
+
+ ebuilds = {
+ "x11-proto/xextproto-7.2.1-r1": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ },
+ "x11-libs/libXaw-1.0.11-r2": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ "RDEPEND": "x11-proto/xextproto[abi_x86_32(-)?,abi_x86_64(-)?]"
+ },
+ "app-emulation/emul-linux-x86-xlibs-20130224-r2": {
+ "EAPI": "5",
+ "RDEPEND": "x11-libs/libXaw[abi_x86_32]"
+ },
+ "games-util/steam-client-meta-0-r20130514": {
+ "EAPI": "5",
+ "RDEPEND": "app-emulation/emul-linux-x86-xlibs"
+ }
+ }
+
+ installed = {
+ "x11-proto/xextproto-7.2.1-r1": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ "USE": "abi_x86_32 abi_x86_64"
+ },
+ "x11-libs/libXaw-1.0.11-r2": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ "RDEPEND": "x11-proto/xextproto[abi_x86_32(-)?,abi_x86_64(-)?]",
+ "USE": "abi_x86_32 abi_x86_64"
+ },
+ "app-emulation/emul-linux-x86-xlibs-20130224-r2": {
+ "EAPI": "5",
+ "RDEPEND": "x11-libs/libXaw[abi_x86_32]"
+ },
+ "games-util/steam-client-meta-0-r20130514": {
+ "EAPI": "5",
+ "RDEPEND": "app-emulation/emul-linux-x86-xlibs"
+ }
+ }
+
+ user_config = {
+ #"make.conf" : ("USE=\"abi_x86_32 abi_x86_64\"",)
+ "make.conf" : ("USE=\"abi_x86_64\"",)
+ }
+
+ world = ("games-util/steam-client-meta",)
+
+ test_cases = (
+
+ # Test autounmask solving of multilib use deps for bug #481628.
+ # We would like it to suggest some USE changes, but instead it
+ # currently fails with a SLOT conflict.
+
+ ResolverPlaygroundTestCase(
+ ["x11-proto/xextproto", "x11-libs/libXaw"],
+ options = {"--oneshot": True, "--autounmask": True,
+ "--backtrack": 30},
+ mergelist = ["x11-proto/xextproto-7.2.1-r1", "x11-libs/libXaw-1.0.11-r2"],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ user_config=user_config, world=world, debug=False)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
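
The use-dependency syntax exercised here, e.g. x11-proto/xextproto[abi_x86_32(-)?,abi_x86_64(-)?], follows EAPI 5 semantics: the trailing ? forwards the parent package's USE state for the flag to the dependency, and the (-) default tells the resolver to treat the flag as disabled when the dependency's IUSE does not declare it, rather than failing the match outright.
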
diff --git a/pym/portage/tests/resolver/test_backtracking.py b/pym/portage/tests/resolver/test_backtracking.py
index 600f68216..3b69eda09 100644
--- a/pym/portage/tests/resolver/test_backtracking.py
+++ b/pym/portage/tests/resolver/test_backtracking.py
@@ -1,4 +1,4 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -31,7 +31,7 @@ class BacktrackingTestCase(TestCase):
playground.cleanup()
- def testHittingTheBacktrackLimit(self):
+ def testBacktrackNotNeeded(self):
ebuilds = {
"dev-libs/A-1": {},
"dev-libs/A-2": {},
@@ -45,47 +45,9 @@ class BacktrackingTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/C", "dev-libs/D"],
all_permutations = True,
- mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
- ignore_mergelist_order = True,
- success = True),
- #This one hits the backtrack limit. Be aware that this depends on the argument order.
- ResolverPlaygroundTestCase(
- ["dev-libs/D", "dev-libs/C"],
options = { "--backtrack": 1 },
- mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/A-2", "dev-libs/B-2", "dev-libs/C-1", "dev-libs/D-1"],
+ mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
ignore_mergelist_order = True,
- slot_collision_solutions = [],
- success = False),
- )
-
- playground = ResolverPlayground(ebuilds=ebuilds)
-
- try:
- for test_case in test_cases:
- playground.run_TestCase(test_case)
- self.assertEqual(test_case.test_success, True, test_case.fail_msg)
- finally:
- playground.cleanup()
-
-
- def testBacktrackingGoodVersionFirst(self):
- """
- When backtracking due to slot conflicts, we masked the version that has been pulled
- in first. This is not always a good idea. Mask the highest version instead.
- """
-
- ebuilds = {
- "dev-libs/A-1": { "DEPEND": "=dev-libs/C-1 dev-libs/B" },
- "dev-libs/B-1": { "DEPEND": "=dev-libs/C-1" },
- "dev-libs/B-2": { "DEPEND": "=dev-libs/C-2" },
- "dev-libs/C-1": { },
- "dev-libs/C-2": { },
- }
-
- test_cases = (
- ResolverPlaygroundTestCase(
- ["dev-libs/A"],
- mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1", ],
success = True),
)
@@ -118,7 +80,7 @@ class BacktrackingTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/B", "dev-libs/A"],
all_permutations = True,
- mergelist = ["dev-libs/Z-2", "dev-libs/B-1", "dev-libs/A-1", ],
+ mergelist = ["dev-libs/Z-2", "dev-libs/B-1", "dev-libs/A-1",],
ignore_mergelist_order = True,
success = True),
)
@@ -190,7 +152,7 @@ class BacktrackingTestCase(TestCase):
"dev-libs/D-1": { "RDEPEND": "<dev-libs/A-2" },
}
- world = [ "dev-libs/B", "dev-libs/C" ]
+ world = ["dev-libs/B", "dev-libs/C"]
options = {'--update' : True, '--deep' : True, '--selective' : True}
diff --git a/pym/portage/tests/resolver/test_blocker.py b/pym/portage/tests/resolver/test_blocker.py
new file mode 100644
index 000000000..94a88b8b4
--- /dev/null
+++ b/pym/portage/tests/resolver/test_blocker.py
@@ -0,0 +1,48 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SlotConflictWithBlockerTestCase(TestCase):
+
+ def testBlocker(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/X" },
+ "dev-libs/B-1": { "DEPEND": "<dev-libs/X-2" },
+ "dev-libs/C-1": { "DEPEND": "<dev-libs/X-3" },
+
+ "dev-libs/X-1": { "EAPI": "2", "RDEPEND": "!=dev-libs/Y-1" },
+ "dev-libs/X-2": { "EAPI": "2", "RDEPEND": "!=dev-libs/Y-2" },
+ "dev-libs/X-3": { "EAPI": "2", "RDEPEND": "!=dev-libs/Y-3" },
+
+ "dev-libs/Y-1": { "SLOT": "1" },
+ "dev-libs/Y-2": { "SLOT": "2" },
+ "dev-libs/Y-3": { "SLOT": "3" },
+ }
+
+ installed = {
+ "dev-libs/Y-1": { "SLOT": "1" },
+ "dev-libs/Y-2": { "SLOT": "2" },
+ "dev-libs/Y-3": { "SLOT": "3" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B", "dev-libs/C"],
+ options = { "--backtrack": 0 },
+ all_permutations = True,
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ["dev-libs/X-1", "[uninstall]dev-libs/Y-1", "!=dev-libs/Y-1", \
+ ("dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1")]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
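
The expected mergelist illustrates how a satisfied blocker is represented: the installed package that triggers !=dev-libs/Y-1 appears as an [uninstall] task, followed by the blocker atom itself, before the packages that pulled it in.
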
diff --git a/pym/portage/tests/resolver/test_complete_graph.py b/pym/portage/tests/resolver/test_complete_graph.py
index 1b0342c67..95b1f8809 100644
--- a/pym/portage/tests/resolver/test_complete_graph.py
+++ b/pym/portage/tests/resolver/test_complete_graph.py
@@ -93,7 +93,7 @@ class CompleteGraphTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
[">=sys-libs/x-2"],
- options = {"--complete-graph-if-new-ver" : "n", "--rebuild-if-new-slot-abi": "n"},
+ options = {"--complete-graph-if-new-ver" : "n", "--rebuild-if-new-slot": "n"},
mergelist = ["sys-libs/x-2"],
success = True,
),
@@ -106,7 +106,7 @@ class CompleteGraphTestCase(TestCase):
),
ResolverPlaygroundTestCase(
["<sys-libs/x-1"],
- options = {"--complete-graph-if-new-ver" : "n", "--rebuild-if-new-slot-abi": "n"},
+ options = {"--complete-graph-if-new-ver" : "n", "--rebuild-if-new-slot": "n"},
mergelist = ["sys-libs/x-0.1"],
success = True,
),
diff --git a/pym/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py b/pym/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py
new file mode 100644
index 000000000..fddbead7c
--- /dev/null
+++ b/pym/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py
@@ -0,0 +1,74 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class CompleteIfNewSubSlotWithoutRevBumpTestCase(TestCase):
+
+ def testCompleteIfNewSubSlotWithoutRevBump(self):
+
+ ebuilds = {
+ "media-libs/libpng-1.5.14" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "x11-libs/gdk-pixbuf-2.26.5" : {
+ "EAPI": "5",
+ "DEPEND": ">=media-libs/libpng-1.4:=",
+ "RDEPEND": ">=media-libs/libpng-1.4:="
+ },
+ }
+
+ binpkgs = {
+ "x11-libs/gdk-pixbuf-2.26.5" : {
+ "EAPI": "5",
+ "DEPEND": ">=media-libs/libpng-1.4:0/15=",
+ "RDEPEND": ">=media-libs/libpng-1.4:0/15="
+ },
+ }
+
+ installed = {
+ "media-libs/libpng-1.5.14" : {
+ "EAPI": "5",
+ "SLOT": "0/15"
+ },
+
+ "x11-libs/gdk-pixbuf-2.26.5" : {
+ "EAPI": "5",
+ "DEPEND": ">=media-libs/libpng-1.4:0/15=",
+ "RDEPEND": ">=media-libs/libpng-1.4:0/15="
+ },
+ }
+
+ world = ["x11-libs/gdk-pixbuf"]
+
+ test_cases = (
+ # Test that --complete-graph-if-new-ver=y triggers rebuild
+ # when the sub-slot changes without a revbump.
+ ResolverPlaygroundTestCase(
+ ["media-libs/libpng"],
+ options = {
+ "--oneshot": True,
+ "--complete-graph-if-new-ver": "y",
+ "--rebuild-if-new-slot": "n",
+ "--usepkg": True
+ },
+ success = True,
+ mergelist = [
+ "media-libs/libpng-1.5.14",
+ "x11-libs/gdk-pixbuf-2.26.5"
+ ]
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
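
The sub-slot mechanics driving this test: under EAPI 5, a := slot operator is rewritten at build time to the slot/sub-slot actually used, so the abstract >=media-libs/libpng-1.4:= in the ebuild becomes >=media-libs/libpng-1.4:0/15= in the installed and binary package metadata. When a newer libpng provides a different sub-slot without a revision bump, --complete-graph-if-new-ver=y brings the recorded consumers back into the graph so they are rebuilt against the new ABI.
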
diff --git a/pym/portage/tests/resolver/test_depclean.py b/pym/portage/tests/resolver/test_depclean.py
index ba70144b8..42350be8b 100644
--- a/pym/portage/tests/resolver/test_depclean.py
+++ b/pym/portage/tests/resolver/test_depclean.py
@@ -23,9 +23,9 @@ class SimpleDepcleanTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/B-1"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1"]),
)
playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
@@ -63,9 +63,9 @@ class DepcleanWithDepsTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/B-1", "dev-libs/D-1",
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1", "dev-libs/D-1",
"dev-libs/E-1", "dev-libs/F-1"]),
)
@@ -104,10 +104,10 @@ class DepcleanWithInstalledMaskedTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True},
- success = True,
- #cleanlist = ["dev-libs/C-1"]),
- cleanlist = ["dev-libs/B-1"]),
+ options={"--depclean": True},
+ success=True,
+ #cleanlist=["dev-libs/C-1"]),
+ cleanlist=["dev-libs/B-1"]),
)
playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
@@ -143,9 +143,9 @@ class DepcleanInstalledKeywordMaskedSlotTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/B-2.7"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-2.7"]),
)
playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
@@ -169,31 +169,31 @@ class DepcleanWithExcludeTestCase(TestCase):
#Without --exclude.
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/B-1", "dev-libs/A-1"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1", "dev-libs/A-1"]),
ResolverPlaygroundTestCase(
["dev-libs/A"],
- options = {"--depclean": True},
- success = True,
- cleanlist = []),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=[]),
ResolverPlaygroundTestCase(
["dev-libs/B"],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/B-1"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1"]),
#With --exclude
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True, "--exclude": ["dev-libs/A"]},
- success = True,
- cleanlist = ["dev-libs/B-1"]),
+ options={"--depclean": True, "--exclude": ["dev-libs/A"]},
+ success=True,
+ cleanlist=["dev-libs/B-1"]),
ResolverPlaygroundTestCase(
["dev-libs/B"],
- options = {"--depclean": True, "--exclude": ["dev-libs/B"]},
- success = True,
- cleanlist = []),
+ options={"--depclean": True, "--exclude": ["dev-libs/B"]},
+ success=True,
+ cleanlist=[]),
)
playground = ResolverPlayground(installed=installed)
@@ -215,25 +215,25 @@ class DepcleanWithExcludeAndSlotsTestCase(TestCase):
"dev-libs/Y-2": { "RDEPEND": "=dev-libs/Z-2", "SLOT": 2 },
}
- world = [ "dev-libs/Y" ]
+ world=["dev-libs/Y"]
test_cases = (
#Without --exclude.
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/Y-1", "dev-libs/Z-1"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/Y-1", "dev-libs/Z-1"]),
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True, "--exclude": ["dev-libs/Z"]},
- success = True,
- cleanlist = ["dev-libs/Y-1"]),
+ options={"--depclean": True, "--exclude": ["dev-libs/Z"]},
+ success=True,
+ cleanlist=["dev-libs/Y-1"]),
ResolverPlaygroundTestCase(
[],
- options = {"--depclean": True, "--exclude": ["dev-libs/Y"]},
- success = True,
- cleanlist = []),
+ options={"--depclean": True, "--exclude": ["dev-libs/Y"]},
+ success=True,
+ cleanlist=[]),
)
playground = ResolverPlayground(installed=installed, world=world)
@@ -256,24 +256,24 @@ class DepcleanAndWildcardsTestCase(TestCase):
test_cases = (
ResolverPlaygroundTestCase(
["*/*"],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/A-1", "dev-libs/B-1"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/A-1", "dev-libs/B-1"]),
ResolverPlaygroundTestCase(
["dev-libs/*"],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/A-1", "dev-libs/B-1"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/A-1", "dev-libs/B-1"]),
ResolverPlaygroundTestCase(
["*/A"],
- options = {"--depclean": True},
- success = True,
- cleanlist = ["dev-libs/A-1"]),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/A-1"]),
ResolverPlaygroundTestCase(
["*/B"],
- options = {"--depclean": True},
- success = True,
- cleanlist = []),
+ options={"--depclean": True},
+ success=True,
+ cleanlist=[]),
)
playground = ResolverPlayground(installed=installed)
diff --git a/pym/portage/tests/resolver/test_depclean_order.py b/pym/portage/tests/resolver/test_depclean_order.py
new file mode 100644
index 000000000..9511d292c
--- /dev/null
+++ b/pym/portage/tests/resolver/test_depclean_order.py
@@ -0,0 +1,57 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SimpleDepcleanTestCase(TestCase):
+
+ def testSimpleDepclean(self):
+
+ ebuilds = {
+ "dev-libs/A-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/B:=",
+ },
+ "dev-libs/B-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/A",
+ },
+ "dev-libs/C-1": {},
+ }
+
+ installed = {
+ "dev-libs/A-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/B:0/0=",
+ },
+ "dev-libs/B-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/A",
+ },
+ "dev-libs/C-1": {},
+ }
+
+ world = (
+ "dev-libs/C",
+ )
+
+ test_cases = (
+ # Remove dev-libs/A-1 first because of dev-libs/B:0/0= (built
+ # slot-operator dep).
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ ordered=True,
+ cleanlist=["dev-libs/A-1", "dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
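
The ordering matters because dev-libs/A-1's installed RDEPEND was rewritten to dev-libs/B:0/0= at build time; unmerging B first would momentarily leave A installed with an unsatisfied built slot-operator dep, so depclean removes the consumer before its provider.
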
diff --git a/pym/portage/tests/resolver/test_depclean_slot_unavailable.py b/pym/portage/tests/resolver/test_depclean_slot_unavailable.py
new file mode 100644
index 000000000..689392bb5
--- /dev/null
+++ b/pym/portage/tests/resolver/test_depclean_slot_unavailable.py
@@ -0,0 +1,78 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class DepcleanUnavailableSlotTestCase(TestCase):
+
+ def testDepcleanUnavailableSlot(self):
+ """
+ Test bug #445506, where we want to remove the slot
+ for which the ebuild is no longer available, even
+ though its version is higher.
+ """
+
+ ebuilds = {
+ "sys-kernel/gentoo-sources-3.0.53": {
+ "SLOT": "3.0.53",
+ "KEYWORDS": "x86"
+ },
+ }
+
+ installed = {
+ "sys-kernel/gentoo-sources-3.0.53": {
+ "SLOT": "3.0.53",
+ "KEYWORDS": "x86"
+ },
+ "sys-kernel/gentoo-sources-3.2.21": {
+ "SLOT": "3.2.21",
+ "KEYWORDS": "x86"
+ },
+ }
+
+ world = ["sys-kernel/gentoo-sources"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["sys-kernel/gentoo-sources-3.2.21"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ # Now make the newer version available and verify that
+ # the lower version is depcleaned.
+ ebuilds.update({
+ "sys-kernel/gentoo-sources-3.2.21": {
+ "SLOT": "3.2.21",
+ "KEYWORDS": "x86"
+ },
+ })
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["sys-kernel/gentoo-sources-3.0.53"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_features_test_use.py b/pym/portage/tests/resolver/test_features_test_use.py
new file mode 100644
index 000000000..bdd179d7a
--- /dev/null
+++ b/pym/portage/tests/resolver/test_features_test_use.py
@@ -0,0 +1,68 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class FeaturesTestUse(TestCase):
+
+ def testFeaturesTestUse(self):
+ ebuilds = {
+ "dev-libs/A-1" : {
+ "IUSE": "test"
+ },
+ "dev-libs/B-1" : {
+ "IUSE": "test foo"
+ },
+ }
+
+ installed = {
+ "dev-libs/A-1" : {
+ "USE": "",
+ "IUSE": "test"
+ },
+ "dev-libs/B-1" : {
+ "USE": "foo",
+ "IUSE": "test foo"
+ },
+ }
+
+ user_config = {
+ "make.conf" : ("FEATURES=test", "USE=\"-test -foo\"")
+ }
+
+ test_cases = (
+
+ # USE=test state should not trigger --newuse rebuilds, as
+ # specified in bug #373209, comment #3.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--newuse": True, "--selective": True},
+ success = True,
+ mergelist = []),
+
+ # USE=-test -> USE=test, with USE=test forced by FEATURES=test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {},
+ success = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ # USE=foo -> USE=-foo, with USE=test forced by FEATURES=test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--newuse": True, "--selective": True},
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
diff --git a/pym/portage/tests/resolver/test_merge_order.py b/pym/portage/tests/resolver/test_merge_order.py
index 5b5709afe..5d000d12b 100644
--- a/pym/portage/tests/resolver/test_merge_order.py
+++ b/pym/portage/tests/resolver/test_merge_order.py
@@ -1,4 +1,4 @@
-# Copyright 2011 Gentoo Foundation
+# Copyright 2011-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
@@ -191,6 +191,12 @@ class MergeOrderTestCase(TestCase):
"DEPEND" : "kde-base/libkdegames",
"RDEPEND" : "kde-base/libkdegames",
},
+ "media-libs/mesa-9.1.3" : {
+ "EAPI" : "5",
+ "IUSE" : "+xorg",
+ "DEPEND" : "xorg? ( x11-base/xorg-server:= )",
+ "RDEPEND" : "xorg? ( x11-base/xorg-server:= )",
+ },
"media-video/libav-0.7_pre20110327" : {
"EAPI" : "2",
"IUSE" : "X +encode",
@@ -205,6 +211,12 @@ class MergeOrderTestCase(TestCase):
"IUSE" : "X +encode",
"RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )",
},
+ "x11-base/xorg-server-1.14.1" : {
+ "EAPI" : "5",
+ "SLOT": "0/1.14.1",
+ "DEPEND" : "media-libs/mesa",
+ "RDEPEND" : "media-libs/mesa",
+ },
}
installed = {
@@ -256,6 +268,13 @@ class MergeOrderTestCase(TestCase):
"RDEPEND": "",
},
"app-arch/xz-utils-5.0.1" : {},
+ "media-libs/mesa-9.1.3" : {
+ "EAPI" : "5",
+ "IUSE" : "+xorg",
+ "USE": "xorg",
+ "DEPEND" : "x11-base/xorg-server:0/1.14.1=",
+ "RDEPEND" : "x11-base/xorg-server:0/1.14.1=",
+ },
"media-video/ffmpeg-0.7_rc1" : {
"EAPI" : "2",
"IUSE" : "X +encode",
@@ -267,6 +286,12 @@ class MergeOrderTestCase(TestCase):
"USE" : "encode",
"RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )",
},
+ "x11-base/xorg-server-1.14.1" : {
+ "EAPI" : "5",
+ "SLOT": "0/1.14.1",
+ "DEPEND" : "media-libs/mesa",
+ "RDEPEND" : "media-libs/mesa",
+ },
}
test_cases = (
@@ -434,6 +459,14 @@ class MergeOrderTestCase(TestCase):
('kde-base/libkdegames-3.5.7', 'kde-base/kmines-3.5.7'),
),
mergelist = [('kde-base/kdelibs-3.5.7', 'dev-util/pkgconfig-0.25-r2', 'kde-misc/kdnssd-avahi-0.1.2', 'app-arch/xz-utils-5.0.2', 'kde-base/libkdegames-3.5.7', 'kde-base/kdnssd-3.5.7', 'kde-base/kmines-3.5.7')]),
+ # Test satisfied circular DEPEND/RDEPEND with one := operator.
+ # Both deps are already satisfied by installed packages, but
+ # the := dep is given higher priority in merge order.
+ ResolverPlaygroundTestCase(
+ ["media-libs/mesa", "x11-base/xorg-server"],
+ success=True,
+ all_permutations = True,
+ mergelist = ['x11-base/xorg-server-1.14.1', 'media-libs/mesa-9.1.3']),
)
playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
diff --git a/pym/portage/tests/resolver/test_multirepo.py b/pym/portage/tests/resolver/test_multirepo.py
index 34c6d45a0..2b1a6d073 100644
--- a/pym/portage/tests/resolver/test_multirepo.py
+++ b/pym/portage/tests/resolver/test_multirepo.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -37,16 +37,25 @@ class MultirepoTestCase(TestCase):
"dev-libs/I-1::repo2": { "SLOT" : "1"},
"dev-libs/I-2::repo2": { "SLOT" : "2"},
+
+ "dev-libs/K-1::repo2": { },
}
installed = {
"dev-libs/H-1": { "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )"},
"dev-libs/I-2::repo1": {"SLOT" : "2"},
+ "dev-libs/K-1::repo1": { },
+ }
+
+ binpkgs = {
+ "dev-libs/C-1::repo2": { },
+ "dev-libs/I-2::repo1": {"SLOT" : "2"},
+ "dev-libs/K-1::repo2": { },
}
sets = {
- "multirepotest":
- ( "dev-libs/A::test_repo", )
+ "multirepotest":
+ ("dev-libs/A::test_repo",)
}
test_cases = (
@@ -96,6 +105,68 @@ class MultirepoTestCase(TestCase):
check_repo_names = True,
mergelist = ["dev-libs/D-1::repo2"]),
+ #--usepkg: don't reinstall on new repo without --newrepo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkg": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/C-1::repo2"]),
+
+ #--usepkgonly: don't reinstall on new repo without --newrepo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkgonly": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/C-1::repo2"]),
+
+ #--newrepo: pick ebuild if binpkg/ebuild have different repo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkg": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/C-1::repo1"]),
+
+ #--newrepo --usepkgonly: ebuild is ignored
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkgonly": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/C-1::repo2"]),
+
+ #--newrepo: pick ebuild if binpkg/ebuild have different repo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/I"],
+ options = {"--usepkg": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/I-2::repo2"]),
+
+ #--newrepo --usepkgonly: if binpkg matches installed, do nothing
+ ResolverPlaygroundTestCase(
+ ["dev-libs/I"],
+ options = {"--usepkgonly": True, "--newrepo": True, "--selective": True},
+ success = True,
+ mergelist = []),
+
+ #--newrepo --usepkgonly: reinstall if binpkg has new repo.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/K"],
+ options = {"--usepkgonly": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/K-1::repo2"]),
+
+ #--usepkgonly: don't reinstall on new repo without --newrepo.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/K"],
+ options = {"--usepkgonly": True, "--selective": True},
+ success = True,
+ mergelist = []),
+
#Atoms with slots
ResolverPlaygroundTestCase(
["dev-libs/E"],
@@ -137,6 +208,15 @@ class MultirepoTestCase(TestCase):
success = True,
mergelist = []),
+ # A dependency on the installed dev-libs/I-2 should trigger a reinstall
+ # when the --newrepo flag is used.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/H"],
+ options = {"--update": True, "--deep": True, "--newrepo": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/I-2::repo2"]),
+
# Check interaction between repo priority and unsatisfied
# REQUIRED_USE, for bug #350254.
ResolverPlaygroundTestCase(
@@ -147,7 +227,7 @@ class MultirepoTestCase(TestCase):
)
playground = ResolverPlayground(ebuilds=ebuilds,
- installed=installed, sets=sets)
+ binpkgs=binpkgs, installed=installed, sets=sets)
try:
for test_case in test_cases:
playground.run_TestCase(test_case)
diff --git a/pym/portage/tests/resolver/test_onlydeps.py b/pym/portage/tests/resolver/test_onlydeps.py
new file mode 100644
index 000000000..986769aee
--- /dev/null
+++ b/pym/portage/tests/resolver/test_onlydeps.py
@@ -0,0 +1,34 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class OnlydepsTestCase(TestCase):
+
+ def testOnlydeps(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/B" },
+ "dev-libs/B-1": { },
+ }
+ installed = {
+ "dev-libs/B-1": { },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ success = True,
+ options = { "--onlydeps": True },
+ mergelist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_or_choices.py b/pym/portage/tests/resolver/test_or_choices.py
new file mode 100644
index 000000000..90e681408
--- /dev/null
+++ b/pym/portage/tests/resolver/test_or_choices.py
@@ -0,0 +1,134 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class OrChoicesTestCase(TestCase):
+
+ def testOrChoices(self):
+ ebuilds = {
+ "dev-lang/vala-0.20.0" : {
+ "EAPI": "5",
+ "SLOT": "0.20"
+ },
+ "dev-lang/vala-0.18.0" : {
+ "EAPI": "5",
+ "SLOT": "0.18"
+ },
+ #"dev-libs/gobject-introspection-1.36.0" : {
+ # "EAPI": "5",
+ # "RDEPEND" : "!<dev-lang/vala-0.20.0",
+ #},
+ "dev-libs/gobject-introspection-1.34.0" : {
+ "EAPI": "5"
+ },
+ "sys-apps/systemd-ui-2" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( dev-lang/vala:0.20 dev-lang/vala:0.18 )"
+ },
+ }
+
+ installed = {
+ "dev-lang/vala-0.18.0" : {
+ "EAPI": "5",
+ "SLOT": "0.18"
+ },
+ "dev-libs/gobject-introspection-1.34.0" : {
+ "EAPI": "5"
+ },
+ "sys-apps/systemd-ui-2" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( dev-lang/vala:0.20 dev-lang/vala:0.18 )"
+ },
+ }
+
+ world = ["dev-libs/gobject-introspection", "sys-apps/systemd-ui"]
+
+ test_cases = (
+ # Demonstrate that vala:0.20 update is pulled in, for bug #478188
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success=True,
+ all_permutations = True,
+ mergelist = ['dev-lang/vala-0.20.0']),
+ # Verify that vala:0.20 is not pulled in without --deep
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True},
+ success=True,
+ all_permutations = True,
+ mergelist = []),
+ # Verify that vala:0.20 is not pulled in without --update
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--selective": True, "--deep": True},
+ success=True,
+ all_permutations = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testOrChoicesLibpostproc(self):
+ ebuilds = {
+ "media-video/ffmpeg-0.10" : {
+ "EAPI": "5",
+ "SLOT": "0.10"
+ },
+ "media-video/ffmpeg-1.2.2" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+ "media-libs/libpostproc-0.8.0.20121125" : {
+ "EAPI": "5"
+ },
+ "media-plugins/gst-plugins-ffmpeg-0.10.13_p201211-r1" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( media-video/ffmpeg:0 media-libs/libpostproc )"
+ },
+ }
+
+ installed = {
+ "media-video/ffmpeg-0.10" : {
+ "EAPI": "5",
+ "SLOT": "0.10"
+ },
+ "media-libs/libpostproc-0.8.0.20121125" : {
+ "EAPI": "5"
+ },
+ "media-plugins/gst-plugins-ffmpeg-0.10.13_p201211-r1" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( media-video/ffmpeg:0 media-libs/libpostproc )"
+ },
+ }
+
+ world = ["media-plugins/gst-plugins-ffmpeg"]
+
+ test_cases = (
+ # Demonstrate that libpostproc is preferred
+ # over ffmpeg:0 for bug #480736.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success=True,
+ all_permutations = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_package_tracker.py b/pym/portage/tests/resolver/test_package_tracker.py
new file mode 100644
index 000000000..8fa3513e6
--- /dev/null
+++ b/pym/portage/tests/resolver/test_package_tracker.py
@@ -0,0 +1,261 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import collections
+
+from portage.dep import Atom
+from portage.tests import TestCase
+from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper
+
+class PackageTrackerTestCase(TestCase):
+
+ FakePackage = collections.namedtuple("FakePackage",
+ ["root", "cp", "cpv", "slot", "slot_atom", "version", "repo"])
+
+ FakeConflict = collections.namedtuple("FakeConflict",
+ ["description", "root", "pkgs"])
+
+ def make_pkg(self, root, atom, repo="test_repo"):
+ atom = Atom(atom)
+ slot_atom = Atom("%s:%s" % (atom.cp, atom.slot))
+ slot = atom.slot
+
+ return self.FakePackage(root=root, cp=atom.cp, cpv=atom.cpv,
+ slot=slot, slot_atom=slot_atom, version=atom.version, repo=repo)
+
+ def make_conflict(self, description, root, pkgs):
+ return self.FakeConflict(description=description, root=root, pkgs=pkgs)
+
+ def test_add_remove_discard(self):
+ p = PackageTracker()
+
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+
+ p.add_pkg(x1)
+ self.assertTrue(x1 in p)
+ self.assertTrue(p.contains(x1, installed=True))
+ self.assertTrue(p.contains(x1, installed=False))
+ p.remove_pkg(x1)
+ self.assertTrue(x1 not in p)
+
+ p.add_pkg(x1)
+ self.assertTrue(x1 in p)
+ p.add_pkg(x1)
+ self.assertTrue(x1 in p)
+
+ self.assertRaises(KeyError, p.remove_pkg, x2)
+
+ p.add_pkg(x2)
+ self.assertTrue(x2 in p)
+ p.remove_pkg(x2)
+ self.assertTrue(x2 not in p)
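+ # Unlike remove_pkg() above, discard_pkg() tolerates a package
+ # that is already absent and simply does nothing.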
+ p.discard_pkg(x2)
+ self.assertTrue(x2 not in p)
+ p.add_pkg(x2)
+ self.assertTrue(x2 in p)
+
+ all_pkgs = list(p.all_pkgs("/"))
+ self.assertEqual(len(all_pkgs), 2)
+ self.assertTrue(all_pkgs[0] is x1 and all_pkgs[1] is x2)
+
+ self.assertEqual(len(list(p.all_pkgs("/"))), 2)
+ self.assertEqual(len(list(p.all_pkgs("/xxx"))), 0)
+
+ def test_match(self):
+ p = PackageTracker()
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:1")
+
+ p.add_pkg(x2)
+ p.add_pkg(x1)
+
+ matches = list(p.match("/", Atom("=dev-libs/X-1")))
+ self.assertTrue(x1 in matches)
+ self.assertEqual(len(matches), 1)
+
+ matches = list(p.match("/", Atom("dev-libs/X")))
+ self.assertTrue(x1 is matches[0] and x2 is matches[1])
+ self.assertEqual(len(matches), 2)
+
+ matches = list(p.match("/xxx", Atom("dev-libs/X")))
+ self.assertEqual(len(matches), 0)
+
+ matches = list(p.match("/", Atom("dev-libs/Y")))
+ self.assertEqual(len(matches), 0)
+
+ p.add_pkg(x3)
+ matches = list(p.match("/", Atom("dev-libs/X")))
+ self.assertTrue(x1 is matches[0] and x2 is matches[1] and x3 is matches[2])
+ self.assertEqual(len(matches), 3)
+
+ p.remove_pkg(x3)
+ matches = list(p.match("/", Atom("dev-libs/X")))
+ self.assertTrue(x1 is matches[0] and x2 is matches[1])
+ self.assertEqual(len(matches), 2)
+
+ def test_dbapi_interface(self):
+ p = PackageTracker()
+ dbapi = PackageTrackerDbapiWrapper("/", p)
+ installed = self.make_pkg("/", "=dev-libs/X-0:0")
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:0")
+ x4 = self.make_pkg("/", "=dev-libs/X-4:6")
+ x5 = self.make_pkg("/xxx", "=dev-libs/X-5:6")
+
+ def check_dbapi(pkgs):
+ all_pkgs = set(dbapi)
+ self.assertEqual(len(all_pkgs), len(pkgs))
+
+ x_atom = "dev-libs/X"
+ y_atom = "dev-libs/Y"
+ matches = dbapi.cp_list(x_atom)
+ for pkg in pkgs:
+ if pkg.root == "/" and pkg.cp == x_atom:
+ self.assertTrue(pkg in matches)
+ self.assertTrue(not dbapi.cp_list(y_atom))
+ matches = dbapi.match(x_atom)
+ for pkg in pkgs:
+ if pkg.root == "/" and pkg.cp == x_atom:
+ self.assertTrue(pkg in matches)
+ self.assertTrue(not dbapi.match(y_atom))
+
+ check_dbapi([])
+
+ p.add_installed_pkg(installed)
+ check_dbapi([installed])
+
+ p.add_pkg(x1)
+ check_dbapi([x1])
+
+ p.remove_pkg(x1)
+ check_dbapi([installed])
+
+ dbapi.cpv_inject(x1)
+ check_dbapi([x1])
+
+ dbapi.cpv_inject(x2)
+ check_dbapi([x1, x2])
+
+ p.remove_pkg(x1)
+ check_dbapi([x2])
+
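+ # x5 has root "/xxx", so the dbapi wrapper bound to "/" never sees it.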
+ p.add_pkg(x5)
+ check_dbapi([x2])
+
+
+ def test_installed(self):
+ p = PackageTracker()
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x1b = self.make_pkg("/", "=dev-libs/X-1.1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:1")
+
+ def check_installed(x, should_contain, num_pkgs):
+ self.assertEqual(x in p, should_contain)
+ self.assertEqual(p.contains(x), should_contain)
+ self.assertEqual(p.contains(x, installed=True), should_contain)
+ self.assertEqual(p.contains(x, installed=False), False)
+ self.assertEqual(len(list(p.all_pkgs("/"))), num_pkgs)
+
+ def check_matches(atom, expected):
+ matches = list(p.match("/", Atom(atom)))
+ self.assertEqual(len(matches), len(expected))
+ for x, y in zip(matches, expected):
+ self.assertTrue(x is y)
+
+ p.add_installed_pkg(x1)
+ check_installed(x1, True, 1)
+ check_matches("dev-libs/X", [x1])
+
+ p.add_installed_pkg(x1)
+ check_installed(x1, True, 1)
+ check_matches("dev-libs/X", [x1])
+
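+ # Adding a non-installed package in the same slot hides the installed x1.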
+ p.add_pkg(x2)
+ check_installed(x1, False, 1)
+ check_matches("dev-libs/X", [x2])
+
+ p.add_installed_pkg(x1)
+ check_installed(x1, False, 1)
+ check_matches("dev-libs/X", [x2])
+
+ p.add_installed_pkg(x1b)
+ check_installed(x1, False, 1)
+ check_installed(x1b, False, 1)
+ check_matches("dev-libs/X", [x2])
+
+ p.remove_pkg(x2)
+ check_installed(x1, True, 2)
+ check_installed(x1b, True, 2)
+ check_matches("dev-libs/X", [x1, x1b])
+
+ def test_conflicts(self):
+ p = PackageTracker()
+ installed1 = self.make_pkg("/", "=dev-libs/X-0:0")
+ installed2 = self.make_pkg("/", "=dev-libs/X-0.1:0")
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:0")
+ x4 = self.make_pkg("/", "=dev-libs/X-4:4")
+ x4b = self.make_pkg("/", "=dev-libs/X-4:4b::x-repo")
+
+ def check_conflicts(expected, slot_conflicts_only=False):
+ if slot_conflicts_only:
+ conflicts = list(p.slot_conflicts())
+ else:
+ conflicts = list(p.conflicts())
+ self.assertEqual(len(conflicts), len(expected))
+ for got, exp in zip(conflicts, expected):
+ self.assertEqual(got.description, exp.description)
+ self.assertEqual(got.root, exp.root)
+ self.assertEqual(len(got.pkgs), len(exp.pkgs))
+ self.assertEqual(len(got), len(exp.pkgs))
+ for x, y in zip(got.pkgs, exp.pkgs):
+ self.assertTrue(x is y)
+ for x, y in zip(got, exp.pkgs):
+ self.assertTrue(x is y)
+ for x in exp.pkgs:
+ self.assertTrue(x in got)
+
+ check_conflicts([])
+ check_conflicts([])
+
+ p.add_installed_pkg(installed1)
+ p.add_installed_pkg(installed2)
+ check_conflicts([])
+
+ p.add_pkg(x1)
+ check_conflicts([])
+ p.add_pkg(x2)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x2])])
+ p.add_pkg(x3)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x2, x3])])
+ p.remove_pkg(x3)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x2])])
+ p.remove_pkg(x2)
+ check_conflicts([])
+ p.add_pkg(x3)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x3])])
+ p.add_pkg(x2)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x3, x2])])
+
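+ # x4 occupies slot 4 by itself, so it does not join the slot conflict.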
+ p.add_pkg(x4)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x3, x2])])
+
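+ # x4b duplicates x4's cpv from a different repo, adding a cpv conflict.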
+ p.add_pkg(x4b)
+ check_conflicts(
+ [
+ self.make_conflict("slot conflict", "/", [x1, x3, x2]),
+ self.make_conflict("cpv conflict", "/", [x4, x4b]),
+ ]
+ )
+
+ check_conflicts(
+ [
+ self.make_conflict("slot conflict", "/", [x1, x3, x2]),
+ ],
+ slot_conflicts_only=True
+ )
diff --git a/pym/portage/tests/resolver/test_regular_slot_change_without_revbump.py b/pym/portage/tests/resolver/test_regular_slot_change_without_revbump.py
new file mode 100644
index 000000000..415277bc7
--- /dev/null
+++ b/pym/portage/tests/resolver/test_regular_slot_change_without_revbump.py
@@ -0,0 +1,59 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class RegularSlotChangeWithoutRevBumpTestCase(TestCase):
+
+ def testRegularSlotChangeWithoutRevBumpTestCase(self):
+
+ ebuilds = {
+ "dev-libs/boost-1.52.0" : {
+ "SLOT": "0"
+ },
+ "app-office/libreoffice-4.0.0.2" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/boost-1.46:=",
+ "RDEPEND": ">=dev-libs/boost-1.46:=",
+ },
+ }
+
+ binpkgs = {
+ "dev-libs/boost-1.52.0" : {
+ "SLOT": "1.52"
+ },
+ }
+
+ installed = {
+ "dev-libs/boost-1.52.0" : {
+ "SLOT": "1.52"
+ },
+ }
+
+ world = []
+
+ test_cases = (
+ # Test that @__auto_slot_operator_replace_installed__
+ # pulls in the available slot, even though it's
+ # different from the installed slot (0 instead of 1.52).
+ ResolverPlaygroundTestCase(
+ ["app-office/libreoffice"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = [
+ 'dev-libs/boost-1.52.0',
+ 'app-office/libreoffice-4.0.0.2'
+ ]
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_slot_abi.py b/pym/portage/tests/resolver/test_slot_abi.py
index 6381bcc4d..7263504b8 100644
--- a/pym/portage/tests/resolver/test_slot_abi.py
+++ b/pym/portage/tests/resolver/test_slot_abi.py
@@ -1,4 +1,4 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -65,7 +65,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/icu"],
- options = {"--oneshot": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/icu-49"]),
@@ -83,7 +83,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/icu"],
- options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["[binary]dev-libs/icu-49"]),
@@ -95,7 +95,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/icu-49"]),
@@ -113,7 +113,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["[binary]dev-libs/icu-49"]),
@@ -178,7 +178,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["sys-libs/db"],
- options = {"--oneshot": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["sys-libs/db-4.8"]),
@@ -196,7 +196,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["sys-libs/db"],
- options = {"--oneshot": True, "--rebuild-if-new-slot-abi": "n"},
+ options = {"--oneshot": True, "--rebuild-if-new-slot": "n"},
success = True,
mergelist = ["sys-libs/db-4.8"]),
@@ -214,7 +214,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--usepkg": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--usepkg": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["[binary]sys-libs/db-4.8"]),
@@ -226,13 +226,13 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["[binary]sys-libs/db-4.8"]),
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--rebuild-if-new-slot-abi": "n"},
+ options = {"--update": True, "--deep": True, "--rebuild-if-new-slot": "n"},
success = True,
mergelist = []),
@@ -247,6 +247,89 @@ class SlotAbiTestCase(TestCase):
finally:
playground.cleanup()
+
+ def testWholeSlotConditional(self):
+ ebuilds = {
+ "dev-libs/libnl-3.2.14" : {
+ "SLOT": "3"
+ },
+ "dev-libs/libnl-1.1-r3" : {
+ "SLOT": "1.1"
+ },
+ "net-misc/networkmanager-0.9.6.4-r1" : {
+ "EAPI": "5",
+ "IUSE": "wimax",
+ "DEPEND": "wimax? ( dev-libs/libnl:1.1= ) !wimax? ( dev-libs/libnl:3= )",
+ "RDEPEND": "wimax? ( dev-libs/libnl:1.1= ) !wimax? ( dev-libs/libnl:3= )"
+ },
+ }
+ installed = {
+ "dev-libs/libnl-1.1-r3" : {
+ "SLOT": "1.1"
+ },
+ "net-misc/networkmanager-0.9.6.4-r1" : {
+ "EAPI": "5",
+ "IUSE": "wimax",
+ "USE": "wimax",
+ "DEPEND": "dev-libs/libnl:1.1/1.1=",
+ "RDEPEND": "dev-libs/libnl:1.1/1.1="
+ },
+ }
+
+ user_config = {
+ "make.conf" : ("USE=\"wimax\"",)
+ }
+
+ world = ["net-misc/networkmanager"]
+
+ test_cases = (
+
+ # Demonstrate bug #460304, where _slot_operator_update_probe needs
+ # to account for USE conditional deps.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = []),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config, world=world,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ user_config = {
+ "make.conf" : ("USE=\"-wimax\"",)
+ }
+
+ test_cases = (
+
+ # Demonstrate bug #460304 again, but with inverted USE
+ # settings this time.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ['dev-libs/libnl-3.2.14', 'net-misc/networkmanager-0.9.6.4-r1']),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config, world=world,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
def testWholeSlotSubSlotMix(self):
ebuilds = {
"dev-libs/glib-1.2.10" : {
@@ -312,7 +395,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/glib"],
- options = {"--oneshot": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/glib-2.32.3"]),
@@ -330,7 +413,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/glib"],
- options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["[binary]dev-libs/glib-2.32.3"]),
@@ -342,7 +425,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/glib-2.32.3"]),
@@ -360,7 +443,7 @@ class SlotAbiTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["[binary]dev-libs/glib-2.32.3"]),
diff --git a/pym/portage/tests/resolver/test_slot_abi_downgrade.py b/pym/portage/tests/resolver/test_slot_abi_downgrade.py
index 45a7555c2..08e9a9db2 100644
--- a/pym/portage/tests/resolver/test_slot_abi_downgrade.py
+++ b/pym/portage/tests/resolver/test_slot_abi_downgrade.py
@@ -61,7 +61,7 @@ class SlotAbiDowngradeTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/icu"],
- options = {"--oneshot": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/icu-4.8"]),
@@ -85,7 +85,7 @@ class SlotAbiDowngradeTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/icu-4.8"]),
@@ -173,7 +173,7 @@ class SlotAbiDowngradeTestCase(TestCase):
ResolverPlaygroundTestCase(
["dev-libs/glib"],
- options = {"--oneshot": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/glib-2.30.2"]),
@@ -197,7 +197,7 @@ class SlotAbiDowngradeTestCase(TestCase):
ResolverPlaygroundTestCase(
["@world"],
- options = {"--update": True, "--deep": True, "--ignore-built-slot-abi-deps": "y"},
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
success = True,
mergelist = ["dev-libs/glib-2.30.2"]),
diff --git a/pym/portage/tests/resolver/test_slot_change_without_revbump.py b/pym/portage/tests/resolver/test_slot_change_without_revbump.py
new file mode 100644
index 000000000..d85ff7e05
--- /dev/null
+++ b/pym/portage/tests/resolver/test_slot_change_without_revbump.py
@@ -0,0 +1,69 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotChangeWithoutRevBumpTestCase(TestCase):
+
+ def testSlotChangeWithoutRevBump(self):
+
+ ebuilds = {
+ "app-arch/libarchive-3.1.1" : {
+ "EAPI": "5",
+ "SLOT": "0/13"
+ },
+ "app-arch/libarchive-3.0.4-r1" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+ "kde-base/ark-4.10.0" : {
+ "EAPI": "5",
+ "DEPEND": "app-arch/libarchive:=",
+ "RDEPEND": "app-arch/libarchive:="
+ },
+ }
+
+ binpkgs = {
+ "app-arch/libarchive-3.1.1" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+ }
+
+ installed = {
+ "app-arch/libarchive-3.1.1" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "kde-base/ark-4.10.0" : {
+ "EAPI": "5",
+ "DEPEND": "app-arch/libarchive:0/0=",
+ "RDEPEND": "app-arch/libarchive:0/0="
+ },
+ }
+
+ world = ["kde-base/ark"]
+
+ test_cases = (
+
+ # Demonstrate bug #456208, where a sub-slot change
+ # without revbump needs to trigger a rebuild.
+ ResolverPlaygroundTestCase(
+ ["kde-base/ark"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = ['app-arch/libarchive-3.1.1', "kde-base/ark-4.10.0"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_slot_collisions.py b/pym/portage/tests/resolver/test_slot_collisions.py
index 95d68fe04..9fcd5294a 100644
--- a/pym/portage/tests/resolver/test_slot_collisions.py
+++ b/pym/portage/tests/resolver/test_slot_collisions.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -153,3 +153,107 @@ class SlotCollisionTestCase(TestCase):
self.assertEqual(test_case.test_success, True, test_case.fail_msg)
finally:
playground.cleanup()
+
+ def testConnectedCollision(self):
+ """
+ Ensure that we are able to solve connected slot conflicts
+ which cannot each be solved on their own.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "=dev-libs/X-1" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/X" },
+
+ "dev-libs/X-1": { "RDEPEND": "=dev-libs/Y-1" },
+ "dev-libs/X-2": { "RDEPEND": "=dev-libs/Y-2" },
+
+ "dev-libs/Y-1": { "PDEPEND": "=dev-libs/X-1" },
+ "dev-libs/Y-2": { "PDEPEND": "=dev-libs/X-2" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ options = { "--backtrack": 0 },
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ["dev-libs/Y-1", "dev-libs/X-1", ("dev-libs/A-1", "dev-libs/B-1")]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testDeeplyConnectedCollision(self):
+ """
+ Like testConnectedCollision, except that there is another
+ level of dependencies between the two conflicts.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "=dev-libs/X-1" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/X" },
+
+ "dev-libs/X-1": { "RDEPEND": "dev-libs/K" },
+ "dev-libs/X-2": { "RDEPEND": "dev-libs/L" },
+
+ "dev-libs/K-1": { "RDEPEND": "=dev-libs/Y-1" },
+ "dev-libs/L-1": { "RDEPEND": "=dev-libs/Y-2" },
+
+ "dev-libs/Y-1": { "PDEPEND": "=dev-libs/X-1" },
+ "dev-libs/Y-2": { "PDEPEND": "=dev-libs/X-2" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ options = { "--backtrack": 0 },
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["dev-libs/Y-1", "dev-libs/X-1", "dev-libs/K-1", \
+ "dev-libs/A-1", "dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSelfDEPENDRemovalCrash(self):
+ """
+ Make sure we don't try to remove a package twice. This happened
+ in the past when a package had a DEPEND on itself.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "=dev-libs/X-1" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/X" },
+
+ "dev-libs/X-1": { },
+ "dev-libs/X-2": { "DEPEND": ">=dev-libs/X-2" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["dev-libs/X-1", "dev-libs/A-1", "dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_slot_conflict_mask_update.py b/pym/portage/tests/resolver/test_slot_conflict_mask_update.py
new file mode 100644
index 000000000..a90eeac29
--- /dev/null
+++ b/pym/portage/tests/resolver/test_slot_conflict_mask_update.py
@@ -0,0 +1,41 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotConflictMaskUpdateTestCase(TestCase):
+
+ def testBacktrackingGoodVersionFirst(self):
+ """
+ When backtracking due to slot conflicts, we masked the version that was pulled
+ in first. This is not always a good idea. Mask the highest version instead.
+ """
+
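+ # self.todo flags this as an expected failure until the behavior is fixed.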
+ self.todo = True
+
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "=dev-libs/C-1 dev-libs/B" },
+ "dev-libs/B-1": { "DEPEND": "=dev-libs/C-1" },
+ "dev-libs/B-2": { "DEPEND": "=dev-libs/C-2" },
+ "dev-libs/C-1": { },
+ "dev-libs/C-2": { },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1",],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_slot_conflict_rebuild.py b/pym/portage/tests/resolver/test_slot_conflict_rebuild.py
new file mode 100644
index 000000000..17737cf45
--- /dev/null
+++ b/pym/portage/tests/resolver/test_slot_conflict_rebuild.py
@@ -0,0 +1,408 @@
+# Copyright 2012-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotConflictRebuildTestCase(TestCase):
+
+ def testSlotConflictRebuild(self):
+
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:=",
+ "RDEPEND": "app-misc/A:="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "DEPEND": "<app-misc/A-2",
+ "RDEPEND": "<app-misc/A-2"
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/D-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/E-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/D:=",
+ "RDEPEND": "app-misc/D:="
+ },
+
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:0/1=",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "DEPEND": "<app-misc/A-2",
+ "RDEPEND": "<app-misc/A-2"
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/E-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/D:0/1=",
+ "RDEPEND": "app-misc/D:0/1="
+ },
+
+ }
+
+ world = ["app-misc/B", "app-misc/C", "app-misc/E"]
+
+ test_cases = (
+
+ # Test bug #439688, where a slot conflict prevents an
+ # upgrade and we don't want to trigger unnecessary rebuilds.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["app-misc/D-2", "app-misc/E-0"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictMassRebuild(self):
+ """
+ Bug 486580
+ Before this bug was fixed, emerge would backtrack for each package that needed
+ a rebuild. This could cause it to hit the backtrack limit and not rebuild all
+ needed packages.
+ """
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:=",
+ "RDEPEND": "app-misc/B:="
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+
+ "app-misc/B-2" : {
+ "EAPI": "5",
+ "SLOT": "2/2"
+ },
+ }
+
+ installed = {
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+ }
+
+ expected_mergelist = ['app-misc/A-1', 'app-misc/B-2']
+
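+ # Generate five consumers of app-misc/B:= so that a single sub-slot
+ # change requires many rebuilds at once.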
+ for i in range(5):
+ ebuilds["app-misc/C%sC-1" % i] = {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:=",
+ "RDEPEND": "app-misc/B:="
+ }
+
+ installed["app-misc/C%sC-1" % i] = {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:1/1=",
+ "RDEPEND": "app-misc/B:1/1="
+ }
+ for x in ("DEPEND", "RDEPEND"):
+ ebuilds["app-misc/A-1"][x] += " app-misc/C%sC" % i
+
+ expected_mergelist.append("app-misc/C%sC-1" % i)
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ ignore_mergelist_order=True,
+ all_permutations=True,
+ options = {"--backtrack": 3, '--deep': True},
+ success = True,
+ mergelist = expected_mergelist),
+ )
+
+ world = []
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testSlotConflictForgottenChild(self):
+ """
+ Similar to testSlotConflictMassRebuild above, except that this time the rebuilds
+ are scheduled while the package causing the rebuild (the child) is not yet installed.
+ """
+ ebuilds = {
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:= app-misc/C",
+ "RDEPEND": "app-misc/B:= app-misc/C",
+ },
+
+ "app-misc/B-2" : {
+ "EAPI": "5",
+ "SLOT": "2"
+ },
+
+ "app-misc/C-1": {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:=",
+ "RDEPEND": "app-misc/B:="
+ },
+ }
+
+ installed = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:1/1= app-misc/C",
+ "RDEPEND": "app-misc/B:1/1= app-misc/C",
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+
+ "app-misc/C-1": {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:1/1=",
+ "RDEPEND": "app-misc/B:1/1="
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ success = True,
+ mergelist = ['app-misc/B-2', 'app-misc/C-1', 'app-misc/A-2']),
+ )
+
+ world = []
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictDepChange(self):
+ """
+ Bug 490362
+ The dependency in the ebuild was changed from a slot operator to
+ no slot operator. The vdb still contained the slot operator, and
+ emerge would refuse to rebuild.
+ """
+ ebuilds = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B",
+ "RDEPEND": "app-misc/B"
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+ }
+
+ installed = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:0/1=",
+ "RDEPEND": "app-misc/B:0/1="
+ },
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/B"],
+ success = True,
+ mergelist = ['app-misc/B-2', 'app-misc/A-1']),
+ )
+
+ world = ["app-misc/A"]
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictMixedDependencies(self):
+ """
+ Bug 487198
+ For parents with mixed >= and < dependencies, we scheduled rebuilds for the
+ >= atom, but in the end didn't install the child update because of the < atom.
+ """
+ ebuilds = {
+ "cat/slotted-lib-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+ "cat/slotted-lib-2" : {
+ "EAPI": "5",
+ "SLOT": "2"
+ },
+ "cat/slotted-lib-3" : {
+ "EAPI": "5",
+ "SLOT": "3"
+ },
+ "cat/slotted-lib-4" : {
+ "EAPI": "5",
+ "SLOT": "4"
+ },
+ "cat/slotted-lib-5" : {
+ "EAPI": "5",
+ "SLOT": "5"
+ },
+ "cat/user-1" : {
+ "EAPI": "5",
+ "DEPEND": ">=cat/slotted-lib-2:= <cat/slotted-lib-4:=",
+ "RDEPEND": ">=cat/slotted-lib-2:= <cat/slotted-lib-4:=",
+ },
+ }
+
+ installed = {
+ "cat/slotted-lib-3" : {
+ "EAPI": "5",
+ "SLOT": "3"
+ },
+ "cat/user-1" : {
+ "EAPI": "5",
+ "DEPEND": ">=cat/slotted-lib-2:3/3= <cat/slotted-lib-4:3/3=",
+ "RDEPEND": ">=cat/slotted-lib-2:3/3= <cat/slotted-lib-4:3/3=",
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["cat/user"],
+ options = {"--deep": True, "--update": True},
+ success = True,
+ mergelist = []),
+ )
+
+ world = []
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictMultiRepo(self):
+ """
+ Bug 497238
+ Different repositories contain the same cpv with different sub-slots for
+ a slot operator child.
+ Downgrading the slot operator parent would result in a sub-slot change of
+ the installed package by changing the source repository.
+ Make sure we don't perform this undesirable rebuild.
+ """
+ ebuilds = {
+ "net-firewall/iptables-1.4.21::overlay" : { "EAPI": "5", "SLOT": "0/10" },
+ "sys-apps/iproute2-3.11.0::overlay" : { "EAPI": "5", "RDEPEND": "net-firewall/iptables:=" },
+
+ "net-firewall/iptables-1.4.21" : { "EAPI": "5", "SLOT": "0" },
+ "sys-apps/iproute2-3.12.0": { "EAPI": "5", "RDEPEND": "net-firewall/iptables:=" },
+ }
+
+ installed = {
+ "net-firewall/iptables-1.4.21::overlay" : { "EAPI": "5", "SLOT": "0/10" },
+ "sys-apps/iproute2-3.12.0": { "EAPI": "5", "RDEPEND": "net-firewall/iptables:0/10=" },
+ }
+
+ world = ["sys-apps/iproute2"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--deep": True, "--update": True, "--verbose": True},
+ success = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_slot_conflict_update.py b/pym/portage/tests/resolver/test_slot_conflict_update.py
new file mode 100644
index 000000000..331e5788b
--- /dev/null
+++ b/pym/portage/tests/resolver/test_slot_conflict_update.py
@@ -0,0 +1,98 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotConflictUpdateTestCase(TestCase):
+
+ def testSlotConflictUpdate(self):
+
+ ebuilds = {
+
+ "app-text/podofo-0.9.2" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-util/boost-build"
+ },
+
+ "dev-cpp/libcmis-0.3.1" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-libs/boost:="
+ },
+
+ "dev-libs/boost-1.53.0" : {
+ "EAPI": "5",
+ "SLOT": "0/1.53",
+ "RDEPEND" : "=dev-util/boost-build-1.53.0"
+ },
+
+ "dev-libs/boost-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0/1.52",
+ "RDEPEND" : "=dev-util/boost-build-1.52.0"
+ },
+
+ "dev-util/boost-build-1.53.0" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "dev-util/boost-build-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+
+ }
+
+ installed = {
+
+ "app-text/podofo-0.9.2" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-util/boost-build"
+ },
+
+ "dev-cpp/libcmis-0.3.1" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-libs/boost:0/1.52="
+ },
+
+ "dev-util/boost-build-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "dev-libs/boost-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0/1.52",
+ "RDEPEND" : "=dev-util/boost-build-1.52.0"
+ }
+
+ }
+
+ world = ["dev-cpp/libcmis", "dev-libs/boost", "app-text/podofo"]
+
+ test_cases = (
+
+ # In order to avoid a missed update, first mask lower
+ # versions that conflict with higher versions. Note that
+ # this behavior makes SlotConflictMaskUpdateTestCase
+ # fail.
+ ResolverPlaygroundTestCase(
+ world,
+ all_permutations = True,
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ['dev-util/boost-build-1.53.0', 'dev-libs/boost-1.53.0', 'dev-cpp/libcmis-0.3.1']),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_slot_operator_autounmask.py b/pym/portage/tests/resolver/test_slot_operator_autounmask.py
new file mode 100644
index 000000000..624271b39
--- /dev/null
+++ b/pym/portage/tests/resolver/test_slot_operator_autounmask.py
@@ -0,0 +1,120 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorAutoUnmaskTestCase(TestCase):
+
+ def testSubSlot(self):
+ ebuilds = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:=",
+ "RDEPEND": "dev-libs/icu:=",
+ "KEYWORDS": "~x86"
+ },
+ }
+ binpkgs = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/48=",
+ "RDEPEND": "dev-libs/icu:0/48="
+ },
+ }
+ installed = {
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/48=",
+ "RDEPEND": "dev-libs/icu:0/48="
+ },
+ }
+
+ world = ["dev-libs/libxml2"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--autounmask": True, "--oneshot": True},
+ success = False,
+ mergelist = ["dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ],
+ unstable_keywords = ['dev-libs/libxml2-2.7.8']),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = False,
+ mergelist = ["[binary]dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ],
+ unstable_keywords = ['dev-libs/libxml2-2.7.8']),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True},
+ success = True,
+ mergelist = []),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_slot_operator_unsatisfied.py b/pym/portage/tests/resolver/test_slot_operator_unsatisfied.py
new file mode 100644
index 000000000..e3b53d159
--- /dev/null
+++ b/pym/portage/tests/resolver/test_slot_operator_unsatisfied.py
@@ -0,0 +1,70 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorUnsatisfiedTestCase(TestCase):
+
+ def testSlotOperatorUnsatisfied(self):
+
+ ebuilds = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:=",
+ "RDEPEND": "app-misc/A:="
+ },
+ }
+
+ installed = {
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:0/1=",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+ }
+
+ world = ["app-misc/B"]
+
+ test_cases = (
+
+ # Demonstrate bug #439694, where a broken slot-operator
+ # sub-slot dependency needs to trigger a rebuild.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["app-misc/B-0"]),
+
+ # This doesn't trigger a rebuild, since there's no version
+ # change to trigger complete graph mode, and initially
+ # unsatisfied deps are ignored in complete graph mode anyway.
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ options = {"--oneshot": True},
+ success = True,
+ mergelist = ["app-misc/A-2"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_slot_operator_unsolved.py b/pym/portage/tests/resolver/test_slot_operator_unsolved.py
new file mode 100644
index 000000000..c19783ddf
--- /dev/null
+++ b/pym/portage/tests/resolver/test_slot_operator_unsolved.py
@@ -0,0 +1,88 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorUnsolvedTestCase(TestCase):
+ """
+ Demonstrate bug #456340, where an unsolved circular dependency
+ interacts with an unsatisfied built slot-operator dep.
+ """
+
+ def testSlotOperatorUnsolved(self):
+ ebuilds = {
+ "dev-libs/icu-50.1.2" : {
+ "EAPI": "5",
+ "SLOT": "0/50.1.2"
+ },
+ "net-libs/webkit-gtk-1.10.2-r300" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/icu-3.8.1-r1:=",
+ "RDEPEND": ">=dev-libs/icu-3.8.1-r1:="
+ },
+ "dev-ruby/rdoc-3.12.1" : {
+ "EAPI": "5",
+ "IUSE": "test",
+ "DEPEND": "test? ( >=dev-ruby/hoe-2.7.0 )",
+ },
+ "dev-ruby/hoe-2.13.0" : {
+ "EAPI": "5",
+ "IUSE": "test",
+ "DEPEND": "test? ( >=dev-ruby/rdoc-3.10 )",
+ "RDEPEND": "test? ( >=dev-ruby/rdoc-3.10 )",
+ },
+ }
+
+ binpkgs = {
+ "net-libs/webkit-gtk-1.10.2-r300" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/icu-3.8.1-r1:0/50=",
+ "RDEPEND": ">=dev-libs/icu-3.8.1-r1:0/50="
+ },
+ }
+
+ installed = {
+ "dev-libs/icu-50.1.2" : {
+ "EAPI": "5",
+ "SLOT": "0/50.1.2"
+ },
+ "net-libs/webkit-gtk-1.10.2-r300" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/icu-3.8.1-r1:0/50=",
+ "RDEPEND": ">=dev-libs/icu-3.8.1-r1:0/50="
+ },
+ }
+
+ user_config = {
+ "make.conf" : ("FEATURES=test",)
+ }
+
+ world = ["net-libs/webkit-gtk", "dev-ruby/hoe"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True},
+ circular_dependency_solutions = {
+ 'dev-ruby/hoe-2.13.0': frozenset([frozenset([('test', False)])]),
+ 'dev-ruby/rdoc-3.12.1': frozenset([frozenset([('test', False)])])
+ },
+ success = False
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, user_config=user_config,
+ world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/pym/portage/tests/resolver/test_targetroot.py b/pym/portage/tests/resolver/test_targetroot.py
new file mode 100644
index 000000000..db6c60de3
--- /dev/null
+++ b/pym/portage/tests/resolver/test_targetroot.py
@@ -0,0 +1,85 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class TargetRootTestCase(TestCase):
+
+ def testTargetRoot(self):
+ ebuilds = {
+ "dev-lang/python-3.2": {
+ "EAPI": "5-hdepend",
+ "IUSE": "targetroot",
+ "HDEPEND": "targetroot? ( ~dev-lang/python-3.2 )",
+ },
+ "dev-libs/A-1": {
+ "EAPI": "4",
+ "DEPEND": "dev-libs/B",
+ "RDEPEND": "dev-libs/C",
+ },
+ "dev-libs/B-1": {},
+ "dev-libs/C-1": {},
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {},
+ success = True,
+ mergelist = ["dev-lang/python-3.2", "dev-lang/python-3.2{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {"--root-deps": True},
+ success = True,
+ mergelist = ["dev-lang/python-3.2", "dev-lang/python-3.2{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {"--root-deps": "rdeps"},
+ success = True,
+ mergelist = ["dev-lang/python-3.2", "dev-lang/python-3.2{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {},
+ ambiguous_merge_order = True,
+ success = True,
+ mergelist = [("dev-libs/B-1", "dev-libs/C-1{targetroot}"), "dev-libs/A-1{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--root-deps": True},
+ ambiguous_merge_order = True,
+ success = True,
+ mergelist = [("dev-libs/B-1{targetroot}", "dev-libs/C-1{targetroot}"), "dev-libs/A-1{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--root-deps": "rdeps"},
+ ambiguous_merge_order = True,
+ success = True,
+ mergelist = [("dev-libs/C-1{targetroot}"), "dev-libs/A-1{targetroot}"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, targetroot=True,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {},
+ success = True,
+ mergelist = ["dev-lang/python-3.2"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, targetroot=False,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
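The three mergelist variants above can be summarized informally (a reading aid, not resolver code); {targetroot} marks packages merged into the target ROOT:

    # How each mode treats the dependency classes of dev-libs/A
    # (DEPEND on B, RDEPEND on C) when building for a target ROOT.
    root_deps_effect = {
        "default":           {"DEPEND": "host",    "RDEPEND": "target"},
        "--root-deps":       {"DEPEND": "target",  "RDEPEND": "target"},
        "--root-deps=rdeps": {"DEPEND": "dropped", "RDEPEND": "target"},
    }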
diff --git a/pym/portage/tests/resolver/test_unpack_dependencies.py b/pym/portage/tests/resolver/test_unpack_dependencies.py
new file mode 100644
index 000000000..cfceff4b1
--- /dev/null
+++ b/pym/portage/tests/resolver/test_unpack_dependencies.py
@@ -0,0 +1,65 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UnpackDependenciesTestCase(TestCase):
+ def testUnpackDependencies(self):
+ distfiles = {
+ "A-1.tar.gz": b"binary\0content",
+ "B-1.TAR.XZ": b"binary\0content",
+ "B-docs-1.tar.bz2": b"binary\0content",
+ "C-1.TAR.XZ": b"binary\0content",
+ "C-docs-1.tar.bz2": b"binary\0content",
+ }
+
+ ebuilds = {
+ "dev-libs/A-1": {"SRC_URI": "A-1.tar.gz", "EAPI": "5-progress"},
+ "dev-libs/B-1": {"IUSE": "doc", "SRC_URI": "B-1.TAR.XZ doc? ( B-docs-1.tar.bz2 )", "EAPI": "5-progress"},
+ "dev-libs/C-1": {"IUSE": "doc", "SRC_URI": "C-1.TAR.XZ doc? ( C-docs-1.tar.bz2 )", "EAPI": "5-progress"},
+ "app-arch/bzip2-1": {},
+ "app-arch/gzip-1": {},
+ "app-arch/tar-1": {},
+ "app-arch/xz-utils-1": {},
+ }
+
+ repo_configs = {
+ "test_repo": {
+ "unpack_dependencies/5-progress": (
+ "tar.bz2 app-arch/tar app-arch/bzip2",
+ "tar.gz app-arch/tar app-arch/gzip",
+ "tar.xz app-arch/tar app-arch/xz-utils",
+ ),
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["app-arch/tar-1", "app-arch/gzip-1", "dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["app-arch/tar-1", "app-arch/xz-utils-1", "dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["app-arch/tar-1", "app-arch/xz-utils-1", "app-arch/bzip2-1", "dev-libs/C-1"]),
+ )
+
+ user_config = {
+ "package.use": ("dev-libs/C doc",)
+ }
+
+ playground = ResolverPlayground(distfiles=distfiles, ebuilds=ebuilds, repo_configs=repo_configs, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
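The repo-level unpack_dependencies table is what pulls the extra app-arch entries into each mergelist. A minimal sketch of the suffix matching that the expectations imply (assumed behavior, including the case-insensitive handling that lets B-1.TAR.XZ select app-arch/xz-utils):

    # Not the real implementation: map a SRC_URI file name to unpack-time
    # dependencies by longest case-insensitive suffix match.
    rules = {
        "tar.bz2": ["app-arch/tar", "app-arch/bzip2"],
        "tar.gz": ["app-arch/tar", "app-arch/gzip"],
        "tar.xz": ["app-arch/tar", "app-arch/xz-utils"],
    }

    def unpack_deps(filename):
        for suffix in sorted(rules, key=len, reverse=True):
            if filename.lower().endswith(suffix):
                return rules[suffix]
        return []

    assert unpack_deps("B-1.TAR.XZ") == ["app-arch/tar", "app-arch/xz-utils"]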
diff --git a/pym/portage/tests/resolver/test_use_aliases.py b/pym/portage/tests/resolver/test_use_aliases.py
new file mode 100644
index 000000000..7c2debbb1
--- /dev/null
+++ b/pym/portage/tests/resolver/test_use_aliases.py
@@ -0,0 +1,131 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UseAliasesTestCase(TestCase):
+ def testUseAliases(self):
+ ebuilds = {
+ "dev-libs/A-1": {"DEPEND": "dev-libs/K[x]", "RDEPEND": "dev-libs/K[x]", "EAPI": "5"},
+ "dev-libs/B-1": {"DEPEND": "dev-libs/L[x]", "RDEPEND": "dev-libs/L[x]", "EAPI": "5"},
+ "dev-libs/C-1": {"DEPEND": "dev-libs/M[xx]", "RDEPEND": "dev-libs/M[xx]", "EAPI": "5"},
+ "dev-libs/D-1": {"DEPEND": "dev-libs/N[-x]", "RDEPEND": "dev-libs/N[-x]", "EAPI": "5"},
+ "dev-libs/E-1": {"DEPEND": "dev-libs/O[-xx]", "RDEPEND": "dev-libs/O[-xx]", "EAPI": "5"},
+ "dev-libs/F-1": {"DEPEND": "dev-libs/P[-xx]", "RDEPEND": "dev-libs/P[-xx]", "EAPI": "5"},
+ "dev-libs/G-1": {"DEPEND": "dev-libs/Q[x-y]", "RDEPEND": "dev-libs/Q[x-y]", "EAPI": "5"},
+ "dev-libs/H-1": {"DEPEND": "=dev-libs/R-1*[yy]", "RDEPEND": "=dev-libs/R-1*[yy]", "EAPI": "5"},
+ "dev-libs/H-2": {"DEPEND": "=dev-libs/R-2*[yy]", "RDEPEND": "=dev-libs/R-2*[yy]", "EAPI": "5"},
+ "dev-libs/I-1": {"DEPEND": "dev-libs/S[y-z]", "RDEPEND": "dev-libs/S[y-z]", "EAPI": "5"},
+ "dev-libs/I-2": {"DEPEND": "dev-libs/S[y_z]", "RDEPEND": "dev-libs/S[y_z]", "EAPI": "5"},
+ "dev-libs/J-1": {"DEPEND": "dev-libs/T[x]", "RDEPEND": "dev-libs/T[x]", "EAPI": "5"},
+ "dev-libs/K-1": {"IUSE": "+x", "EAPI": "5"},
+ "dev-libs/K-2::repo1": {"IUSE": "+X", "EAPI": "5-progress"},
+ "dev-libs/L-1": {"IUSE": "+x", "EAPI": "5"},
+ "dev-libs/M-1::repo1": {"IUSE": "X", "EAPI": "5-progress"},
+ "dev-libs/N-1": {"IUSE": "x", "EAPI": "5"},
+ "dev-libs/N-2::repo1": {"IUSE": "X", "EAPI": "5-progress"},
+ "dev-libs/O-1": {"IUSE": "x", "EAPI": "5"},
+ "dev-libs/P-1::repo1": {"IUSE": "+X", "EAPI": "5-progress"},
+ "dev-libs/Q-1::repo2": {"IUSE": "X.Y", "EAPI": "5-progress"},
+ "dev-libs/R-1::repo1": {"IUSE": "Y", "EAPI": "5-progress"},
+ "dev-libs/R-2::repo1": {"IUSE": "y", "EAPI": "5-progress"},
+ "dev-libs/S-1::repo2": {"IUSE": "Y.Z", "EAPI": "5-progress"},
+ "dev-libs/S-2::repo2": {"IUSE": "Y.Z", "EAPI": "5-progress"},
+ "dev-libs/T-1::repo1": {"IUSE": "+X", "EAPI": "5"},
+ }
+
+ installed = {
+ "dev-libs/L-2::repo1": {"IUSE": "+X", "USE": "X", "EAPI": "5-progress"},
+ "dev-libs/O-2::repo1": {"IUSE": "X", "USE": "", "EAPI": "5-progress"},
+ }
+
+ repo_configs = {
+ "repo1": {
+ "use.aliases": ("X x xx",),
+ "package.use.aliases": (
+ "=dev-libs/R-1* Y yy",
+ "=dev-libs/R-2* y yy",
+ )
+ },
+ "repo2": {
+ "eapi": ("5-progress",),
+ "use.aliases": ("X.Y x-y",),
+ "package.use.aliases": (
+ "=dev-libs/S-1* Y.Z y-z",
+ "=dev-libs/S-2* Y.Z y_z",
+ ),
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ success = True,
+ mergelist = ["dev-libs/K-2", "dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/M-1", "dev-libs/C-1"],
+ use_changes = {"dev-libs/M-1": {"X": True}}),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ mergelist = ["dev-libs/N-2", "dev-libs/D-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ success = True,
+ mergelist = ["dev-libs/E-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/F"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/P-1", "dev-libs/F-1"],
+ use_changes = {"dev-libs/P-1": {"X": False}}),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/G"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/Q-1", "dev-libs/G-1"],
+ use_changes = {"dev-libs/Q-1": {"X.Y": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/H-1*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/R-1", "dev-libs/H-1"],
+ use_changes = {"dev-libs/R-1": {"Y": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/H-2*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/R-2", "dev-libs/H-2"],
+ use_changes = {"dev-libs/R-2": {"y": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/I-1*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/S-1", "dev-libs/I-1"],
+ use_changes = {"dev-libs/S-1": {"Y.Z": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/I-2*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/S-2", "dev-libs/I-2"],
+ use_changes = {"dev-libs/S-2": {"Y.Z": True}}),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/J"],
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, repo_configs=repo_configs)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
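Each use.aliases line declares a real flag followed by its aliases ("X x xx"), and dependencies written against an alias resolve to the real flag. A distilled, hypothetical version of the lookup (portage's own handling additionally scopes package.use.aliases entries by atom):

    def parse_aliases(lines):
        # "REAL alias1 alias2 ..." -> {alias: REAL}
        alias_to_flag = {}
        for line in lines:
            real, aliases = line.split(None, 1)
            for alias in aliases.split():
                alias_to_flag[alias] = real
        return alias_to_flag

    aliases = parse_aliases(["X x xx"])
    assert aliases.get("xx", "xx") == "X"
    assert aliases.get("X", "X") == "X"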
diff --git a/pym/portage/tests/resolver/test_useflags.py b/pym/portage/tests/resolver/test_useflags.py
new file mode 100644
index 000000000..0a5f3b3ff
--- /dev/null
+++ b/pym/portage/tests/resolver/test_useflags.py
@@ -0,0 +1,78 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UseFlagsTestCase(TestCase):
+
+ def testUseFlags(self):
+ ebuilds = {
+ "dev-libs/A-1": { "IUSE": "X", },
+ "dev-libs/B-1": { "IUSE": "X Y", },
+ }
+
+ installed = {
+ "dev-libs/A-1": { "IUSE": "X", },
+ "dev-libs/B-1": { "IUSE": "X", },
+ }
+
+ binpkgs = installed
+
+ user_config = {
+ "package.use": ( "dev-libs/A X", ),
+ "use.force": ( "Y", ),
+ }
+
+ test_cases = (
+ #default: don't reinstall on use flag change
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--selective": True, "--usepkg": True},
+ success = True,
+ mergelist = []),
+
+ #default: respect use flags for binpkgs
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--usepkg": True},
+ success = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ #--binpkg-respect-use=n: use binpkgs with different use flags
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--binpkg-respect-use": "n", "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/A-1"]),
+
+ #--reinstall=changed-use: reinstall if use flag changed
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--reinstall": "changed-use", "--usepkg": True},
+ success = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ #--reinstall=changed-use: don't reinstall on new use flag
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--reinstall": "changed-use", "--usepkg": True},
+ success = True,
+ mergelist = []),
+
+ #--newuse: reinstall on new use flag
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--newuse": True, "--usepkg": True},
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ binpkgs=binpkgs, installed=installed, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
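The last two cases hinge on the difference between --reinstall=changed-use and --newuse: dev-libs/B gained Y in IUSE (and Y is forced), which only --newuse reacts to. A rough model, not the depgraph code:

    # changed-use compares only flags present in both old and new IUSE;
    # newuse reacts to any IUSE or USE difference.
    def changed_use(old_iuse, old_use, new_iuse, new_use):
        common = old_iuse & new_iuse
        return (old_use & common) != (new_use & common)

    def newuse(old_iuse, old_use, new_iuse, new_use):
        return (old_iuse, old_use) != (new_iuse, new_use)

    assert not changed_use({"X"}, set(), {"X", "Y"}, {"Y"})
    assert newuse({"X"}, set(), {"X", "Y"}, {"Y"})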
diff --git a/pym/portage/tests/runTests b/pym/portage/tests/runTests
index 1c1008dff..9c452764f 100755
--- a/pym/portage/tests/runTests
+++ b/pym/portage/tests/runTests
@@ -1,6 +1,6 @@
-#!/usr/bin/python -Wd
+#!/usr/bin/python -bWd
# runTests.py -- Portage Unit Test Functionality
-# Copyright 2006-2012 Gentoo Foundation
+# Copyright 2006-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import os, sys
@@ -29,9 +29,10 @@ os.environ["PORTAGE_GRPNAME"] = grp.getgrgid(os.getgid()).gr_name
# Insert our parent dir so we can do shiny import "tests"
# This line courtesy of Marienz and Pkgcore ;)
-sys.path.insert(0, osp.dirname(osp.dirname(osp.dirname(osp.abspath(__file__)))))
+sys.path.insert(0, osp.dirname(osp.dirname(osp.dirname(osp.realpath(__file__)))))
import portage
+portage._internal_caller = True
# Ensure that we don't instantiate portage.settings, so that tests should
# work the same regardless of global configuration file state/existence.
@@ -44,11 +45,17 @@ import portage.tests as tests
from portage.const import PORTAGE_BIN_PATH
path = os.environ.get("PATH", "").split(":")
path = [x for x in path if x]
-if not path or not os.path.samefile(path[0], PORTAGE_BIN_PATH):
+
+insert_bin_path = True
+try:
+ insert_bin_path = not path or \
+ not os.path.samefile(path[0], PORTAGE_BIN_PATH)
+except OSError:
+ pass
+
+if insert_bin_path:
path.insert(0, PORTAGE_BIN_PATH)
os.environ["PATH"] = ":".join(path)
-del path
-
if __name__ == "__main__":
sys.exit(tests.main())
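The try/except added above guards against stale PATH entries: os.path.samefile() stats both arguments and raises OSError when either path does not exist, as a quick demonstration shows:

    import os

    try:
        os.path.samefile("/nonexistent/bin", "/usr/bin")
    except OSError:
        pass  # treated as "not the same file"; PORTAGE_BIN_PATH is prepended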
diff --git a/pym/portage/tests/unicode/test_string_format.py b/pym/portage/tests/unicode/test_string_format.py
index fb6e8e02e..9d4366a91 100644
--- a/pym/portage/tests/unicode/test_string_format.py
+++ b/pym/portage/tests/unicode/test_string_format.py
@@ -1,15 +1,18 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import sys
-from portage import _encodings, _unicode_decode
+from portage import _encodings, _unicode_encode
from portage.exception import PortageException
from portage.tests import TestCase
from _emerge.DependencyArg import DependencyArg
from _emerge.UseFlagDisplay import UseFlagDisplay
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
STR_IS_UNICODE = sys.hexversion >= 0x3000000
@@ -20,27 +23,25 @@ class StringFormatTestCase(TestCase):
which may be either python2 or python3.
"""
- # In order to get some unicode test strings in a way that works in
- # both python2 and python3, write them here as byte strings and
- # decode them before use. This assumes _encodings['content'] is
- # utf_8.
+ # We need unicode_literals in order to get some unicode test strings
+ # in a way that works in both python2 and python3.
unicode_strings = (
- b'\xE2\x80\x98',
- b'\xE2\x80\x99',
+ '\u2018',
+ '\u2019',
)
def testDependencyArg(self):
self.assertEqual(_encodings['content'], 'utf_8')
- for arg_bytes in self.unicode_strings:
- arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
+ for arg_unicode in self.unicode_strings:
+ arg_bytes = _unicode_encode(arg_unicode, encoding=_encodings['content'])
dependency_arg = DependencyArg(arg=arg_unicode)
- # Force unicode format string so that __unicode__() is
- # called in python2.
- formatted_str = _unicode_decode("%s") % (dependency_arg,)
+ # Use unicode_literals for unicode format string so that
+ # __unicode__() is called in Python 2.
+ formatted_str = "%s" % (dependency_arg,)
self.assertEqual(formatted_str, arg_unicode)
if STR_IS_UNICODE:
@@ -52,20 +53,20 @@ class StringFormatTestCase(TestCase):
else:
# Test the __str__ method which returns encoded bytes in python2
- formatted_bytes = "%s" % (dependency_arg,)
+ formatted_bytes = b"%s" % (dependency_arg,)
self.assertEqual(formatted_bytes, arg_bytes)
def testPortageException(self):
self.assertEqual(_encodings['content'], 'utf_8')
- for arg_bytes in self.unicode_strings:
- arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
+ for arg_unicode in self.unicode_strings:
+ arg_bytes = _unicode_encode(arg_unicode, encoding=_encodings['content'])
e = PortageException(arg_unicode)
- # Force unicode format string so that __unicode__() is
- # called in python2.
- formatted_str = _unicode_decode("%s") % (e,)
+ # Use unicode_literals for unicode format string so that
+ # __unicode__() is called in Python 2.
+ formatted_str = "%s" % (e,)
self.assertEqual(formatted_str, arg_unicode)
if STR_IS_UNICODE:
@@ -77,7 +78,7 @@ class StringFormatTestCase(TestCase):
else:
# Test the __str__ method which returns encoded bytes in python2
- formatted_bytes = "%s" % (e,)
+ formatted_bytes = b"%s" % (e,)
self.assertEqual(formatted_bytes, arg_bytes)
def testUseFlagDisplay(self):
@@ -86,13 +87,12 @@ class StringFormatTestCase(TestCase):
for enabled in (True, False):
for forced in (True, False):
- for arg_bytes in self.unicode_strings:
- arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
+ for arg_unicode in self.unicode_strings:
e = UseFlagDisplay(arg_unicode, enabled, forced)
- # Force unicode format string so that __unicode__() is
- # called in python2.
- formatted_str = _unicode_decode("%s") % (e,)
+ # Use unicode_literals for unicode format string so that
+ # __unicode__() is called in Python 2.
+ formatted_str = "%s" % (e,)
self.assertEqual(isinstance(formatted_str, basestring), True)
if STR_IS_UNICODE:
@@ -104,5 +104,5 @@ class StringFormatTestCase(TestCase):
else:
# Test the __str__ method which returns encoded bytes in python2
- formatted_bytes = "%s" % (e,)
+ formatted_bytes = b"%s" % (e,)
self.assertEqual(isinstance(formatted_bytes, bytes), True)
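A sketch of the round-trip the rewritten test relies on: with unicode_literals the test strings are unicode, and encoding with utf_8 reproduces the byte strings the old test spelled out literally.

    from __future__ import unicode_literals

    arg_unicode = '\u2018'
    arg_bytes = arg_unicode.encode('utf_8')
    assert arg_bytes == b'\xe2\x80\x98'
    assert arg_bytes.decode('utf_8') == arg_unicode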
diff --git a/pym/portage/tests/update/test_move_ent.py b/pym/portage/tests/update/test_move_ent.py
index 2504dee2b..d9647a95e 100644
--- a/pym/portage/tests/update/test_move_ent.py
+++ b/pym/portage/tests/update/test_move_ent.py
@@ -1,4 +1,4 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import textwrap
@@ -59,12 +59,12 @@ class MoveEntTestCase(TestCase):
settings = playground.settings
trees = playground.trees
eroot = settings["EROOT"]
- portdir = settings["PORTDIR"]
+ test_repo_location = settings.repositories["test_repo"].location
portdb = trees[eroot]["porttree"].dbapi
vardb = trees[eroot]["vartree"].dbapi
bindb = trees[eroot]["bintree"].dbapi
- updates_dir = os.path.join(portdir, "profiles", "updates")
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
try:
ensure_dirs(updates_dir)
diff --git a/pym/portage/tests/update/test_move_slot_ent.py b/pym/portage/tests/update/test_move_slot_ent.py
index fcb0cc64c..3e49e1144 100644
--- a/pym/portage/tests/update/test_move_slot_ent.py
+++ b/pym/portage/tests/update/test_move_slot_ent.py
@@ -1,4 +1,4 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import textwrap
@@ -94,12 +94,12 @@ class MoveSlotEntTestCase(TestCase):
settings = playground.settings
trees = playground.trees
eroot = settings["EROOT"]
- portdir = settings["PORTDIR"]
+ test_repo_location = settings.repositories["test_repo"].location
portdb = trees[eroot]["porttree"].dbapi
vardb = trees[eroot]["vartree"].dbapi
bindb = trees[eroot]["bintree"].dbapi
- updates_dir = os.path.join(portdir, "profiles", "updates")
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
try:
ensure_dirs(updates_dir)
diff --git a/pym/portage/tests/update/test_update_dbentry.py b/pym/portage/tests/update/test_update_dbentry.py
index e13cfed74..88951149a 100644
--- a/pym/portage/tests/update/test_update_dbentry.py
+++ b/pym/portage/tests/update/test_update_dbentry.py
@@ -1,4 +1,4 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import re
@@ -6,14 +6,107 @@ import textwrap
import portage
from portage import os
+from portage.dep import Atom
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.update import update_dbentry
from portage.util import ensure_dirs
+from portage.versions import _pkg_str
from portage._global_updates import _do_global_updates
class UpdateDbentryTestCase(TestCase):
def testUpdateDbentryTestCase(self):
+ cases = (
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "1",
+ " dev-libs/A:0 ", " dev-libs/B:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "1",
+ " >=dev-libs/A-1:0 ", " >=dev-libs/B-1:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "2",
+ " dev-libs/A[foo] ", " dev-libs/B[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0/1=[foo] ", " dev-libs/B:0/1=[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0/1[foo] ", " dev-libs/B:0/1[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0/0[foo] ", " dev-libs/B:0/0[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0=[foo] ", " dev-libs/B:0=[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "1",
+ " dev-libs/A:0 ", " dev-libs/A:1 "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "1",
+ " >=dev-libs/A-1:0 ", " >=dev-libs/A-1:1 "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0/1=[foo] ", " dev-libs/A:1/1=[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0/1[foo] ", " dev-libs/A:1/1[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0/0[foo] ", " dev-libs/A:1/1[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0=[foo] ", " dev-libs/A:1=[foo] "),
+ )
+ for update_cmd, eapi, input_str, output_str in cases:
+ result = update_dbentry(update_cmd, input_str, eapi=eapi)
+ self.assertEqual(result, output_str)
+
+
+ def testUpdateDbentryBlockerTestCase(self):
+ """
+ Avoid creating self-blockers for bug #367215.
+ """
+ cases = (
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !dev-libs/A ", " !dev-libs/A "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !dev-libs/A ", " !dev-libs/B "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !dev-libs/A:0 ", " !dev-libs/A:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !dev-libs/A:0 ", " !dev-libs/B:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1:0 ", " !>=dev-libs/B-1:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1:0 ", " !>=dev-libs/A-1:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1 ", " !>=dev-libs/B-1 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1 ", " !>=dev-libs/A-1 "),
+
+ )
+ for update_cmd, parent, input_str, output_str in cases:
+ result = update_dbentry(update_cmd, input_str, parent=parent)
+ self.assertEqual(result, output_str)
+
+ def testUpdateDbentryDbapiTestCase(self):
ebuilds = {
@@ -96,14 +189,14 @@ class UpdateDbentryTestCase(TestCase):
settings = playground.settings
trees = playground.trees
eroot = settings["EROOT"]
- portdir = settings["PORTDIR"]
+ test_repo_location = settings.repositories["test_repo"].location
portdb = trees[eroot]["porttree"].dbapi
vardb = trees[eroot]["vartree"].dbapi
bindb = trees[eroot]["bintree"].dbapi
setconfig = trees[eroot]["root_config"].setconfig
selected_set = setconfig.getSets()["selected"]
- updates_dir = os.path.join(portdir, "profiles", "updates")
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
try:
ensure_dirs(updates_dir)
@@ -143,7 +236,7 @@ class UpdateDbentryTestCase(TestCase):
self.assertTrue(old_pattern.search(rdepend) is None)
self.assertTrue("dev-libs/M-moved" in rdepend)
- # EAPI 4-python N -> N.moved
+ # EAPI 4-python/*-progress N -> N.moved
rdepend = vardb.aux_get("dev-libs/B-1", ["RDEPEND"])[0]
old_pattern = re.compile(r"\bdev-libs/N(\s|$)")
self.assertTrue(old_pattern.search(rdepend) is None)
diff --git a/pym/portage/tests/util/test_getconfig.py b/pym/portage/tests/util/test_getconfig.py
index f13b75358..e5fd60f6d 100644
--- a/pym/portage/tests/util/test_getconfig.py
+++ b/pym/portage/tests/util/test_getconfig.py
@@ -1,13 +1,15 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import tempfile
from portage import os
+from portage import shutil
from portage import _unicode_encode
from portage.const import PORTAGE_BASE_PATH
from portage.tests import TestCase
from portage.util import getconfig
+from portage.exception import ParseError
class GetConfigTestCase(TestCase):
"""
@@ -18,8 +20,8 @@ class GetConfigTestCase(TestCase):
_cases = {
'FETCHCOMMAND' : 'wget -t 3 -T 60 --passive-ftp -O "${DISTDIR}/${FILE}" "${URI}"',
'FETCHCOMMAND_RSYNC' : 'rsync -avP "${URI}" "${DISTDIR}/${FILE}"',
- 'FETCHCOMMAND_SFTP' : 'bash -c "x=\\${2#sftp://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; exec sftp -P \\${port} \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" sftp "${DISTDIR}/${FILE}" "${URI}"',
- 'FETCHCOMMAND_SSH' : 'bash -c "x=\\${2#ssh://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; exec rsync --rsh=\\"ssh -p\\${port}\\" -avP \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" rsync "${DISTDIR}/${FILE}" "${URI}"',
+ 'FETCHCOMMAND_SFTP' : 'bash -c "x=\\${2#sftp://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; eval \\"declare -a ssh_opts=(\\${3})\\" ; exec sftp -P \\${port} \\"\\${ssh_opts[@]}\\" \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" sftp "${DISTDIR}/${FILE}" "${URI}" "${PORTAGE_SSH_OPTS}"',
+ 'FETCHCOMMAND_SSH' : 'bash -c "x=\\${2#ssh://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; exec rsync --rsh=\\"ssh -p\\${port} \\${3}\\" -avP \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" rsync "${DISTDIR}/${FILE}" "${URI}" "${PORTAGE_SSH_OPTS}"',
'PORTAGE_ELOG_MAILSUBJECT' : '[portage] ebuild log for ${PACKAGE} on ${HOST}'
}
@@ -31,6 +33,29 @@ class GetConfigTestCase(TestCase):
for k, v in self._cases.items():
self.assertEqual(d[k], v)
+ def testGetConfigSourceLex(self):
+		tempdir = tempfile.mkdtemp()
+		try:
+ make_conf_file = os.path.join(tempdir, 'make.conf')
+ with open(make_conf_file, 'w') as f:
+ f.write('source "${DIR}/sourced_file"\n')
+ sourced_file = os.path.join(tempdir, 'sourced_file')
+ with open(sourced_file, 'w') as f:
+ f.write('PASSES_SOURCING_TEST="True"\n')
+
+ d = getconfig(make_conf_file, allow_sourcing=True, expand={"DIR": tempdir})
+
+ # PASSES_SOURCING_TEST should exist in getconfig result.
+ self.assertTrue(d is not None)
+ self.assertEqual("True", d['PASSES_SOURCING_TEST'])
+
+			# With allow_sourcing=True and an empty expand map, this should
+			# raise a ParseError, since "${DIR}" cannot be resolved.
+ self.assertRaisesMsg("An empty expand map should throw an exception",
+ ParseError, getconfig, make_conf_file, allow_sourcing=True, expand={})
+ finally:
+ shutil.rmtree(tempdir)
+
def testGetConfigProfileEnv(self):
# Test the mode which is used to parse /etc/env.d and /etc/profile.env.
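Illustrative usage of the behavior the new test asserts (the path is hypothetical and assumed to contain a source "${DIR}/sourced_file" line): with an empty expand map the ${DIR} reference stays unresolved, the source target cannot be opened, and getconfig() surfaces that as a ParseError.

    from portage.util import getconfig
    from portage.exception import ParseError

    try:
        getconfig("/tmp/example-make.conf", allow_sourcing=True, expand={})
    except ParseError as e:
        print("unresolvable source directive: %s" % (e,))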
diff --git a/pym/portage/tests/util/test_stackDictList.py b/pym/portage/tests/util/test_stackDictList.py
index 678001c38..25a723c69 100644
--- a/pym/portage/tests/util/test_stackDictList.py
+++ b/pym/portage/tests/util/test_stackDictList.py
@@ -8,10 +8,12 @@ class StackDictListTestCase(TestCase):
def testStackDictList(self):
from portage.util import stack_dictlist
-
- tests = [ ({'a':'b'},{'x':'y'},False,{'a':['b'],'x':['y']}) ]
- tests.append(( {'KEYWORDS':['alpha','x86']},{'KEYWORDS':['-*']},True,{} ))
- tests.append(( {'KEYWORDS':['alpha','x86']},{'KEYWORDS':['-x86']},True,{'KEYWORDS':['alpha']} ))
+
+ tests = [
+ ({'a': 'b'}, {'x': 'y'}, False, {'a': ['b'], 'x': ['y']}),
+ ({'KEYWORDS': ['alpha', 'x86']}, {'KEYWORDS': ['-*']}, True, {}),
+ ({'KEYWORDS': ['alpha', 'x86']}, {'KEYWORDS': ['-x86']}, True, {'KEYWORDS': ['alpha']}),
+ ]
for test in tests:
self.assertEqual(
- stack_dictlist([test[0],test[1]],incremental=test[2]), test[3] )
+ stack_dictlist([test[0], test[1]], incremental=test[2]), test[3])
diff --git a/pym/portage/tests/util/test_stackDicts.py b/pym/portage/tests/util/test_stackDicts.py
index 0d2cadd0c..0c1dcdb78 100644
--- a/pym/portage/tests/util/test_stackDicts.py
+++ b/pym/portage/tests/util/test_stackDicts.py
@@ -7,30 +7,27 @@ from portage.util import stack_dicts
class StackDictsTestCase(TestCase):
-
- def testStackDictsPass(self):
-
- tests = [ ( [ { "a":"b" }, { "b":"c" } ], { "a":"b", "b":"c" },
- False, [], False ),
- ( [ { "a":"b" }, { "a":"c" } ], { "a":"b c" },
- True, [], False ),
- ( [ { "a":"b" }, { "a":"c" } ], { "a":"b c" },
- False, ["a"], False ),
- ( [ { "a":"b" }, None ], { "a":"b" },
- False, [], True ),
- ( [ None ], {}, False, [], False ),
- ( [ None, {}], {}, False, [], True ) ]
+ def testStackDictsPass(self):
+ tests = [
+ ([{'a': 'b'}, {'b': 'c'}], {'a': 'b', 'b': 'c'}, False, [], False),
+ ([{'a': 'b'}, {'a': 'c'}], {'a': 'b c'}, True, [], False),
+ ([{'a': 'b'}, {'a': 'c'}], {'a': 'b c'}, False, ['a'], False),
+ ([{'a': 'b'}, None], {'a': 'b'}, False, [], True),
+ ([None], {}, False, [], False),
+ ([None, {}], {}, False, [], True)
+ ]
for test in tests:
- result = stack_dicts( test[0], test[2], test[3], test[4] )
- self.assertEqual( result, test[1] )
-
+ result = stack_dicts(test[0], test[2], test[3], test[4])
+ self.assertEqual(result, test[1])
+
def testStackDictsFail(self):
-
- tests = [ ( [ None, {} ], None, False, [], True ),
- ( [ { "a":"b"}, {"a":"c" } ], { "a":"b c" },
- False, [], False ) ]
+
+ tests = [
+ ([None, {}], None, False, [], True),
+ ([{'a': 'b'}, {'a': 'c'}], {'a': 'b c'}, False, [], False)
+ ]
for test in tests:
- result = stack_dicts( test[0], test[2], test[3], test[4] )
- self.assertNotEqual( result , test[1] )
+ result = stack_dicts(test[0], test[2], test[3], test[4])
+ self.assertNotEqual(result, test[1])
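For reference, the positional arguments used in these tests follow the signature stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0); with incremental merging, later values are appended rather than replaced:

    from portage.util import stack_dicts

    assert stack_dicts([{"a": "b"}, {"a": "c"}], 1) == {"a": "b c"}
    assert stack_dicts([{"a": "b"}, {"a": "c"}], 0, ["a"]) == {"a": "b c"}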
diff --git a/pym/portage/tests/util/test_stackLists.py b/pym/portage/tests/util/test_stackLists.py
index e52477255..3ba69ecd2 100644
--- a/pym/portage/tests/util/test_stackLists.py
+++ b/pym/portage/tests/util/test_stackLists.py
@@ -6,14 +6,16 @@ from portage.tests import TestCase
from portage.util import stack_lists
class StackListsTestCase(TestCase):
-
+
def testStackLists(self):
-
- tests = [ ( [ ['a','b','c'], ['d','e','f'] ], ['a','c','b','e','d','f'], False ),
- ( [ ['a','x'], ['b','x'] ], ['a','x','b'], False ),
- ( [ ['a','b','c'], ['-*'] ], [], True ),
- ( [ ['a'], ['-a'] ], [], True ) ]
+
+ tests = [
+ ([['a', 'b', 'c'], ['d', 'e', 'f']], ['a', 'c', 'b', 'e', 'd', 'f'], False),
+ ([['a', 'x'], ['b', 'x']], ['a', 'x', 'b'], False),
+ ([['a', 'b', 'c'], ['-*']], [], True),
+ ([['a'], ['-a']], [], True)
+ ]
for test in tests:
- result = stack_lists( test[0], test[2] )
- self.assertEqual( set(result) , set(test[1]) )
+ result = stack_lists(test[0], test[2])
+ self.assertEqual(set(result), set(test[1]))
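stack_lists() folds the lists left to right; with incremental stacking a "-x" token removes x and "-*" clears everything accumulated so far:

    from portage.util import stack_lists

    assert set(stack_lists([["a", "x"], ["b", "x"]], 0)) == {"a", "b", "x"}
    assert stack_lists([["a", "b", "c"], ["-*"]], 1) == []
    assert stack_lists([["a"], ["-a"]], 1) == []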
diff --git a/pym/portage/tests/util/test_uniqueArray.py b/pym/portage/tests/util/test_uniqueArray.py
index e23428c31..aae88cce8 100644
--- a/pym/portage/tests/util/test_uniqueArray.py
+++ b/pym/portage/tests/util/test_uniqueArray.py
@@ -7,18 +7,20 @@ from portage.tests import TestCase
from portage.util import unique_array
class UniqueArrayTestCase(TestCase):
-
+
def testUniqueArrayPass(self):
"""
		test portage.util.unique_array()
"""
- tests = [ ( ["a","a","a",os,os,[],[],[]], ['a',os,[]] ),
- ( [1,1,1,2,3,4,4] , [1,2,3,4]) ]
+ tests = [
+ (['a', 'a', 'a', os, os, [], [], []], ['a', os, []]),
+ ([1, 1, 1, 2, 3, 4, 4], [1, 2, 3, 4])
+ ]
for test in tests:
- result = unique_array( test[0] )
+ result = unique_array(test[0])
for item in test[1]:
number = result.count(item)
- self.assertFalse( number != 1, msg=("%s contains %s of %s, "
- "should be only 1") % (result, number, item) )
+ self.assertFalse(number != 1, msg=("%s contains %s of %s, "
+ "should be only 1") % (result, number, item))
diff --git a/pym/portage/tests/util/test_varExpand.py b/pym/portage/tests/util/test_varExpand.py
index 7b528d6db..498b50ead 100644
--- a/pym/portage/tests/util/test_varExpand.py
+++ b/pym/portage/tests/util/test_varExpand.py
@@ -6,20 +6,20 @@ from portage.tests import TestCase
from portage.util import varexpand
class VarExpandTestCase(TestCase):
-
+
def testVarExpandPass(self):
- varDict = { "a":"5", "b":"7", "c":"-5" }
+ varDict = {"a": "5", "b": "7", "c": "-5"}
for key in varDict:
- result = varexpand( "$%s" % key, varDict )
-
- self.assertFalse( result != varDict[key],
- msg="Got %s != %s, from varexpand( %s, %s )" % \
- ( result, varDict[key], "$%s" % key, varDict ) )
- result = varexpand( "${%s}" % key, varDict )
- self.assertFalse( result != varDict[key],
- msg="Got %s != %s, from varexpand( %s, %s )" % \
- ( result, varDict[key], "${%s}" % key, varDict ) )
+ result = varexpand("$%s" % key, varDict)
+
+ self.assertFalse(result != varDict[key],
+ msg="Got %s != %s, from varexpand(%s, %s)" %
+ (result, varDict[key], "$%s" % key, varDict))
+ result = varexpand("${%s}" % key, varDict)
+ self.assertFalse(result != varDict[key],
+ msg="Got %s != %s, from varexpand(%s, %s)" %
+ (result, varDict[key], "${%s}" % key, varDict))
def testVarExpandBackslashes(self):
"""
@@ -49,44 +49,44 @@ class VarExpandTestCase(TestCase):
("\\'", "\\'"),
]
for test in tests:
- result = varexpand( test[0], varDict )
- self.assertFalse( result != test[1],
- msg="Got %s != %s from varexpand( %s, %s )" \
- % ( result, test[1], test[0], varDict ) )
+ result = varexpand(test[0], varDict)
+ self.assertFalse(result != test[1],
+ msg="Got %s != %s from varexpand(%s, %s)"
+ % (result, test[1], test[0], varDict))
def testVarExpandDoubleQuotes(self):
-
- varDict = { "a":"5" }
- tests = [ ("\"${a}\"", "\"5\"") ]
+
+ varDict = {"a": "5"}
+ tests = [("\"${a}\"", "\"5\"")]
for test in tests:
- result = varexpand( test[0], varDict )
- self.assertFalse( result != test[1],
- msg="Got %s != %s from varexpand( %s, %s )" \
- % ( result, test[1], test[0], varDict ) )
+ result = varexpand(test[0], varDict)
+ self.assertFalse(result != test[1],
+ msg="Got %s != %s from varexpand(%s, %s)"
+ % (result, test[1], test[0], varDict))
def testVarExpandSingleQuotes(self):
-
- varDict = { "a":"5" }
- tests = [ ("\'${a}\'", "\'${a}\'") ]
+
+ varDict = {"a": "5"}
+ tests = [("\'${a}\'", "\'${a}\'")]
for test in tests:
- result = varexpand( test[0], varDict )
- self.assertFalse( result != test[1],
- msg="Got %s != %s from varexpand( %s, %s )" \
- % ( result, test[1], test[0], varDict ) )
+ result = varexpand(test[0], varDict)
+ self.assertFalse(result != test[1],
+ msg="Got %s != %s from varexpand(%s, %s)"
+ % (result, test[1], test[0], varDict))
def testVarExpandFail(self):
- varDict = { "a":"5", "b":"7", "c":"15" }
+ varDict = {"a": "5", "b": "7", "c": "15"}
- testVars = [ "fail" ]
+ testVars = ["fail"]
for var in testVars:
- result = varexpand( "$%s" % var, varDict )
- self.assertFalse( len(result),
- msg="Got %s == %s, from varexpand( %s, %s )" \
- % ( result, var, "$%s" % var, varDict ) )
-
- result = varexpand( "${%s}" % var, varDict )
- self.assertFalse( len(result),
- msg="Got %s == %s, from varexpand( %s, %s )" \
- % ( result, var, "${%s}" % var, varDict ) )
+ result = varexpand("$%s" % var, varDict)
+ self.assertFalse(len(result),
+ msg="Got %s == %s, from varexpand(%s, %s)"
+ % (result, var, "$%s" % var, varDict))
+
+ result = varexpand("${%s}" % var, varDict)
+ self.assertFalse(len(result),
+ msg="Got %s == %s, from varexpand(%s, %s)"
+ % (result, var, "${%s}" % var, varDict))
diff --git a/pym/portage/tests/util/test_whirlpool.py b/pym/portage/tests/util/test_whirlpool.py
index dd0de899a..fbe7cae56 100644
--- a/pym/portage/tests/util/test_whirlpool.py
+++ b/pym/portage/tests/util/test_whirlpool.py
@@ -1,4 +1,4 @@
-# Copyright 2011 Gentoo Foundation
+# Copyright 2011-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import subprocess
@@ -11,6 +11,6 @@ from portage.tests import TestCase
class WhirlpoolTestCase(TestCase):
def testBundledWhirlpool(self):
# execute the tests bundled with the whirlpool module
- retval = subprocess.call([portage._python_interpreter, "-Wd",
+ retval = subprocess.call([portage._python_interpreter, "-b", "-Wd",
os.path.join(PORTAGE_PYM_PATH, "portage/util/whirlpool.py")])
self.assertEqual(retval, os.EX_OK)
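The -b switch (also added to the runTests shebang earlier in this patch) makes Python emit BytesWarning on implicit bytes/str mixing, which -Wd then displays; on Python 3 this can be seen directly:

    import subprocess, sys

    # On Python 3, -b turns implicit mixing such as str(b"x") into a
    # BytesWarning printed to stderr.
    subprocess.call([sys.executable, "-b", "-c", 'str(b"x")'])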
diff --git a/pym/portage/tests/versions/test_cpv_sort_key.py b/pym/portage/tests/versions/test_cpv_sort_key.py
index a223d78c7..eeb0eae69 100644
--- a/pym/portage/tests/versions/test_cpv_sort_key.py
+++ b/pym/portage/tests/versions/test_cpv_sort_key.py
@@ -8,9 +8,10 @@ class CpvSortKeyTestCase(TestCase):
def testCpvSortKey(self):
- tests = [ (("a/b-2_alpha", "a", "b", "a/b-2", "a/a-1", "a/b-1"),
- ( "a", "a/a-1", "a/b-1", "a/b-2_alpha", "a/b-2", "b")),
+ tests = [
+ (("a/b-2_alpha", "a", "b", "a/b-2", "a/a-1", "a/b-1"),
+ ("a", "a/a-1", "a/b-1", "a/b-2_alpha", "a/b-2", "b")),
]
for test in tests:
- self.assertEqual( tuple(sorted(test[0], key=cpv_sort_key())), test[1] )
+ self.assertEqual(tuple(sorted(test[0], key=cpv_sort_key())), test[1])
diff --git a/pym/portage/tests/versions/test_vercmp.py b/pym/portage/tests/versions/test_vercmp.py
index aa7969ce8..78fe7ede8 100644
--- a/pym/portage/tests/versions/test_vercmp.py
+++ b/pym/portage/tests/versions/test_vercmp.py
@@ -8,10 +8,11 @@ from portage.versions import vercmp
class VerCmpTestCase(TestCase):
""" A simple testCase for portage.versions.vercmp()
"""
-
+
def testVerCmpGreater(self):
-
- tests = [ ( "6.0", "5.0"), ("5.0","5"),
+
+ tests = [
+ ("6.0", "5.0"), ("5.0", "5"),
("1.0-r1", "1.0-r0"),
("1.0-r1", "1.0"),
("cvs.9999", "9999"),
@@ -24,14 +25,15 @@ class VerCmpTestCase(TestCase):
("12.2.5", "12.2b"),
]
for test in tests:
- self.assertFalse( vercmp( test[0], test[1] ) <= 0, msg="%s < %s? Wrong!" % (test[0],test[1]) )
+ self.assertFalse(vercmp(test[0], test[1]) <= 0, msg="%s < %s? Wrong!" % (test[0], test[1]))
def testVerCmpLess(self):
"""
pre < alpha < beta < rc < p -> test each of these, they are inductive (or should be..)
"""
- tests = [ ( "4.0", "5.0"), ("5", "5.0"), ("1.0_pre2","1.0_p2"),
- ("1.0_alpha2", "1.0_p2"),("1.0_alpha1", "1.0_beta1"),("1.0_beta3","1.0_rc3"),
+ tests = [
+ ("4.0", "5.0"), ("5", "5.0"), ("1.0_pre2", "1.0_p2"),
+ ("1.0_alpha2", "1.0_p2"), ("1.0_alpha1", "1.0_beta1"), ("1.0_beta3", "1.0_rc3"),
("1.001000000000000000001", "1.001000000000000000002"),
("1.00100000000", "1.0010000000000000001"),
("9999", "cvs.9999"),
@@ -47,23 +49,25 @@ class VerCmpTestCase(TestCase):
("12.2b", "12.2.5"),
]
for test in tests:
- self.assertFalse( vercmp( test[0], test[1]) >= 0, msg="%s > %s? Wrong!" % (test[0],test[1]))
-
-
+ self.assertFalse(vercmp(test[0], test[1]) >= 0, msg="%s > %s? Wrong!" % (test[0], test[1]))
+
def testVerCmpEqual(self):
-
- tests = [ ("4.0", "4.0"),
+
+ tests = [
+ ("4.0", "4.0"),
("1.0", "1.0"),
("1.0-r0", "1.0"),
("1.0", "1.0-r0"),
("1.0-r0", "1.0-r0"),
- ("1.0-r1", "1.0-r1")]
+ ("1.0-r1", "1.0-r1")
+ ]
for test in tests:
- self.assertFalse( vercmp( test[0], test[1]) != 0, msg="%s != %s? Wrong!" % (test[0],test[1]))
-
+ self.assertFalse(vercmp(test[0], test[1]) != 0, msg="%s != %s? Wrong!" % (test[0], test[1]))
+
def testVerNotEqual(self):
-
- tests = [ ("1","2"),("1.0_alpha","1.0_pre"),("1.0_beta","1.0_alpha"),
+
+ tests = [
+ ("1", "2"), ("1.0_alpha", "1.0_pre"), ("1.0_beta", "1.0_alpha"),
("0", "0.0"),
("cvs.9999", "9999"),
("1.0-r0", "1.0-r1"),
@@ -77,4 +81,4 @@ class VerCmpTestCase(TestCase):
("12.2b", "12.2"),
]
for test in tests:
- self.assertFalse( vercmp( test[0], test[1]) == 0, msg="%s == %s? Wrong!" % (test[0],test[1]))
+ self.assertFalse(vercmp(test[0], test[1]) == 0, msg="%s == %s? Wrong!" % (test[0], test[1]))
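For orientation, vercmp(a, b) returns a positive number, zero, or a negative number in the style of cmp(); these assertions mirror cases from the tests above:

    from portage.versions import vercmp

    assert vercmp("1.0-r1", "1.0") > 0
    assert vercmp("1.0_alpha1", "1.0_beta1") < 0
    assert vercmp("1.0-r0", "1.0") == 0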
diff --git a/pym/portage/update.py b/pym/portage/update.py
index 121e95720..df4e11b54 100644
--- a/pym/portage/update.py
+++ b/pym/portage/update.py
@@ -1,11 +1,14 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import errno
import io
import re
import stat
import sys
+import warnings
from portage import os
from portage import _encodings
@@ -13,21 +16,19 @@ from portage import _unicode_decode
from portage import _unicode_encode
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.dep:Atom,dep_getkey,isvalidatom,' + \
- 'remove_slot',
+ 'portage.dep:Atom,dep_getkey,isvalidatom,match_from_list',
'portage.util:ConfigProtect,new_protect_filename,' + \
'normalize_path,write_atomic,writemsg',
- 'portage.util.listdir:_ignorecvs_dirs',
- 'portage.versions:catsplit,ververify'
+ 'portage.versions:_get_slot_re',
)
-from portage.const import USER_CONFIG_PATH
-from portage.dep import _get_slot_re
+from portage.const import USER_CONFIG_PATH, VCS_DIRS
from portage.eapi import _get_eapi_attrs
from portage.exception import DirectoryNotFound, InvalidAtom, PortageException
from portage.localization import _
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
_unicode = str
else:
@@ -35,7 +36,10 @@ else:
ignored_dbentries = ("CONTENTS", "environment.bz2")
-def update_dbentry(update_cmd, mycontent, eapi=None):
+def update_dbentry(update_cmd, mycontent, eapi=None, parent=None):
+
+ if parent is not None:
+ eapi = parent.eapi
if update_cmd[0] == "move":
old_value = _unicode(update_cmd[1])
@@ -44,28 +48,76 @@ def update_dbentry(update_cmd, mycontent, eapi=None):
# Use isvalidatom() to check if this move is valid for the
# EAPI (characters allowed in package names may vary).
if old_value in mycontent and isvalidatom(new_value, eapi=eapi):
- old_value = re.escape(old_value);
- mycontent = re.sub(old_value+"(:|$|\\s)", new_value+"\\1", mycontent)
- def myreplace(matchobj):
- # Strip slot and * operator if necessary
- # so that ververify works.
- ver = remove_slot(matchobj.group(2))
- ver = ver.rstrip("*")
- if ververify(ver):
- return "%s-%s" % (new_value, matchobj.group(2))
- else:
- return "".join(matchobj.groups())
- mycontent = re.sub("(%s-)(\\S*)" % old_value, myreplace, mycontent)
+ # this split preserves existing whitespace
+ split_content = re.split(r'(\s+)', mycontent)
+ modified = False
+ for i, token in enumerate(split_content):
+ if old_value not in token:
+ continue
+ try:
+ atom = Atom(token, eapi=eapi)
+ except InvalidAtom:
+ continue
+ if atom.cp != old_value:
+ continue
+
+ new_atom = Atom(token.replace(old_value, new_value, 1),
+ eapi=eapi)
+
+ # Avoid creating self-blockers for bug #367215.
+ if new_atom.blocker and parent is not None and \
+ parent.cp == new_atom.cp and \
+ match_from_list(new_atom, [parent]):
+ continue
+
+ split_content[i] = _unicode(new_atom)
+ modified = True
+
+ if modified:
+ mycontent = "".join(split_content)
+
elif update_cmd[0] == "slotmove" and update_cmd[1].operator is None:
- pkg, origslot, newslot = update_cmd[1:]
- old_value = "%s:%s" % (pkg, origslot)
- if old_value in mycontent:
- old_value = re.escape(old_value)
- new_value = "%s:%s" % (pkg, newslot)
- mycontent = re.sub(old_value+"($|\\s)", new_value+"\\1", mycontent)
+ orig_atom, origslot, newslot = update_cmd[1:]
+ orig_cp = orig_atom.cp
+
+ # We don't support versioned slotmove atoms here, since it can be
+ # difficult to determine if the version constraints really match
+ # the atoms that we're trying to update.
+ if orig_atom.version is None and orig_cp in mycontent:
+ # this split preserves existing whitespace
+ split_content = re.split(r'(\s+)', mycontent)
+ modified = False
+ for i, token in enumerate(split_content):
+ if orig_cp not in token:
+ continue
+ try:
+ atom = Atom(token, eapi=eapi)
+ except InvalidAtom:
+ continue
+ if atom.cp != orig_cp:
+ continue
+ if atom.slot is None or atom.slot != origslot:
+ continue
+
+ slot_part = newslot
+ if atom.sub_slot is not None:
+ if atom.sub_slot == origslot:
+ sub_slot = newslot
+ else:
+ sub_slot = atom.sub_slot
+ slot_part += "/" + sub_slot
+ if atom.slot_operator is not None:
+ slot_part += atom.slot_operator
+
+ split_content[i] = atom.with_slot(slot_part)
+ modified = True
+
+ if modified:
+ mycontent = "".join(split_content)
+
return mycontent
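Both branches rely on the same trick: re.split() with a capturing group keeps the whitespace runs as list elements, so rewriting individual atom tokens and joining reproduces the original spacing exactly.

    import re

    content = " dev-libs/A:0  >=dev-libs/A-1:0 "
    parts = re.split(r'(\s+)', content)
    # ['', ' ', 'dev-libs/A:0', '  ', '>=dev-libs/A-1:0', ' ', '']
    assert "".join(parts) == content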
-def update_dbentries(update_iter, mydata, eapi=None):
+def update_dbentries(update_iter, mydata, eapi=None, parent=None):
"""Performs update commands and returns a
dict containing only the updated items."""
updated_items = {}
@@ -79,7 +131,8 @@ def update_dbentries(update_iter, mydata, eapi=None):
is_encoded = mycontent is not orig_content
orig_content = mycontent
for update_cmd in update_iter:
- mycontent = update_dbentry(update_cmd, mycontent, eapi=eapi)
+ mycontent = update_dbentry(update_cmd, mycontent,
+ eapi=eapi, parent=parent)
if mycontent != orig_content:
if is_encoded:
mycontent = _unicode_encode(mycontent,
@@ -88,10 +141,14 @@ def update_dbentries(update_iter, mydata, eapi=None):
updated_items[k] = mycontent
return updated_items
-def fixdbentries(update_iter, dbdir, eapi=None):
+def fixdbentries(update_iter, dbdir, eapi=None, parent=None):
"""Performs update commands which result in search and replace operations
for each of the files in dbdir (excluding CONTENTS and environment.bz2).
Returns True when actual modifications are necessary and False otherwise."""
+
+ warnings.warn("portage.update.fixdbentries() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
mydata = {}
for myfile in [f for f in os.listdir(dbdir) if f not in ignored_dbentries]:
file_path = os.path.join(dbdir, myfile)
@@ -100,7 +157,8 @@ def fixdbentries(update_iter, dbdir, eapi=None):
mode='r', encoding=_encodings['repo.content'],
errors='replace') as f:
mydata[myfile] = f.read()
- updated_items = update_dbentries(update_iter, mydata, eapi=eapi)
+ updated_items = update_dbentries(update_iter, mydata,
+ eapi=eapi, parent=parent)
for myfile, mycontent in updated_items.items():
file_path = os.path.join(dbdir, myfile)
write_atomic(file_path, mycontent, encoding=_encodings['repo.content'])
@@ -225,7 +283,8 @@ def parse_updates(mycontent):
return myupd, errors
def update_config_files(config_root, protect, protect_mask, update_iter, match_callback = None):
- """Perform global updates on /etc/portage/package.*.
+ """Perform global updates on /etc/portage/package.*, /etc/portage/profile/package.*,
+ /etc/portage/profile/packages and /etc/portage/sets.
config_root - location of files to update
protect - list of paths from CONFIG_PROTECT
protect_mask - list of paths from CONFIG_PROTECT_MASK
@@ -248,9 +307,15 @@ def update_config_files(config_root, protect, protect_mask, update_iter, match_c
"package.accept_keywords", "package.env",
"package.keywords", "package.license",
"package.mask", "package.properties",
- "package.unmask", "package.use"
+ "package.unmask", "package.use", "sets"
]
- myxfiles += [os.path.join("profile", x) for x in myxfiles]
+ myxfiles += [os.path.join("profile", x) for x in (
+ "packages", "package.accept_keywords",
+ "package.keywords", "package.mask",
+ "package.unmask", "package.use",
+ "package.use.force", "package.use.mask",
+ "package.use.stable.force", "package.use.stable.mask"
+ )]
abs_user_config = os.path.join(config_root, USER_CONFIG_PATH)
recursivefiles = []
for x in myxfiles:
@@ -269,7 +334,7 @@ def update_config_files(config_root, protect, protect_mask, update_iter, match_c
except UnicodeDecodeError:
dirs.remove(y_enc)
continue
- if y.startswith(".") or y in _ignorecvs_dirs:
+ if y.startswith(".") or y in VCS_DIRS:
dirs.remove(y_enc)
for y in files:
try:
@@ -299,7 +364,6 @@ def update_config_files(config_root, protect, protect_mask, update_iter, match_c
if f is not None:
f.close()
- # update /etc/portage/packages.*
ignore_line_re = re.compile(r'^#|^\s*$')
if repo_dict is None:
update_items = [(None, update_iter)]
@@ -319,6 +383,9 @@ def update_config_files(config_root, protect, protect_mask, update_iter, match_c
if atom[:1] == "-":
# package.mask supports incrementals
atom = atom[1:]
+ if atom[:1] == "*":
+					# the "packages" file supports "*"-prefixed atoms as an indication of system packages.
+ atom = atom[1:]
if not isvalidatom(atom):
continue
new_atom = update_dbentry(update_cmd, atom)
diff --git a/pym/portage/util/ExtractKernelVersion.py b/pym/portage/util/ExtractKernelVersion.py
index 69bd58a68..af4a4fe63 100644
--- a/pym/portage/util/ExtractKernelVersion.py
+++ b/pym/portage/util/ExtractKernelVersion.py
@@ -61,18 +61,18 @@ def ExtractKernelVersion(base_dir):
# Grab a list of files named localversion* and sort them
localversions = os.listdir(base_dir)
- for x in range(len(localversions)-1,-1,-1):
+ for x in range(len(localversions) - 1, -1, -1):
if localversions[x][:12] != "localversion":
del localversions[x]
localversions.sort()
# Append the contents of each to the version string, stripping ALL whitespace
for lv in localversions:
- version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
+ version += "".join(" ".join(grabfile(base_dir + "/" + lv)).split())
# Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
kernelconfig = getconfig(base_dir+"/.config")
if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
- return (version,None)
+ return (version, None)
diff --git a/pym/portage/util/SlotObject.py b/pym/portage/util/SlotObject.py
index a59dfc199..4bb682258 100644
--- a/pym/portage/util/SlotObject.py
+++ b/pym/portage/util/SlotObject.py
@@ -48,4 +48,3 @@ class SlotObject(object):
setattr(obj, myattr, getattr(self, myattr))
return obj
-
diff --git a/pym/portage/util/_ShelveUnicodeWrapper.py b/pym/portage/util/_ShelveUnicodeWrapper.py
new file mode 100644
index 000000000..adbd5199f
--- /dev/null
+++ b/pym/portage/util/_ShelveUnicodeWrapper.py
@@ -0,0 +1,45 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class ShelveUnicodeWrapper(object):
+ """
+	Convert unicode to str and back again, since the python-2.x shelve
+	module doesn't support unicode.
+ """
+ def __init__(self, shelve_instance):
+ self._shelve = shelve_instance
+
+ def _encode(self, s):
+ if isinstance(s, unicode):
+ s = s.encode('utf_8')
+ return s
+
+ def __len__(self):
+ return len(self._shelve)
+
+ def __contains__(self, k):
+ return self._encode(k) in self._shelve
+
+ def __iter__(self):
+ return self._shelve.__iter__()
+
+ def items(self):
+ return self._shelve.iteritems()
+
+ def __setitem__(self, k, v):
+ self._shelve[self._encode(k)] = self._encode(v)
+
+ def __getitem__(self, k):
+ return self._shelve[self._encode(k)]
+
+ def __delitem__(self, k):
+ del self._shelve[self._encode(k)]
+
+ def get(self, k, *args):
+ return self._shelve.get(self._encode(k), *args)
+
+ def close(self):
+ self._shelve.close()
+
+ def clear(self):
+ self._shelve.clear()
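A hypothetical python2-era usage of the wrapper: unicode keys and values are transparently stored as utf_8-encoded str, which is all the py2 shelve module accepts.

    import shelve

    db = ShelveUnicodeWrapper(shelve.open("/tmp/example.shelve"))
    db[u"caf\xe9"] = u"value"
    assert db.get(u"caf\xe9") == "value"
    db.close()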
diff --git a/pym/portage/util/__init__.py b/pym/portage/util/__init__.py
index 4645be52f..614b2b388 100644
--- a/pym/portage/util/__init__.py
+++ b/pym/portage/util/__init__.py
@@ -1,6 +1,8 @@
-# Copyright 2004-2012 Gentoo Foundation
+# Copyright 2004-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['apply_permissions', 'apply_recursive_permissions',
'apply_secpass_permissions', 'apply_stat_permissions', 'atomic_ofstream',
'cmp_sort_key', 'ConfigProtect', 'dump_traceback', 'ensure_dirs',
@@ -31,21 +33,26 @@ import portage
portage.proxy.lazyimport.lazyimport(globals(),
'pickle',
'portage.dep:Atom',
- 'portage.util.listdir:_ignorecvs_dirs'
+ 'subprocess',
)
from portage import os
-from portage import subprocess_getstatusoutput
from portage import _encodings
from portage import _os_merge
from portage import _unicode_encode
from portage import _unicode_decode
+from portage.const import VCS_DIRS
from portage.exception import InvalidAtom, PortageException, FileNotFound, \
OperationNotPermitted, ParseError, PermissionDenied, ReadOnlyFileSystem
from portage.localization import _
from portage.proxy.objectproxy import ObjectProxy
from portage.cache.mappings import UserDict
+if sys.hexversion >= 0x3000000:
+ _unicode = str
+else:
+ _unicode = unicode
+
noiselimit = 0
def initialize_logger(level=logging.WARN):
@@ -57,7 +64,7 @@ def initialize_logger(level=logging.WARN):
"""
logging.basicConfig(level=logging.WARN, format='[%(levelname)-4s] %(message)s')
-def writemsg(mystr,noiselevel=0,fd=None):
+def writemsg(mystr, noiselevel=0, fd=None):
"""Prints out warning and debug messages based on the noiselimit setting"""
global noiselimit
if fd is None:
@@ -75,7 +82,7 @@ def writemsg(mystr,noiselevel=0,fd=None):
fd.write(mystr)
fd.flush()
-def writemsg_stdout(mystr,noiselevel=0):
+def writemsg_stdout(mystr, noiselevel=0):
"""Prints messages stdout based on the noiselimit setting"""
writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
@@ -100,7 +107,7 @@ def writemsg_level(msg, level=0, noiselevel=0):
writemsg(msg, noiselevel=noiselevel, fd=fd)
def normalize_path(mypath):
- """
+ """
os.path.normpath("//foo") returns "//foo" instead of "/foo"
We dislike this behavior so we create our own normpath func
to fix it.
@@ -120,8 +127,8 @@ def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False
"""This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
begins with a #, it is ignored, as are empty lines"""
- mylines=grablines(myfilename, recursive, remember_source_file=True)
- newlines=[]
+ mylines = grablines(myfilename, recursive, remember_source_file=True)
+ newlines = []
for x, source_file in mylines:
#the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
@@ -139,10 +146,10 @@ def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False
myline = " ".join(myline)
if not myline:
continue
- if myline[0]=="#":
+ if myline[0] == "#":
# Check if we have a compat-level string. BC-integration data.
# '##COMPAT==>N<==' 'some string attached to it'
- mylinetest = myline.split("<==",1)
+ mylinetest = myline.split("<==", 1)
if len(mylinetest) == 2:
myline_potential = mylinetest[1]
mylinetest = mylinetest[0].split("##COMPAT==>")
@@ -159,7 +166,7 @@ def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False
newlines.append(myline)
return newlines
-def map_dictlist_vals(func,myDict):
+def map_dictlist_vals(func, myDict):
"""Performs a function on each value of each key in a dictlist.
Returns a new dictlist."""
new_dl = {}
@@ -173,7 +180,7 @@ def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0
Stacks an array of dict-types into one array. Optionally merging or
overwriting matching key/value pairs for the dict[key]->list.
Returns a single dict. Higher index in lists is preferenced.
-
+
Example usage:
>>> from portage.util import stack_dictlist
>>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
@@ -188,7 +195,7 @@ def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0
>>> { 'KEYWORDS':['alpha'] }
>>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
>>> { 'KEYWORDS':['alpha'] }
-
+
@param original_dicts a list of (dictionary objects or None)
@type list
@param incremental True or false depending on whether new keys should overwrite
@@ -199,7 +206,7 @@ def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0
@type list
	@param ignore_none Appears to be ignored, but was probably used long ago.
@type boolean
-
+
"""
final_dict = {}
for mydict in original_dicts:
@@ -208,7 +215,7 @@ def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0
for y in mydict:
if not y in final_dict:
final_dict[y] = []
-
+
for thing in mydict[y]:
if thing:
if incremental or y in incrementals:
@@ -245,12 +252,13 @@ def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
def append_repo(atom_list, repo_name, remember_source_file=False):
"""
Takes a list of valid atoms without repo spec and appends ::repo_name.
+ If an atom already has a repo part, then it is preserved (see bug #461948).
"""
if remember_source_file:
- return [(Atom(atom + "::" + repo_name, allow_wildcard=True, allow_repo=True), source) \
+ return [(atom.repo is not None and atom or atom.with_repo(repo_name), source) \
for atom, source in atom_list]
else:
- return [Atom(atom + "::" + repo_name, allow_wildcard=True, allow_repo=True) \
+ return [atom.repo is not None and atom or atom.with_repo(repo_name) \
for atom in atom_list]
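A minimal usage sketch (illustration only, not part of the diff; assumes a portage install that provides portage.dep.Atom):

    from portage.dep import Atom
    from portage.util import append_repo

    atoms = [Atom("dev-lang/python"),
        Atom("sys-apps/sed::gentoo", allow_repo=True)]
    # The first atom gains ::myrepo; the second keeps its ::gentoo part.
    print(append_repo(atoms, "myrepo"))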
def stack_lists(lists, incremental=1, remember_source_file=False,
@@ -334,7 +342,7 @@ def stack_lists(lists, incremental=1, remember_source_file=False,
def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
"""
This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary
-
+
@param myfilename: file to process
@type myfilename: string (path)
@param juststrings: only return strings
@@ -350,9 +358,9 @@ def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
1. Returns the lines in a file in a dictionary, for example:
'sys-apps/portage x86 amd64 ppc'
would return
- { "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ]
+ {"sys-apps/portage" : ['x86', 'amd64', 'ppc']}
"""
- newdict={}
+ newdict = {}
for x in grablines(myfilename, recursive):
#the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
#into single spaces.
@@ -379,52 +387,75 @@ def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
newdict[k] = " ".join(v)
return newdict
-def read_corresponding_eapi_file(filename):
+_eapi_cache = {}
+
+def read_corresponding_eapi_file(filename, default="0"):
"""
Read the 'eapi' file from the directory 'filename' is in.
Returns "0" if the file is not present or invalid.
"""
- default = "0"
eapi_file = os.path.join(os.path.dirname(filename), "eapi")
try:
- f = io.open(_unicode_encode(eapi_file,
+ eapi = _eapi_cache[eapi_file]
+ except KeyError:
+ pass
+ else:
+ if eapi is None:
+ return default
+ return eapi
+
+ eapi = None
+ try:
+ with io.open(_unicode_encode(eapi_file,
encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'], errors='replace')
- lines = f.readlines()
+ mode='r', encoding=_encodings['repo.content'], errors='replace') as f:
+ lines = f.readlines()
if len(lines) == 1:
eapi = lines[0].rstrip("\n")
else:
writemsg(_("--- Invalid 'eapi' file (doesn't contain exactly one line): %s\n") % (eapi_file),
noiselevel=-1)
- eapi = default
- f.close()
except IOError:
- eapi = default
+ pass
+ _eapi_cache[eapi_file] = eapi
+ if eapi is None:
+ return default
return eapi
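A usage sketch (illustration only; the path below is hypothetical):

    from portage.util import read_corresponding_eapi_file
    # Reads the 'eapi' file next to package.mask; results are cached per
    # eapi file in _eapi_cache, and 'default' is returned when absent.
    eapi = read_corresponding_eapi_file("/etc/portage/profile/package.mask")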
def grabdict_package(myfilename, juststrings=0, recursive=0, allow_wildcard=False, allow_repo=False,
verify_eapi=False, eapi=None):
""" Does the same thing as grabdict except it validates keys
with isvalidatom()"""
- pkgs=grabdict(myfilename, juststrings, empty=1, recursive=recursive)
- if not pkgs:
- return pkgs
- if verify_eapi and eapi is None:
- eapi = read_corresponding_eapi_file(myfilename)
- # We need to call keys() here in order to avoid the possibility of
- # "RuntimeError: dictionary changed size during iteration"
- # when an invalid atom is deleted.
+ if recursive:
+ file_list = _recursive_file_list(myfilename)
+ else:
+ file_list = [myfilename]
+
atoms = {}
- for k, v in pkgs.items():
- try:
- k = Atom(k, allow_wildcard=allow_wildcard, allow_repo=allow_repo, eapi=eapi)
- except InvalidAtom as e:
- writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, e),
- noiselevel=-1)
- else:
- atoms[k] = v
+ for filename in file_list:
+ d = grabdict(filename, juststrings=False,
+ empty=True, recursive=False, incremental=True)
+ if not d:
+ continue
+ if verify_eapi and eapi is None:
+ eapi = read_corresponding_eapi_file(myfilename)
+
+ for k, v in d.items():
+ try:
+ k = Atom(k, allow_wildcard=allow_wildcard,
+ allow_repo=allow_repo, eapi=eapi)
+ except InvalidAtom as e:
+ writemsg(_("--- Invalid atom in %s: %s\n") % (filename, e),
+ noiselevel=-1)
+ else:
+ atoms.setdefault(k, []).extend(v)
+
+ if juststrings:
+ for k, v in atoms.items():
+ atoms[k] = " ".join(v)
+
return atoms
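A usage sketch (illustration only; the path is hypothetical):

    from portage.util import grabdict_package
    # Parses package.use-style files into an {Atom: [flags]} mapping,
    # walking directories via _recursive_file_list() when recursive is set.
    d = grabdict_package("/etc/portage/package.use", recursive=1)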
def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=False, allow_repo=False,
@@ -450,7 +481,7 @@ def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=Fals
writemsg(_("--- Invalid atom in %s: %s\n") % (source_file, e),
noiselevel=-1)
else:
- if pkg_orig == str(pkg):
+ if pkg_orig == _unicode(pkg):
# normal atom, so return as Atom instance
if remember_source_file:
atoms.append((pkg, source_file))
@@ -464,35 +495,63 @@ def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=Fals
atoms.append(pkg_orig)
return atoms
-def grablines(myfilename, recursive=0, remember_source_file=False):
- mylines=[]
- if recursive and os.path.isdir(myfilename):
- if os.path.basename(myfilename) in _ignorecvs_dirs:
- return mylines
+def _recursive_basename_filter(f):
+ return not f.startswith(".") and not f.endswith("~")
+
+def _recursive_file_list(path):
+ # path may be a regular file or a directory
+
+ def onerror(e):
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(path)
+
+ stack = [os.path.split(path)]
+
+ while stack:
+ parent, fname = stack.pop()
+ fullpath = os.path.join(parent, fname)
+
try:
- dirlist = os.listdir(myfilename)
+ st = os.stat(fullpath)
except OSError as e:
- if e.errno == PermissionDenied.errno:
- raise PermissionDenied(myfilename)
- elif e.errno in (errno.ENOENT, errno.ESTALE):
- return mylines
- else:
- raise
- dirlist.sort()
- for f in dirlist:
- if not f.startswith(".") and not f.endswith("~"):
- mylines.extend(grablines(
- os.path.join(myfilename, f), recursive, remember_source_file))
+ onerror(e)
+ continue
+
+ if stat.S_ISDIR(st.st_mode):
+ if fname in VCS_DIRS or not _recursive_basename_filter(fname):
+ continue
+ try:
+ children = os.listdir(fullpath)
+ except OSError as e:
+ onerror(e)
+ continue
+
+ # Sort in reverse, since we pop from the end of the stack.
+ # Include regular files in the stack, so files are sorted
+ # together with directories.
+ children.sort(reverse=True)
+ stack.extend((fullpath, x) for x in children)
+
+ elif stat.S_ISREG(st.st_mode):
+ if _recursive_basename_filter(fname):
+ yield fullpath
+
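A traversal sketch (illustration only; _recursive_file_list is private API and the directory path is hypothetical):

    from portage.util import _recursive_file_list
    # Yields regular files depth-first in sorted order, skipping VCS
    # directories, dotfiles and backup files ending in '~'.
    for path in _recursive_file_list("/etc/portage/package.use"):
        print(path)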
+def grablines(myfilename, recursive=0, remember_source_file=False):
+ mylines = []
+ if recursive:
+ for f in _recursive_file_list(myfilename):
+ mylines.extend(grablines(f, recursive=False,
+ remember_source_file=remember_source_file))
+
else:
try:
- myfile = io.open(_unicode_encode(myfilename,
+ with io.open(_unicode_encode(myfilename,
encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['content'], errors='replace')
- if remember_source_file:
- mylines = [(line, myfilename) for line in myfile.readlines()]
- else:
- mylines = myfile.readlines()
- myfile.close()
+ mode='r', encoding=_encodings['content'], errors='replace') as myfile:
+ if remember_source_file:
+ mylines = [(line, myfilename) for line in myfile.readlines()]
+ else:
+ mylines = myfile.readlines()
except IOError as e:
if e.errno == PermissionDenied.errno:
raise PermissionDenied(myfilename)
@@ -502,7 +561,7 @@ def grablines(myfilename, recursive=0, remember_source_file=False):
raise
return mylines
-def writedict(mydict,myfilename,writekey=True):
+def writedict(mydict, myfilename, writekey=True):
"""Writes out a dict to a file; writekey=0 mode doesn't write out
the key and assumes all values are strings, not lists."""
lines = []
@@ -528,18 +587,44 @@ def shlex_split(s):
rval = [_unicode_decode(x) for x in rval]
return rval
-class _tolerant_shlex(shlex.shlex):
+class _getconfig_shlex(shlex.shlex):
+
+ def __init__(self, portage_tolerant=False, **kwargs):
+ shlex.shlex.__init__(self, **kwargs)
+ self.__portage_tolerant = portage_tolerant
+
+ def allow_sourcing(self, var_expand_map):
+ self.source = portage._native_string("source")
+ self.var_expand_map = var_expand_map
+
def sourcehook(self, newfile):
try:
+ newfile = varexpand(newfile, self.var_expand_map)
return shlex.shlex.sourcehook(self, newfile)
except EnvironmentError as e:
- writemsg(_("!!! Parse error in '%s': source command failed: %s\n") % \
- (self.infile, str(e)), noiselevel=-1)
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(newfile)
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+ writemsg("open('%s', 'r'): %s\n" % (newfile, e), noiselevel=-1)
+ raise
+
+ msg = self.error_leader()
+ if e.errno == errno.ENOTDIR:
+ msg += _("%s: Not a directory") % newfile
+ else:
+ msg += _("%s: No such file or directory") % newfile
+
+ if self.__portage_tolerant:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ else:
+ raise ParseError(msg)
return (newfile, io.StringIO())
_invalid_var_name_re = re.compile(r'^\d|\W')
-def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
+def getconfig(mycfg, tolerant=False, allow_sourcing=False, expand=True,
+ recursive=False):
+
if isinstance(expand, dict):
# Some existing variable definitions have been
# passed in, for use in substitutions.
@@ -548,6 +633,21 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
else:
expand_map = {}
mykeys = {}
+
+ if recursive:
+ # Emulate source commands so that syntax error messages
+ # can display real file names and line numbers.
+ if not expand:
+ expand_map = False
+ fname = None
+ for fname in _recursive_file_list(mycfg):
+ mykeys.update(getconfig(fname, tolerant=tolerant,
+ allow_sourcing=allow_sourcing, expand=expand_map,
+ recursive=False) or {})
+ if fname is None:
+ return None
+ return mykeys
+
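A hedged sketch of the new recursive mode (the path is hypothetical):

    from portage.util import getconfig
    # Parses make.conf and, if it is a directory, every file beneath it,
    # merging the results into a single dict.
    d = getconfig("/etc/portage/make.conf", tolerant=True,
        allow_sourcing=True, recursive=True)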
f = None
try:
# NOTE: shlex doesn't support unicode objects with Python 2
@@ -572,49 +672,53 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
if f is not None:
f.close()
+ # Since this file has unicode_literals enabled, and Python 2's
+ # shlex implementation does not support unicode, the following code
+ # uses _native_string() to encode unicode literals when necessary.
+
# Workaround for avoiding a silent error in shlex that is
# triggered by a source statement at the end of the file
# without a trailing newline after the source statement.
- if content and content[-1] != '\n':
- content += '\n'
+ if content and content[-1] != portage._native_string('\n'):
+ content += portage._native_string('\n')
# Warn about dos-style line endings since that prevents
# people from being able to source them with bash.
- if '\r' in content:
+ if portage._native_string('\r') in content:
writemsg(("!!! " + _("Please use dos2unix to convert line endings " + \
"in config file: '%s'") + "\n") % mycfg, noiselevel=-1)
lex = None
try:
- if tolerant:
- shlex_class = _tolerant_shlex
- else:
- shlex_class = shlex.shlex
# The default shlex.sourcehook() implementation
# only joins relative paths when the infile
# attribute is properly set.
- lex = shlex_class(content, infile=mycfg, posix=True)
- lex.wordchars = string.digits + string.ascii_letters + \
- "~!@#$%*_\:;?,./-+{}"
- lex.quotes="\"'"
+ lex = _getconfig_shlex(instream=content, infile=mycfg, posix=True,
+ portage_tolerant=tolerant)
+ lex.wordchars = portage._native_string(string.digits +
+ string.ascii_letters + "~!@#$%*_\:;?,./-+{}")
+ lex.quotes = portage._native_string("\"'")
if allow_sourcing:
- lex.source="source"
- while 1:
- key=lex.get_token()
+ lex.allow_sourcing(expand_map)
+
+ while True:
+ key = _unicode_decode(lex.get_token())
if key == "export":
- key = lex.get_token()
+ key = _unicode_decode(lex.get_token())
if key is None:
#normal end of file
- break;
- equ=lex.get_token()
- if (equ==''):
+ break
+
+ equ = _unicode_decode(lex.get_token())
+ if not equ:
msg = lex.error_leader() + _("Unexpected EOF")
if not tolerant:
raise ParseError(msg)
else:
writemsg("%s\n" % msg, noiselevel=-1)
return mykeys
- elif (equ!='='):
+
+ elif equ != "=":
msg = lex.error_leader() + \
_("Invalid token '%s' (not '=')") % (equ,)
if not tolerant:
@@ -622,7 +726,8 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
else:
writemsg("%s\n" % msg, noiselevel=-1)
return mykeys
- val=lex.get_token()
+
+ val = _unicode_decode(lex.get_token())
if val is None:
msg = lex.error_leader() + \
_("Unexpected end of config file: variable '%s'") % (key,)
@@ -631,8 +736,6 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
else:
writemsg("%s\n" % msg, noiselevel=-1)
return mykeys
- key = _unicode_decode(key)
- val = _unicode_decode(val)
if _invalid_var_name_re.search(key) is not None:
msg = lex.error_leader() + \
@@ -653,7 +756,7 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
except Exception as e:
if isinstance(e, ParseError) or lex is None:
raise
- msg = _unicode_decode("%s%s") % (lex.error_leader(), e)
+ msg = "%s%s" % (lex.error_leader(), e)
writemsg("%s\n" % msg, noiselevel=-1)
raise
@@ -671,10 +774,10 @@ def varexpand(mystring, mydict=None, error_leader=None):
This code is used by the configfile code, as well as others (parser)
This would be a good bunch of code to port to C.
"""
- numvars=0
- #in single, double quotes
- insing=0
- indoub=0
+ numvars = 0
+ # in single, double quotes
+ insing = 0
+ indoub = 0
pos = 0
length = len(mystring)
newstring = []
@@ -686,7 +789,7 @@ def varexpand(mystring, mydict=None, error_leader=None):
else:
newstring.append("'") # Quote removal is handled by shlex.
insing=not insing
- pos=pos+1
+ pos += 1
continue
elif current == '"':
if (insing):
@@ -694,9 +797,9 @@ def varexpand(mystring, mydict=None, error_leader=None):
else:
newstring.append('"') # Quote removal is handled by shlex.
indoub=not indoub
- pos=pos+1
+ pos += 1
continue
- if (not insing):
+ if not insing:
#expansion time
if current == "\n":
#convert newlines to spaces
@@ -711,7 +814,7 @@ def varexpand(mystring, mydict=None, error_leader=None):
# escaped newline characters. Note that we don't handle
# escaped quotes here, since getconfig() uses shlex
# to handle that earlier.
- if (pos+1>=len(mystring)):
+ if pos + 1 >= len(mystring):
newstring.append(current)
break
else:
@@ -733,15 +836,15 @@ def varexpand(mystring, mydict=None, error_leader=None):
newstring.append(mystring[pos - 2:pos])
continue
elif current == "$":
- pos=pos+1
- if mystring[pos]=="{":
- pos=pos+1
- braced=True
+ pos += 1
+ if mystring[pos] == "{":
+ pos += 1
+ braced = True
else:
- braced=False
- myvstart=pos
+ braced = False
+ myvstart = pos
while mystring[pos] in _varexpand_word_chars:
- if (pos+1)>=len(mystring):
+ if pos + 1 >= len(mystring):
if braced:
msg = _varexpand_unexpected_eof_msg
if error_leader is not None:
@@ -749,20 +852,20 @@ def varexpand(mystring, mydict=None, error_leader=None):
writemsg(msg + "\n", noiselevel=-1)
return ""
else:
- pos=pos+1
+ pos += 1
break
- pos=pos+1
- myvarname=mystring[myvstart:pos]
+ pos += 1
+ myvarname = mystring[myvstart:pos]
if braced:
- if mystring[pos]!="}":
+ if mystring[pos] != "}":
msg = _varexpand_unexpected_eof_msg
if error_leader is not None:
msg = error_leader() + msg
writemsg(msg + "\n", noiselevel=-1)
return ""
else:
- pos=pos+1
- if len(myvarname)==0:
+ pos += 1
+ if len(myvarname) == 0:
msg = "$"
if braced:
msg += "{}"
@@ -771,7 +874,7 @@ def varexpand(mystring, mydict=None, error_leader=None):
msg = error_leader() + msg
writemsg(msg + "\n", noiselevel=-1)
return ""
- numvars=numvars+1
+ numvars += 1
if myvarname in mydict:
newstring.append(mydict[myvarname])
else:
@@ -786,9 +889,9 @@ def varexpand(mystring, mydict=None, error_leader=None):
# broken and removed, but can still be imported
pickle_write = None
-def pickle_read(filename,default=None,debug=0):
+def pickle_read(filename, default=None, debug=0):
if not os.access(filename, os.R_OK):
- writemsg(_("pickle_read(): File not readable. '")+filename+"'\n",1)
+ writemsg(_("pickle_read(): File not readable. '") + filename + "'\n", 1)
return default
data = None
try:
@@ -797,12 +900,12 @@ def pickle_read(filename,default=None,debug=0):
mypickle = pickle.Unpickler(myf)
data = mypickle.load()
myf.close()
- del mypickle,myf
- writemsg(_("pickle_read(): Loaded pickle. '")+filename+"'\n",1)
+ del mypickle, myf
+ writemsg(_("pickle_read(): Loaded pickle. '") + filename + "'\n", 1)
except SystemExit as e:
raise
except Exception as e:
- writemsg(_("!!! Failed to load pickle: ")+str(e)+"\n",1)
+ writemsg(_("!!! Failed to load pickle: ") + str(e) + "\n", 1)
data = default
return data
@@ -830,6 +933,9 @@ class cmp_sort_key(object):
list.sort(), making it easier to port code for python-3.0 compatibility.
It works by generating key objects which use the given cmp function to
implement their __lt__ method.
+
+ Beginning with Python 2.7 and 3.2, equivalent functionality is provided
+ by functools.cmp_to_key().
"""
__slots__ = ("_cmp_func",)
@@ -922,6 +1028,10 @@ def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
modified = False
+ # Since Python 3.4, chown requires int type (no proxies).
+ uid = int(uid)
+ gid = int(gid)
+
if stat_cached is None:
try:
if follow_links:
@@ -1141,7 +1251,7 @@ class atomic_ofstream(ObjectProxy):
object.__setattr__(self, '_file',
open_func(_unicode_encode(tmp_name,
encoding=_encodings['fs'], errors='strict'),
- mode=mode, **kargs))
+ mode=mode, **portage._native_kwargs(kargs)))
return
except IOError as e:
if canonical_path == filename:
@@ -1223,7 +1333,7 @@ class atomic_ofstream(ObjectProxy):
self.close()
def __del__(self):
- """If the user does not explicitely call close(), it is
+ """If the user does not explicitly call close(), it is
assumed that an error has occurred, so we abort()."""
try:
f = object.__getattribute__(self, '_file')
@@ -1402,9 +1512,9 @@ class LazyItemsDict(UserDict):
lazy_item = self.lazy_items.get(k)
if lazy_item is not None:
if not lazy_item.singleton:
- raise TypeError(_unicode_decode("LazyItemsDict " + \
+ raise TypeError("LazyItemsDict " + \
"deepcopy is unsafe with lazy items that are " + \
- "not singletons: key=%s value=%s") % (k, lazy_item,))
+ "not singletons: key=%s value=%s" % (k, lazy_item,))
UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
return result
@@ -1576,13 +1686,13 @@ def find_updated_config_files(target_root, config_protect):
"""
Return a tuple of configuration files that needs to be updated.
The tuple contains lists organized like this:
- [ protected_dir, file_list ]
+ [protected_dir, file_list]
If the protected config isn't a protected_dir but a protected_file, the list is:
- [ protected_file, None ]
+ [protected_file, None]
If no configuration files need to be updated, None is returned
"""
- os = _os_merge
+ encoding = _encodings['fs']
if config_protect:
# directories with some protect files in them
@@ -1614,10 +1724,24 @@ def find_updated_config_files(target_root, config_protect):
mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
os.path.split(x.rstrip(os.path.sep))
mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
- a = subprocess_getstatusoutput(mycommand)
-
- if a[0] == 0:
- files = a[1].split('\0')
+ cmd = shlex_split(mycommand)
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(cmd[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(cmd[0])
+ cmd[0] = fullname
+
+ cmd = [_unicode_encode(arg, encoding=encoding, errors='strict')
+ for arg in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = _unicode_decode(proc.communicate()[0], encoding=encoding)
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ files = output.split('\0')
# split always produces an empty string as the last element
if files and not files[-1]:
del files[-1]
diff --git a/pym/portage/util/_argparse.py b/pym/portage/util/_argparse.py
new file mode 100644
index 000000000..6ca785235
--- /dev/null
+++ b/pym/portage/util/_argparse.py
@@ -0,0 +1,42 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['ArgumentParser']
+
+try:
+ from argparse import ArgumentParser
+except ImportError:
+ # Compatibility with Python 2.6 and 3.1
+ from optparse import OptionGroup, OptionParser
+
+ from portage.localization import _
+
+ class ArgumentParser(object):
+ def __init__(self, **kwargs):
+ add_help = kwargs.pop("add_help", None)
+ if add_help is not None:
+ kwargs["add_help_option"] = add_help
+ parser = OptionParser(**kwargs)
+ self._parser = parser
+ self.add_argument = parser.add_option
+ self.print_help = parser.print_help
+ self.error = parser.error
+
+ def add_argument_group(self, title=None, **kwargs):
+ optiongroup = OptionGroup(self._parser, title, **kwargs)
+ self._parser.add_option_group(optiongroup)
+ return _ArgumentGroup(optiongroup)
+
+ def parse_known_args(self, args=None, namespace=None):
+ return self._parser.parse_args(args, namespace)
+
+ def parse_args(self, args=None, namespace=None):
+ args, argv = self.parse_known_args(args, namespace)
+ if argv:
+ msg = _('unrecognized arguments: %s')
+ self.error(msg % ' '.join(argv))
+ return args
+
+ class _ArgumentGroup(object):
+ def __init__(self, optiongroup):
+ self.add_argument = optiongroup.add_option
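A usage sketch (illustration only; it works with both argparse and the optparse-based fallback shown above):

    from portage.util._argparse import ArgumentParser
    parser = ArgumentParser(description="demo")
    parser.add_argument("-j", "--jobs", dest="jobs", default="1")
    options = parser.parse_args(["-j", "4"])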
diff --git a/pym/portage/util/_async/AsyncScheduler.py b/pym/portage/util/_async/AsyncScheduler.py
new file mode 100644
index 000000000..9b96c6f36
--- /dev/null
+++ b/pym/portage/util/_async/AsyncScheduler.py
@@ -0,0 +1,102 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.PollScheduler import PollScheduler
+
+class AsyncScheduler(AsynchronousTask, PollScheduler):
+
+ def __init__(self, max_jobs=None, max_load=None, **kwargs):
+ AsynchronousTask.__init__(self)
+ PollScheduler.__init__(self, **kwargs)
+
+ if max_jobs is None:
+ max_jobs = 1
+ self._max_jobs = max_jobs
+ self._max_load = max_load
+ self._error_count = 0
+ self._running_tasks = set()
+ self._remaining_tasks = True
+ self._term_check_id = None
+ self._loadavg_check_id = None
+
+ def _poll(self):
+ if not (self._is_work_scheduled() or self._keep_scheduling()):
+ self.wait()
+ return self.returncode
+
+ def _cancel(self):
+ self._terminated.set()
+ self._termination_check()
+
+ def _terminate_tasks(self):
+ for task in list(self._running_tasks):
+ task.cancel()
+
+ def _next_task(self):
+ raise NotImplementedError(self)
+
+ def _keep_scheduling(self):
+ return self._remaining_tasks and not self._terminated.is_set()
+
+ def _running_job_count(self):
+ return len(self._running_tasks)
+
+ def _schedule_tasks(self):
+ while self._keep_scheduling() and self._can_add_job():
+ try:
+ task = self._next_task()
+ except StopIteration:
+ self._remaining_tasks = False
+ else:
+ self._running_tasks.add(task)
+ task.scheduler = self._sched_iface
+ task.addExitListener(self._task_exit)
+ task.start()
+
+ # Triggers cleanup and exit listeners if there's nothing left to do.
+ self.poll()
+
+ def _task_exit(self, task):
+ self._running_tasks.discard(task)
+ if task.returncode != os.EX_OK:
+ self._error_count += 1
+ self._schedule()
+
+ def _start(self):
+ self._term_check_id = self._event_loop.idle_add(self._termination_check)
+ if self._max_load is not None and \
+ self._loadavg_latency is not None and \
+ (self._max_jobs is True or self._max_jobs > 1):
+ # We have to schedule periodically, in case the load
+ # average has changed since the last call.
+ self._loadavg_check_id = self._event_loop.timeout_add(
+ self._loadavg_latency, self._schedule)
+ self._schedule()
+
+ def _wait(self):
+ # Loop while there are jobs to be scheduled.
+ while self._keep_scheduling():
+ self._event_loop.iteration()
+
+ # Clean shutdown of previously scheduled jobs. In the
+ # case of termination, this allows for basic cleanup
+ # such as flushing of buffered output to logs.
+ while self._is_work_scheduled():
+ self._event_loop.iteration()
+
+ if self._term_check_id is not None:
+ self._event_loop.source_remove(self._term_check_id)
+ self._term_check_id = None
+
+ if self._loadavg_check_id is not None:
+ self._event_loop.source_remove(self._loadavg_check_id)
+ self._loadavg_check_id = None
+
+ if self._error_count > 0:
+ self.returncode = 1
+ else:
+ self.returncode = os.EX_OK
+
+ return self.returncode
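A minimal subclass sketch (illustration only): concrete schedulers implement _next_task() and raise StopIteration when no tasks remain. This mirrors the TaskScheduler class added later in this diff.

    from portage.util._async.AsyncScheduler import AsyncScheduler

    class IterScheduler(AsyncScheduler):
        def __init__(self, task_iter, **kwargs):
            AsyncScheduler.__init__(self, **kwargs)
            self._task_iter = task_iter

        def _next_task(self):
            return next(self._task_iter)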
diff --git a/pym/portage/util/_async/FileCopier.py b/pym/portage/util/_async/FileCopier.py
new file mode 100644
index 000000000..27e5ab4c0
--- /dev/null
+++ b/pym/portage/util/_async/FileCopier.py
@@ -0,0 +1,17 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage import shutil
+from portage.util._async.ForkProcess import ForkProcess
+
+class FileCopier(ForkProcess):
+ """
+ Asynchronously copy a file.
+ """
+
+ __slots__ = ('src_path', 'dest_path')
+
+ def _run(self):
+ shutil.copy(self.src_path, self.dest_path)
+ return os.EX_OK
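A usage sketch (illustration only; the global_event_loop import path and file paths are assumptions about the surrounding portage tree):

    import os
    from portage.util._async.FileCopier import FileCopier
    from portage.util._eventloop.global_event_loop import global_event_loop

    copier = FileCopier(src_path="/tmp/src", dest_path="/tmp/dst",
        scheduler=global_event_loop())
    copier.start()
    assert copier.wait() == os.EX_OK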
diff --git a/pym/portage/util/_async/FileDigester.py b/pym/portage/util/_async/FileDigester.py
new file mode 100644
index 000000000..881c69280
--- /dev/null
+++ b/pym/portage/util/_async/FileDigester.py
@@ -0,0 +1,73 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.checksum import perform_multiple_checksums
+from portage.util._async.ForkProcess import ForkProcess
+from _emerge.PipeReader import PipeReader
+
+class FileDigester(ForkProcess):
+ """
+ Asynchronously generate file digests. Pass in file_path and
+ hash_names, and after successful execution, the digests
+ attribute will be a dict containing all of the requested
+ digests.
+ """
+
+ __slots__ = ('file_path', 'digests', 'hash_names',
+ '_digest_pipe_reader', '_digest_pw')
+
+ def _start(self):
+ pr, pw = os.pipe()
+ self.fd_pipes = {}
+ self.fd_pipes[pw] = pw
+ self._digest_pw = pw
+ self._digest_pipe_reader = PipeReader(
+ input_files={"input":pr},
+ scheduler=self.scheduler)
+ self._digest_pipe_reader.addExitListener(self._digest_pipe_reader_exit)
+ self._digest_pipe_reader.start()
+ ForkProcess._start(self)
+ os.close(pw)
+
+ def _run(self):
+ digests = perform_multiple_checksums(self.file_path,
+ hashes=self.hash_names)
+
+ buf = "".join("%s=%s\n" % item
+ for item in digests.items()).encode('utf_8')
+
+ while buf:
+ buf = buf[os.write(self._digest_pw, buf):]
+
+ return os.EX_OK
+
+ def _parse_digests(self, data):
+
+ digests = {}
+ for line in data.decode('utf_8').splitlines():
+ parts = line.split('=', 1)
+ if len(parts) == 2:
+ digests[parts[0]] = parts[1]
+
+ self.digests = digests
+
+ def _pipe_logger_exit(self, pipe_logger):
+ # Ignore this event, since we want to ensure that we
+ # exit only after _digest_pipe_reader has reached EOF.
+ self._pipe_logger = None
+
+ def _digest_pipe_reader_exit(self, pipe_reader):
+ self._parse_digests(pipe_reader.getvalue())
+ self._digest_pipe_reader = None
+ self._unregister()
+ self.wait()
+
+ def _unregister(self):
+ ForkProcess._unregister(self)
+
+ pipe_reader = self._digest_pipe_reader
+ if pipe_reader is not None:
+ self._digest_pipe_reader = None
+ pipe_reader.removeExitListener(self._digest_pipe_reader_exit)
+ pipe_reader.cancel()
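A usage sketch (illustration only; the event loop import path is an assumption):

    import os
    from portage.util._async.FileDigester import FileDigester
    from portage.util._eventloop.global_event_loop import global_event_loop

    digester = FileDigester(file_path="/tmp/distfile.tar.gz",
        hash_names=("MD5", "SHA1"), scheduler=global_event_loop())
    digester.start()
    if digester.wait() == os.EX_OK:
        print(digester.digests)  # e.g. {'MD5': '...', 'SHA1': '...'}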
diff --git a/pym/portage/util/_async/ForkProcess.py b/pym/portage/util/_async/ForkProcess.py
new file mode 100644
index 000000000..25f72d308
--- /dev/null
+++ b/pym/portage/util/_async/ForkProcess.py
@@ -0,0 +1,65 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import signal
+import sys
+import traceback
+
+import portage
+from portage import os
+from _emerge.SpawnProcess import SpawnProcess
+
+class ForkProcess(SpawnProcess):
+
+ __slots__ = ()
+
+ def _spawn(self, args, fd_pipes=None, **kwargs):
+ """
+ Fork a subprocess, apply local settings, and call _run().
+ """
+
+ parent_pid = os.getpid()
+ pid = None
+ try:
+ pid = os.fork()
+
+ if pid != 0:
+ if not isinstance(pid, int):
+ raise AssertionError(
+ "fork returned non-integer: %s" % (repr(pid),))
+ return [pid]
+
+ rval = 1
+ try:
+
+ # Use default signal handlers in order to avoid problems
+ # killing subprocesses as reported in bug #353239.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ portage.locks._close_fds()
+ # We don't exec, so use close_fds=False
+ # (see _setup_pipes docstring).
+ portage.process._setup_pipes(fd_pipes, close_fds=False)
+
+ rval = self._run()
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ # os._exit() skips stderr flush!
+ sys.stderr.flush()
+ finally:
+ os._exit(rval)
+
+ finally:
+ if pid == 0 or (pid is None and os.getpid() != parent_pid):
+ # Call os._exit() from a finally block in order
+ # to suppress any finally blocks from earlier
+ # in the call stack (see bug #345289). This
+ # finally block has to be setup before the fork
+ # in order to avoid a race condition.
+ os._exit(1)
+
+ def _run(self):
+ raise NotImplementedError(self)
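A minimal subclass sketch (illustration only): _run() executes in the forked child and its integer return value becomes the exit status.

    from portage.util._async.ForkProcess import ForkProcess

    class TouchProcess(ForkProcess):
        __slots__ = ("path",)
        def _run(self):
            # Runs in the child process after fork().
            open(self.path, "ab").close()
            return 0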
diff --git a/pym/portage/util/_async/PipeLogger.py b/pym/portage/util/_async/PipeLogger.py
new file mode 100644
index 000000000..aa605d94d
--- /dev/null
+++ b/pym/portage/util/_async/PipeLogger.py
@@ -0,0 +1,163 @@
+# Copyright 2008-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import fcntl
+import errno
+import gzip
+import sys
+
+import portage
+from portage import os, _encodings, _unicode_encode
+from _emerge.AbstractPollTask import AbstractPollTask
+
+class PipeLogger(AbstractPollTask):
+
+ """
+ This can be used for logging output of a child process,
+ optionally outputting to log_file_path and/or stdout_fd. It can
+ also monitor for EOF on input_fd, which may be used to detect
+ termination of a child process. If log_file_path ends with
+ '.gz' then the log file is written with compression.
+ """
+
+ __slots__ = ("input_fd", "log_file_path", "stdout_fd") + \
+ ("_log_file", "_log_file_real", "_reg_id")
+
+ def _start(self):
+
+ log_file_path = self.log_file_path
+ if log_file_path is not None:
+
+ self._log_file = open(_unicode_encode(log_file_path,
+ encoding=_encodings['fs'], errors='strict'), mode='ab')
+ if log_file_path.endswith('.gz'):
+ self._log_file_real = self._log_file
+ self._log_file = gzip.GzipFile(filename='', mode='ab',
+ fileobj=self._log_file)
+
+ portage.util.apply_secpass_permissions(log_file_path,
+ uid=portage.portage_uid, gid=portage.portage_gid,
+ mode=0o660)
+
+ if isinstance(self.input_fd, int):
+ fd = self.input_fd
+ else:
+ fd = self.input_fd.fileno()
+
+ fcntl.fcntl(fd, fcntl.F_SETFL,
+ fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(fd, fcntl.F_SETFD,
+ fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._reg_id = self.scheduler.io_add_watch(fd,
+ self._registered_events, self._output_handler)
+ self._registered = True
+
+ def _cancel(self):
+ self._unregister()
+ if self.returncode is None:
+ self.returncode = self._cancelled_returncode
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._wait_loop()
+ self.returncode = os.EX_OK
+ return self.returncode
+
+ def _output_handler(self, fd, event):
+
+ background = self.background
+ stdout_fd = self.stdout_fd
+ log_file = self._log_file
+
+ while True:
+ buf = self._read_buf(fd, event)
+
+ if buf is None:
+ # not a POLLIN event, EAGAIN, etc...
+ break
+
+ if not buf:
+ # EOF
+ self._unregister()
+ self.wait()
+ break
+
+ else:
+ if not background and stdout_fd is not None:
+ failures = 0
+ stdout_buf = buf
+ while stdout_buf:
+ try:
+ stdout_buf = \
+ stdout_buf[os.write(stdout_fd, stdout_buf):]
+ except OSError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ del e
+ failures += 1
+ if failures > 50:
+ # Avoid a potentially infinite loop. In
+ # most cases, the failure count is zero
+ # and it's unlikely to exceed 1.
+ raise
+
+ # This means that a subprocess has put an inherited
+ # stdio file descriptor (typically stdin) into
+ # O_NONBLOCK mode. This is not acceptable (see bug
+ # #264435), so revert it. We need to use a loop
+ # here since there's a race condition due to
+ # parallel processes being able to change the
+ # flags on the inherited file descriptor.
+ # TODO: When possible, avoid having child processes
+ # inherit stdio file descriptors from portage
+ # (maybe it can't be avoided with
+ # PROPERTIES=interactive).
+ fcntl.fcntl(stdout_fd, fcntl.F_SETFL,
+ fcntl.fcntl(stdout_fd,
+ fcntl.F_GETFL) ^ os.O_NONBLOCK)
+
+ if log_file is not None:
+ log_file.write(buf)
+ log_file.flush()
+
+ self._unregister_if_appropriate(event)
+
+ return True
+
+ def _unregister(self):
+
+ if self._reg_id is not None:
+ self.scheduler.source_remove(self._reg_id)
+ self._reg_id = None
+
+ if self.input_fd is not None:
+ if isinstance(self.input_fd, int):
+ os.close(self.input_fd)
+ else:
+ self.input_fd.close()
+ self.input_fd = None
+
+ if self.stdout_fd is not None:
+ os.close(self.stdout_fd)
+ self.stdout_fd = None
+
+ if self._log_file is not None:
+ self._log_file.close()
+ self._log_file = None
+
+ if self._log_file_real is not None:
+ # Avoid "ResourceWarning: unclosed file" since python 3.2.
+ self._log_file_real.close()
+ self._log_file_real = None
+
+ self._registered = False
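A usage sketch (illustration only; the event loop import, the background keyword and the log path are assumptions). A log_file_path ending in '.gz' enables gzip compression:

    import os
    from portage.util._async.PipeLogger import PipeLogger
    from portage.util._eventloop.global_event_loop import global_event_loop

    master_fd, slave_fd = os.pipe()  # slave_fd goes to the child process
    logger = PipeLogger(background=False, input_fd=master_fd,
        log_file_path="/tmp/build.log.gz", stdout_fd=1,
        scheduler=global_event_loop())
    logger.start()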
diff --git a/pym/portage/util/_async/PipeReaderBlockingIO.py b/pym/portage/util/_async/PipeReaderBlockingIO.py
new file mode 100644
index 000000000..b06adf6ed
--- /dev/null
+++ b/pym/portage/util/_async/PipeReaderBlockingIO.py
@@ -0,0 +1,91 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ import threading
+except ImportError:
+ # dummy_threading will not suffice
+ threading = None
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+
+class PipeReaderBlockingIO(AbstractPollTask):
+ """
+ Reads output from one or more files and saves it in memory, for
+ retrieval via the getvalue() method. This is driven by a thread
+ for each input file, in order to support blocking IO. This may
+ be useful for using threads to handle blocking IO with Jython,
+ since Jython lacks the fcntl module which is needed for
+ non-blocking IO (see http://bugs.jython.org/issue1074).
+ """
+
+ __slots__ = ("input_files", "_read_data", "_terminate",
+ "_threads", "_thread_rlock")
+
+ def _start(self):
+ self._terminate = threading.Event()
+ self._threads = {}
+ self._read_data = []
+
+ self._registered = True
+ self._thread_rlock = threading.RLock()
+ with self._thread_rlock:
+ for f in self.input_files.values():
+ t = threading.Thread(target=self._reader_thread, args=(f,))
+ t.daemon = True
+ t.start()
+ self._threads[f] = t
+
+ def _reader_thread(self, f):
+ try:
+ terminated = self._terminate.is_set
+ except AttributeError:
+ # Jython 2.7.0a2
+ terminated = self._terminate.isSet
+ bufsize = self._bufsize
+ while not terminated():
+ buf = f.read(bufsize)
+ with self._thread_rlock:
+ if terminated():
+ break
+ elif buf:
+ self._read_data.append(buf)
+ else:
+ del self._threads[f]
+ if not self._threads:
+ # Thread-safe callback to EventLoop
+ self.scheduler.idle_add(self._eof)
+ break
+ f.close()
+
+ def _eof(self):
+ self._registered = False
+ if self.returncode is None:
+ self.returncode = os.EX_OK
+ self.wait()
+ return False
+
+ def _cancel(self):
+ self._terminate.set()
+ self._registered = False
+ if self.returncode is None:
+ self.returncode = self._cancelled_returncode
+ self.wait()
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._wait_loop()
+ self.returncode = os.EX_OK
+ return self.returncode
+
+ def getvalue(self):
+ """Retrieve the entire contents"""
+ with self._thread_rlock:
+ return b''.join(self._read_data)
+
+ def close(self):
+ """Free the memory buffer."""
+ with self._thread_rlock:
+ self._read_data = None
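A usage sketch (illustration only; the event loop import path is an assumption): each input file gets a reader thread, and getvalue() returns everything read once the task completes.

    import os
    from portage.util._async.PipeReaderBlockingIO import PipeReaderBlockingIO
    from portage.util._eventloop.global_event_loop import global_event_loop

    pr, pw = os.pipe()
    reader = PipeReaderBlockingIO(
        input_files={"input": os.fdopen(pr, "rb")},
        scheduler=global_event_loop())
    reader.start()
    os.write(pw, b"hello")
    os.close(pw)
    reader.wait()
    print(reader.getvalue())  # b'hello'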
diff --git a/pym/portage/util/_async/PopenProcess.py b/pym/portage/util/_async/PopenProcess.py
new file mode 100644
index 000000000..2fc56d295
--- /dev/null
+++ b/pym/portage/util/_async/PopenProcess.py
@@ -0,0 +1,33 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SubProcess import SubProcess
+
+class PopenProcess(SubProcess):
+
+ __slots__ = ("pipe_reader", "proc",)
+
+ def _start(self):
+
+ self.pid = self.proc.pid
+ self._registered = True
+
+ if self.pipe_reader is None:
+ self._reg_id = self.scheduler.child_watch_add(
+ self.pid, self._child_watch_cb)
+ else:
+ try:
+ self.pipe_reader.scheduler = self.scheduler
+ except AttributeError:
+ pass
+ self.pipe_reader.addExitListener(self._pipe_reader_exit)
+ self.pipe_reader.start()
+
+ def _pipe_reader_exit(self, pipe_reader):
+ self._reg_id = self.scheduler.child_watch_add(
+ self.pid, self._child_watch_cb)
+
+ def _child_watch_cb(self, pid, condition, user_data=None):
+ self._reg_id = None
+ self._waitpid_cb(pid, condition)
+ self.wait()
diff --git a/pym/portage/util/_async/SchedulerInterface.py b/pym/portage/util/_async/SchedulerInterface.py
new file mode 100644
index 000000000..2ab668ee4
--- /dev/null
+++ b/pym/portage/util/_async/SchedulerInterface.py
@@ -0,0 +1,79 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gzip
+import errno
+
+from portage import _encodings
+from portage import _unicode_encode
+from portage.util import writemsg_level
+from ..SlotObject import SlotObject
+
+class SchedulerInterface(SlotObject):
+
+ _event_loop_attrs = ("IO_ERR", "IO_HUP", "IO_IN",
+ "IO_NVAL", "IO_OUT", "IO_PRI",
+ "child_watch_add", "idle_add", "io_add_watch",
+ "iteration", "source_remove", "timeout_add")
+
+ __slots__ = _event_loop_attrs + ("_event_loop", "_is_background")
+
+ def __init__(self, event_loop, is_background=None, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ self._event_loop = event_loop
+ if is_background is None:
+ is_background = self._return_false
+ self._is_background = is_background
+ for k in self._event_loop_attrs:
+ setattr(self, k, getattr(event_loop, k))
+
+ @staticmethod
+ def _return_false():
+ return False
+
+ def output(self, msg, log_path=None, background=None,
+ level=0, noiselevel=-1):
+ """
+ Output msg to stdout if not self._is_background(). If log_path
+ is not None then append msg to the log (appends with
+ compression if the filename extension of log_path corresponds
+ to a supported compression type).
+ """
+
+ global_background = self._is_background()
+ if background is None or global_background:
+ # Use the global value if the task does not have a local
+ # background value. For example, parallel-fetch tasks run
+ # in the background while other tasks concurrently run in
+ # the foreground.
+ background = global_background
+
+ msg_shown = False
+ if not background:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ msg_shown = True
+
+ if log_path is not None:
+ try:
+ f = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='ab')
+ f_real = f
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ if not msg_shown:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ else:
+
+ if log_path.endswith('.gz'):
+ # NOTE: The empty filename argument prevents us from
+ # triggering a bug in python3 which causes GzipFile
+ # to raise AttributeError if fileobj.name is bytes
+ # instead of unicode.
+ f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
+
+ f.write(_unicode_encode(msg))
+ f.close()
+ if f_real is not f:
+ f_real.close()
diff --git a/pym/portage/util/_async/TaskScheduler.py b/pym/portage/util/_async/TaskScheduler.py
new file mode 100644
index 000000000..35b3875a4
--- /dev/null
+++ b/pym/portage/util/_async/TaskScheduler.py
@@ -0,0 +1,20 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from .AsyncScheduler import AsyncScheduler
+
+class TaskScheduler(AsyncScheduler):
+
+ """
+ A simple way to handle scheduling of AbstractPollTask instances. Simply
+ pass a task iterator into the constructor and call start(). Use the
+ poll, wait, or addExitListener methods to be notified when all of the
+ tasks have completed.
+ """
+
+ def __init__(self, task_iter, **kwargs):
+ AsyncScheduler.__init__(self, **kwargs)
+ self._task_iter = task_iter
+
+ def _next_task(self):
+ return next(self._task_iter)
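A usage sketch (illustration only; the constructor keywords and the event loop import path are assumptions):

    from portage.util._async.TaskScheduler import TaskScheduler
    from portage.util._eventloop.global_event_loop import global_event_loop

    task_iter = iter([])  # normally yields AbstractPollTask instances
    sched = TaskScheduler(task_iter, max_jobs=2,
        event_loop=global_event_loop())
    sched.start()
    sched.wait()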
diff --git a/pym/portage/util/_async/__init__.py b/pym/portage/util/_async/__init__.py
new file mode 100644
index 000000000..418ad862b
--- /dev/null
+++ b/pym/portage/util/_async/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/pym/portage/util/_async/run_main_scheduler.py b/pym/portage/util/_async/run_main_scheduler.py
new file mode 100644
index 000000000..10fed34b3
--- /dev/null
+++ b/pym/portage/util/_async/run_main_scheduler.py
@@ -0,0 +1,41 @@
+
+import signal
+
+def run_main_scheduler(scheduler):
+ """
+ Start and run an AsyncScheduler (or compatible object), and handle
+ SIGINT or SIGTERM by calling its terminate() method and waiting
+ for it to clean up after itself. If SIGINT or SIGTERM is received,
+ return signum, else return None. Any previous SIGINT or SIGTERM
+ signal handlers are automatically saved and restored before
+ returning.
+ """
+
+ received_signal = []
+
+ def sighandler(signum, frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ received_signal.append(signum)
+ scheduler.terminate()
+
+ earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+ earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+
+ try:
+ scheduler.start()
+ scheduler.wait()
+ finally:
+ # Restore previous handlers
+ if earlier_sigint_handler is not None:
+ signal.signal(signal.SIGINT, earlier_sigint_handler)
+ else:
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ if earlier_sigterm_handler is not None:
+ signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+ else:
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ if received_signal:
+ return received_signal[0]
+ return None
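A usage sketch (illustration only), following the common shell convention of exiting with 128 plus the signal number:

    import sys
    from portage.util._async.run_main_scheduler import run_main_scheduler

    # sched: an AsyncScheduler (or compatible) instance, defined elsewhere.
    signum = run_main_scheduler(sched)
    if signum is not None:
        sys.exit(128 + signum)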
diff --git a/pym/portage/util/_ctypes.py b/pym/portage/util/_ctypes.py
new file mode 100644
index 000000000..aeceebcca
--- /dev/null
+++ b/pym/portage/util/_ctypes.py
@@ -0,0 +1,47 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ import ctypes
+ import ctypes.util
+except ImportError:
+ ctypes = None
+else:
+ try:
+ ctypes.cdll
+ except AttributeError:
+ ctypes = None
+
+_library_names = {}
+
+def find_library(name):
+ """
+ Calls ctypes.util.find_library() if the ctypes module is available,
+ and otherwise returns None. Results are cached for future invocations.
+ """
+ filename = _library_names.get(name)
+ if filename is None:
+ if ctypes is not None:
+ filename = ctypes.util.find_library(name)
+ if filename is None:
+ filename = False
+ _library_names[name] = filename
+
+ if filename is False:
+ return None
+ return filename
+
+_library_handles = {}
+
+def LoadLibrary(name):
+ """
+ Calls ctypes.CDLL(name, use_errno=True) if the ctypes module is available,
+ and otherwise returns None. Results are cached for future invocations.
+ """
+ handle = _library_handles.get(name)
+
+ if handle is None and ctypes is not None:
+ handle = ctypes.CDLL(name, use_errno=True)
+ _library_handles[name] = handle
+
+ return handle
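A usage sketch (illustration only):

    from portage.util._ctypes import LoadLibrary, find_library

    filename = find_library("c")  # e.g. 'libc.so.6', or None without ctypes
    if filename is not None:
        libc = LoadLibrary(filename)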
diff --git a/pym/portage/util/_desktop_entry.py b/pym/portage/util/_desktop_entry.py
index 790178013..0b4954735 100644
--- a/pym/portage/util/_desktop_entry.py
+++ b/pym/portage/util/_desktop_entry.py
@@ -1,7 +1,8 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import io
+import re
import subprocess
import sys
@@ -10,7 +11,9 @@ try:
except ImportError:
from ConfigParser import Error as ConfigParserError, RawConfigParser
+import portage
from portage import _encodings, _unicode_encode, _unicode_decode
+from portage.util import writemsg
def parse_desktop_entry(path):
"""
@@ -31,45 +34,71 @@ def parse_desktop_entry(path):
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
errors='replace') as f:
- read_file(f)
+ content = f.read()
+
+ # In Python 3.2, read_file does not support bytes in file names
+ # (see bug #429544), so use StringIO to hide the file name.
+ read_file(io.StringIO(content))
return parser
-_ignored_service_errors = (
- 'error: required key "Name" in group "Desktop Entry" is not present',
- 'error: key "Actions" is present in group "Desktop Entry", but the type is "Service" while this key is only valid for type "Application"',
- 'error: key "MimeType" is present in group "Desktop Entry", but the type is "Service" while this key is only valid for type "Application"',
+_trivial_warnings = re.compile(r' looks redundant with value ')
+
+_ignored_errors = (
+ # Ignore error for emacs.desktop:
+ # https://bugs.freedesktop.org/show_bug.cgi?id=35844#c6
+ 'error: (will be fatal in the future): value "TextEditor" in key "Categories" in group "Desktop Entry" requires another category to be present among the following categories: Utility',
+ 'warning: key "Encoding" in group "Desktop Entry" is deprecated'
+)
+
+_ShowIn_exemptions = (
+ # See bug #480586.
+ 'contains an unregistered value "Pantheon"',
)
def validate_desktop_entry(path):
args = ["desktop-file-validate", path]
- if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000:
- # Python 3.1 does not support bytes in Popen args.
- args = [_unicode_encode(x, errors='strict') for x in args]
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ args = [_unicode_encode(x, errors='strict') for x in args]
proc = subprocess.Popen(args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output_lines = _unicode_decode(proc.communicate()[0]).splitlines()
proc.wait()
if output_lines:
- try:
- desktop_entry = parse_desktop_entry(path)
- except ConfigParserError:
- pass
- else:
- if desktop_entry.has_section("Desktop Entry"):
- try:
- entry_type = desktop_entry.get("Desktop Entry", "Type")
- except ConfigParserError:
- pass
- else:
- if entry_type == "Service":
- # Filter false errors for Type=Service (bug #414125).
- filtered_output = []
- for line in output_lines:
- if line[len(path)+2:] in _ignored_service_errors:
- continue
- filtered_output.append(line)
- output_lines = filtered_output
+ filtered_output = []
+ for line in output_lines:
+ msg = line[len(path)+2:]
+ # "hint:" output is new in desktop-file-utils-0.21
+ if msg.startswith('hint: ') or msg in _ignored_errors:
+ continue
+ if 'for key "NotShowIn" in group "Desktop Entry"' in msg or \
+ 'for key "OnlyShowIn" in group "Desktop Entry"' in msg:
+ exempt = False
+ for s in _ShowIn_exemptions:
+ if s in msg:
+ exempt = True
+ break
+ if exempt:
+ continue
+ filtered_output.append(line)
+ output_lines = filtered_output
+
+ if output_lines:
+ output_lines = [line for line in output_lines
+ if _trivial_warnings.search(line) is None]
return output_lines
+
+if __name__ == "__main__":
+ for arg in sys.argv[1:]:
+ for line in validate_desktop_entry(arg):
+ writemsg(line + "\n", noiselevel=-1)
diff --git a/pym/portage/util/_dyn_libs/LinkageMapELF.py b/pym/portage/util/_dyn_libs/LinkageMapELF.py
index e71ac735a..3920f9487 100644
--- a/pym/portage/util/_dyn_libs/LinkageMapELF.py
+++ b/pym/portage/util/_dyn_libs/LinkageMapELF.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -26,7 +26,7 @@ class LinkageMapELF(object):
_soname_map_class = slot_dict_class(
("consumers", "providers"), prefix="")
- class _obj_properies_class(object):
+ class _obj_properties_class(object):
__slots__ = ("arch", "needed", "runpaths", "soname", "alt_paths",
"owner",)
@@ -316,7 +316,7 @@ class LinkageMapELF(object):
myprops = obj_properties.get(obj_key)
if myprops is None:
indexed = False
- myprops = self._obj_properies_class(
+ myprops = self._obj_properties_class(
arch, needed, path, soname, [], owner)
obj_properties[obj_key] = myprops
# All object paths are added into the obj_properties tuple.
@@ -678,7 +678,7 @@ class LinkageMapELF(object):
rValue[soname].add(provider)
return rValue
- def findConsumers(self, obj, exclude_providers=None):
+ def findConsumers(self, obj, exclude_providers=None, greedy=True):
"""
Find consumers of an object or object key.
@@ -715,6 +715,9 @@ class LinkageMapELF(object):
'/usr/lib/libssl.so.0.9.8'), and return True if the library is
owned by a provider which is planned for removal.
@type exclude_providers: collection
+ @param greedy: If True, then include consumers that are satisfied
+ by alternative providers, otherwise omit them. Default is True.
+ @type greedy: Boolean
@rtype: set of strings (example: set(['/bin/foo', '/usr/bin/bar']))
@return: The return value is a soname -> set-of-library-paths, where
set-of-library-paths satisfy soname.
@@ -769,16 +772,19 @@ class LinkageMapELF(object):
defpath_keys = set(self._path_key(x) for x in self._defpath)
satisfied_consumer_keys = set()
if soname_node is not None:
- if exclude_providers is not None:
+ if exclude_providers is not None or not greedy:
relevant_dir_keys = set()
for provider_key in soname_node.providers:
+ if not greedy and provider_key == obj_key:
+ continue
provider_objs = self._obj_properties[provider_key].alt_paths
for p in provider_objs:
provider_excluded = False
- for excluded_provider_isowner in exclude_providers:
- if excluded_provider_isowner(p):
- provider_excluded = True
- break
+ if exclude_providers is not None:
+ for excluded_provider_isowner in exclude_providers:
+ if excluded_provider_isowner(p):
+ provider_excluded = True
+ break
if not provider_excluded:
# This provider is not excluded. It will
# satisfy a consumer of this soname if it
diff --git a/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py b/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
index 4bc64dbfe..a422ffefd 100644
--- a/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
+++ b/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -25,6 +25,7 @@ from portage.versions import cpv_getkey
from portage.locks import lockfile, unlockfile
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
basestring = str
class PreservedLibsRegistry(object):
diff --git a/pym/portage/util/_dyn_libs/display_preserved_libs.py b/pym/portage/util/_dyn_libs/display_preserved_libs.py
new file mode 100644
index 000000000..b16478d2b
--- /dev/null
+++ b/pym/portage/util/_dyn_libs/display_preserved_libs.py
@@ -0,0 +1,98 @@
+# Copyright 2007-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+
+import portage
+from portage.output import colorize
+
+def display_preserved_libs(vardb):
+
+ MAX_DISPLAY = 3
+
+ plibdata = vardb._plib_registry.getPreservedLibs()
+ linkmap = vardb._linkmap
+ consumer_map = {}
+ owners = {}
+
+ try:
+ linkmap.rebuild()
+ except portage.exception.CommandNotFound as e:
+ portage.util.writemsg_level("!!! Command Not Found: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ search_for_owners = set()
+ for cpv in plibdata:
+ internal_plib_keys = set(linkmap._obj_key(f) \
+ for f in plibdata[cpv])
+ for f in plibdata[cpv]:
+ if f in consumer_map:
+ continue
+ consumers = []
+ for c in linkmap.findConsumers(f, greedy=False):
+ # Filter out any consumers that are also preserved libs
+ # belonging to the same package as the provider.
+ if linkmap._obj_key(c) not in internal_plib_keys:
+ consumers.append(c)
+ consumers.sort()
+ consumer_map[f] = consumers
+ search_for_owners.update(consumers[:MAX_DISPLAY+1])
+
+ owners = {}
+ for f in search_for_owners:
+ owner_set = set()
+ for owner in linkmap.getOwners(f):
+ owner_dblink = vardb._dblink(owner)
+ if owner_dblink.exists():
+ owner_set.add(owner_dblink)
+ if owner_set:
+ owners[f] = owner_set
+
+ all_preserved = set()
+ all_preserved.update(*plibdata.values())
+
+ for cpv in plibdata:
+ print(colorize("WARN", ">>>") + " package: %s" % cpv)
+ samefile_map = {}
+ for f in plibdata[cpv]:
+ obj_key = linkmap._obj_key(f)
+ alt_paths = samefile_map.get(obj_key)
+ if alt_paths is None:
+ alt_paths = set()
+ samefile_map[obj_key] = alt_paths
+ alt_paths.add(f)
+
+ for alt_paths in samefile_map.values():
+ alt_paths = sorted(alt_paths)
+ for p in alt_paths:
+ print(colorize("WARN", " * ") + " - %s" % (p,))
+ f = alt_paths[0]
+ consumers = consumer_map.get(f, [])
+ consumers_non_preserved = [c for c in consumers
+ if c not in all_preserved]
+ if consumers_non_preserved:
+ # Filter the consumers that are preserved libraries, since
+ # they don't need to be rebuilt (see bug #461908).
+ consumers = consumers_non_preserved
+
+ if len(consumers) == MAX_DISPLAY + 1:
+ # Display 1 extra consumer, instead of displaying
+ # "used by 1 other files".
+ max_display = MAX_DISPLAY + 1
+ else:
+ max_display = MAX_DISPLAY
+ for c in consumers[:max_display]:
+ if c in all_preserved:
+ # The owner is displayed elsewhere due to having
+ # its libs preserved, so distinguish this special
+ # case (see bug #461908).
+ owners_desc = "preserved"
+ else:
+ owners_desc = ", ".join(x.mycpv for x in owners.get(c, []))
+ print(colorize("WARN", " * ") + " used by %s (%s)" % \
+ (c, owners_desc))
+ if len(consumers) > max_display:
+ print(colorize("WARN", " * ") + " used by %d other files" %
+ (len(consumers) - max_display))
diff --git a/pym/portage/util/_eventloop/EventLoop.py b/pym/portage/util/_eventloop/EventLoop.py
index bbbce5261..9ffcc74d9 100644
--- a/pym/portage/util/_eventloop/EventLoop.py
+++ b/pym/portage/util/_eventloop/EventLoop.py
@@ -1,20 +1,37 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
-import fcntl
import logging
import os
import select
import signal
+import sys
import time
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
from portage.util import writemsg_level
from ..SlotObject import SlotObject
from .PollConstants import PollConstants
from .PollSelectAdapter import PollSelectAdapter
class EventLoop(object):
+ """
+ An event loop, intended to be compatible with the GLib event loop.
+ Call the iteration method in order to execute one iteration of the
+ loop. The idle_add and timeout_add methods serve as thread-safe
+ means to interact with the loop's thread.
+ """
supports_multiprocessing = True
@@ -43,7 +60,9 @@ class EventLoop(object):
that global_event_loop does not need constructor arguments)
@type main: bool
"""
- self._use_signal = main
+ self._use_signal = main and fcntl is not None
+ self._thread_rlock = threading.RLock()
+ self._thread_condition = threading.Condition(self._thread_rlock)
self._poll_event_queue = []
self._poll_event_handlers = {}
self._poll_event_handler_ids = {}
@@ -52,14 +71,48 @@ class EventLoop(object):
self._idle_callbacks = {}
self._timeout_handlers = {}
self._timeout_interval = None
- self._poll_obj = create_poll_instance()
- self.IO_ERR = PollConstants.POLLERR
- self.IO_HUP = PollConstants.POLLHUP
- self.IO_IN = PollConstants.POLLIN
- self.IO_NVAL = PollConstants.POLLNVAL
- self.IO_OUT = PollConstants.POLLOUT
- self.IO_PRI = PollConstants.POLLPRI
+ self._poll_obj = None
+ try:
+ select.epoll
+ except AttributeError:
+ pass
+ else:
+ try:
+ epoll_obj = select.epoll()
+ except IOError:
+ # This happens with Linux 2.4 kernels:
+ # IOError: [Errno 38] Function not implemented
+ pass
+ else:
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(epoll_obj.fileno(), fcntl.F_SETFD,
+ fcntl.fcntl(epoll_obj.fileno(),
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._poll_obj = _epoll_adapter(epoll_obj)
+ self.IO_ERR = select.EPOLLERR
+ self.IO_HUP = select.EPOLLHUP
+ self.IO_IN = select.EPOLLIN
+ self.IO_NVAL = 0
+ self.IO_OUT = select.EPOLLOUT
+ self.IO_PRI = select.EPOLLPRI
+
+ if self._poll_obj is None:
+ self._poll_obj = create_poll_instance()
+ self.IO_ERR = PollConstants.POLLERR
+ self.IO_HUP = PollConstants.POLLHUP
+ self.IO_IN = PollConstants.POLLIN
+ self.IO_NVAL = PollConstants.POLLNVAL
+ self.IO_OUT = PollConstants.POLLOUT
+ self.IO_PRI = PollConstants.POLLPRI
self._child_handlers = {}
self._sigchld_read = None
@@ -67,6 +120,14 @@ class EventLoop(object):
self._sigchld_src_id = None
self._pid = os.getpid()
+ def _new_source_id(self):
+ """
+ Generate a new source id. This method is thread-safe.
+ """
+ with self._thread_rlock:
+ self._event_handler_id += 1
+ return self._event_handler_id
+
def _poll(self, timeout=None):
"""
All poll() calls pass through here. The poll events
@@ -85,9 +146,11 @@ class EventLoop(object):
try:
self._poll_event_queue.extend(self._poll_obj.poll(timeout))
break
- except select.error as e:
+ except (IOError, select.error) as e:
# Silently handle EINTR, which is normal when we have
- # received a signal such as SIGINT.
+ # received a signal such as SIGINT (epoll objects may
+ # raise IOError rather than select.error, at least in
+ # Python 3.2).
if not (e.args and e.args[0] == errno.EINTR):
writemsg_level("\n!!! select error: %s\n" % (e,),
level=logging.ERROR, noiselevel=-1)
@@ -101,7 +164,19 @@ class EventLoop(object):
def iteration(self, *args):
"""
- Like glib.MainContext.iteration(), runs a single iteration.
+ Like glib.MainContext.iteration(), runs a single iteration. In order
+ to avoid blocking forever when may_block is True (the default),
+ callers must be careful to ensure that at least one of the following
+ conditions is met:
+ 1) An event source or timeout is registered which is guaranteed
+ to trigger at least one event (a call to an idle function
+ only counts as an event if it returns a False value which
+ causes it to stop being called)
+ 2) Another thread is guaranteed to call one of the thread-safe
+ methods which notify iteration to stop waiting (such as
+ idle_add or timeout_add).
+ These rules ensure that iteration is able to block until an event
+ arrives, without doing any busy waiting that would waste CPU time.
@type may_block: bool
@param may_block: if True the call may block waiting for an event
(default is True).
@@ -120,23 +195,32 @@ class EventLoop(object):
event_queue = self._poll_event_queue
event_handlers = self._poll_event_handlers
events_handled = 0
+ timeouts_checked = False
if not event_handlers:
- if self._run_timeouts():
- events_handled += 1
- if not event_handlers:
- if not events_handled and may_block and \
- self._timeout_interval is not None:
+ with self._thread_condition:
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
+ if not event_handlers and not events_handled and may_block:
# Block so that we don't waste cpu time by looping too
# quickly. This makes EventLoop useful for code that needs
# to wait for timeout callbacks regardless of whether or
# not any IO handlers are currently registered.
- try:
- self._poll(timeout=self._timeout_interval)
- except StopIteration:
- pass
+ timeout = self._get_poll_timeout()
+ if timeout is None:
+ wait_timeout = None
+ else:
+ wait_timeout = float(timeout) / 1000
+ # NOTE: In order to avoid a possible infinite wait when
+ # wait_timeout is None, the previous _run_timeouts()
+ # call must have returned False *with* _thread_condition
+ # acquired. Otherwise, we would risk going to sleep after
+ # our only notify event has already passed.
+ self._thread_condition.wait(wait_timeout)
if self._run_timeouts():
events_handled += 1
+ timeouts_checked = True
# If any timeouts have executed, then return immediately,
# in order to minimize latency in termination of iteration
@@ -147,14 +231,18 @@ class EventLoop(object):
if not event_queue:
if may_block:
- if self._child_handlers:
- if self._timeout_interval is None:
- timeout = self._sigchld_interval
- else:
- timeout = min(self._sigchld_interval,
- self._timeout_interval)
- else:
- timeout = self._timeout_interval
+ timeout = self._get_poll_timeout()
+
+ # Avoid blocking for IO if there are any timeout
+ # or idle callbacks available to process.
+ if timeout != 0 and not timeouts_checked:
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
+ if events_handled:
+ # Minimize latency for loops controlled
+ # by timeout or idle callback events.
+ timeout = 0
else:
timeout = 0
@@ -170,17 +258,37 @@ class EventLoop(object):
while event_queue:
events_handled += 1
f, event = event_queue.pop()
- x = event_handlers[f]
+ try:
+ x = event_handlers[f]
+ except KeyError:
+ # This is known to be triggered by the epoll
+ # implementation in qemu-user-1.2.2, and appears
+ # to be harmless (see bug #451326).
+ continue
if not x.callback(f, event, *x.args):
self.source_remove(x.source_id)
- # Run timeouts last, in order to minimize latency in
- # termination of iteration loops that they may control.
- if self._run_timeouts():
- events_handled += 1
+ if not timeouts_checked:
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
return bool(events_handled)
+ def _get_poll_timeout(self):
+
+ with self._thread_rlock:
+ if self._child_handlers:
+ if self._timeout_interval is None:
+ timeout = self._sigchld_interval
+ else:
+ timeout = min(self._sigchld_interval,
+ self._timeout_interval)
+ else:
+ timeout = self._timeout_interval
+
+ return timeout
+
def child_watch_add(self, pid, callback, data=None):
"""
Like glib.child_watch_add(), sets callback to be called with the
@@ -201,18 +309,29 @@ class EventLoop(object):
@rtype: int
@return: an integer ID
"""
- self._event_handler_id += 1
- source_id = self._event_handler_id
+ source_id = self._new_source_id()
self._child_handlers[source_id] = self._child_callback_class(
callback=callback, data=data, pid=pid, source_id=source_id)
if self._use_signal:
if self._sigchld_read is None:
self._sigchld_read, self._sigchld_write = os.pipe()
+
fcntl.fcntl(self._sigchld_read, fcntl.F_SETFL,
fcntl.fcntl(self._sigchld_read,
fcntl.F_GETFL) | os.O_NONBLOCK)
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._sigchld_read, fcntl.F_SETFD,
+ fcntl.fcntl(self._sigchld_read,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
# The IO watch is dynamically registered and unregistered as
# needed, since we don't want to consider it as a valid source
# of events when there are no child listeners. It's important
@@ -276,22 +395,25 @@ class EventLoop(object):
"""
Like glib.idle_add(), if callback returns False it is
automatically removed from the list of event sources and will
- not be called again.
+ not be called again. This method is thread-safe.
@type callback: callable
@param callback: a function to call
@rtype: int
@return: an integer ID
"""
- self._event_handler_id += 1
- source_id = self._event_handler_id
- self._idle_callbacks[source_id] = self._idle_callback_class(
- args=args, callback=callback, source_id=source_id)
+ with self._thread_condition:
+ source_id = self._new_source_id()
+ self._idle_callbacks[source_id] = self._idle_callback_class(
+ args=args, callback=callback, source_id=source_id)
+ self._thread_condition.notify()
return source_id
def _run_idle_callbacks(self):
+ # assumes caller has acquired self._thread_rlock
if not self._idle_callbacks:
- return
+ return False
+ state_change = 0
# Iterate over our local list, since self._idle_callbacks can be
# modified during the execution of these callbacks.
for x in list(self._idle_callbacks.values()):
@@ -304,26 +426,32 @@ class EventLoop(object):
x.calling = True
try:
if not x.callback(*x.args):
+ state_change += 1
self.source_remove(x.source_id)
finally:
x.calling = False
+ return bool(state_change)
+
def timeout_add(self, interval, function, *args):
"""
Like glib.timeout_add(), interval argument is the number of
milliseconds between calls to your function, and your function
should return False to stop being called, or True to continue
being called. Any additional positional arguments given here
- are passed to your function when it's called.
+ are passed to your function when it's called. This method is
+ thread-safe.
"""
- self._event_handler_id += 1
- source_id = self._event_handler_id
- self._timeout_handlers[source_id] = \
- self._timeout_handler_class(
- interval=interval, function=function, args=args,
- source_id=source_id, timestamp=time.time())
- if self._timeout_interval is None or self._timeout_interval > interval:
- self._timeout_interval = interval
+ with self._thread_condition:
+ source_id = self._new_source_id()
+ self._timeout_handlers[source_id] = \
+ self._timeout_handler_class(
+ interval=interval, function=function, args=args,
+ source_id=source_id, timestamp=time.time())
+ if self._timeout_interval is None or \
+ self._timeout_interval > interval:
+ self._timeout_interval = interval
+ self._thread_condition.notify()
return source_id
def _run_timeouts(self):
@@ -333,37 +461,40 @@ class EventLoop(object):
if self._poll_child_processes():
calls += 1
- self._run_idle_callbacks()
-
- if not self._timeout_handlers:
- return bool(calls)
+ with self._thread_rlock:
- ready_timeouts = []
- current_time = time.time()
- for x in self._timeout_handlers.values():
- elapsed_seconds = current_time - x.timestamp
- # elapsed_seconds < 0 means the system clock has been adjusted
- if elapsed_seconds < 0 or \
- (x.interval - 1000 * elapsed_seconds) <= 0:
- ready_timeouts.append(x)
+ if self._run_idle_callbacks():
+ calls += 1
- # Iterate of our local list, since self._timeout_handlers can be
- # modified during the exection of these callbacks.
- for x in ready_timeouts:
- if x.source_id not in self._timeout_handlers:
- # it got cancelled while executing another timeout
- continue
- if x.calling:
- # don't call it recursively
- continue
- calls += 1
- x.calling = True
- try:
- x.timestamp = time.time()
- if not x.function(*x.args):
- self.source_remove(x.source_id)
- finally:
- x.calling = False
+ if not self._timeout_handlers:
+ return bool(calls)
+
+ ready_timeouts = []
+ current_time = time.time()
+ for x in self._timeout_handlers.values():
+ elapsed_seconds = current_time - x.timestamp
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds < 0 or \
+ (x.interval - 1000 * elapsed_seconds) <= 0:
+ ready_timeouts.append(x)
+
+ # Iterate over our local list, since self._timeout_handlers can be
+ # modified during the execution of these callbacks.
+ for x in ready_timeouts:
+ if x.source_id not in self._timeout_handlers:
+ # it got cancelled while executing another timeout
+ continue
+ if x.calling:
+ # don't call it recursively
+ continue
+ calls += 1
+ x.calling = True
+ try:
+ x.timestamp = time.time()
+ if not x.function(*x.args):
+ self.source_remove(x.source_id)
+ finally:
+ x.calling = False
return bool(calls)
@@ -385,8 +516,7 @@ class EventLoop(object):
"""
if f in self._poll_event_handlers:
raise AssertionError("fd %d is already registered" % f)
- self._event_handler_id += 1
- source_id = self._event_handler_id
+ source_id = self._new_source_id()
self._poll_event_handler_ids[source_id] = f
self._poll_event_handlers[f] = self._io_handler_class(
args=args, callback=callback, f=f, source_id=source_id)
@@ -406,18 +536,21 @@ class EventLoop(object):
self.source_remove(self._sigchld_src_id)
self._sigchld_src_id = None
return True
- idle_callback = self._idle_callbacks.pop(reg_id, None)
- if idle_callback is not None:
- return True
- timeout_handler = self._timeout_handlers.pop(reg_id, None)
- if timeout_handler is not None:
- if timeout_handler.interval == self._timeout_interval:
- if self._timeout_handlers:
- self._timeout_interval = \
- min(x.interval for x in self._timeout_handlers.values())
- else:
- self._timeout_interval = None
- return True
+
+ with self._thread_rlock:
+ idle_callback = self._idle_callbacks.pop(reg_id, None)
+ if idle_callback is not None:
+ return True
+ timeout_handler = self._timeout_handlers.pop(reg_id, None)
+ if timeout_handler is not None:
+ if timeout_handler.interval == self._timeout_interval:
+ if self._timeout_handlers:
+ self._timeout_interval = min(x.interval
+ for x in self._timeout_handlers.values())
+ else:
+ self._timeout_interval = None
+ return True
+
f = self._poll_event_handler_ids.pop(reg_id, None)
if f is None:
return False
@@ -467,7 +600,12 @@ def can_poll_device():
return _can_poll_device
p = select.poll()
- p.register(dev_null.fileno(), PollConstants.POLLIN)
+ try:
+ p.register(dev_null.fileno(), PollConstants.POLLIN)
+ except TypeError:
+ # Jython: Object 'org.python.core.io.FileIO@f8f175' is not watchable
+ _can_poll_device = False
+ return _can_poll_device
invalid_request = False
for f, event in p.poll():
@@ -488,3 +626,37 @@ def create_poll_instance():
if can_poll_device():
return select.poll()
return PollSelectAdapter()
+
+class _epoll_adapter(object):
+ """
+ Wraps a select.epoll instance in order to make it compatible
+ with select.poll instances. This is necessary since epoll instances
+ interpret timeout arguments differently. Note that the file descriptor
+ that is associated with an epoll instance will close automatically when
+ it is garbage collected, so it's not necessary to close it explicitly.
+ """
+ __slots__ = ('_epoll_obj',)
+
+ def __init__(self, epoll_obj):
+ self._epoll_obj = epoll_obj
+
+ def register(self, fd, *args):
+ self._epoll_obj.register(fd, *args)
+
+ def unregister(self, fd):
+ self._epoll_obj.unregister(fd)
+
+ def poll(self, *args):
+ if len(args) > 1:
+ raise TypeError(
+ "poll expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ timeout = -1
+ if args:
+ timeout = args[0]
+ if timeout is None or timeout < 0:
+ timeout = -1
+ elif timeout != 0:
+ timeout = float(timeout) / 1000
+
+ return self._epoll_obj.poll(timeout)
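
The adapter exists because select.poll() takes its timeout in milliseconds while select.epoll() takes seconds. A Linux-only demonstration of the unit mismatch that _epoll_adapter.poll() papers over:

    import os
    import select

    r, w = os.pipe()
    ep = select.epoll()
    ep.register(r, select.EPOLLIN)
    p = select.poll()
    p.register(r, select.POLLIN)

    # select.poll() interprets its timeout as milliseconds...
    assert p.poll(10) == []     # waits about 10 ms
    # ...while select.epoll() interprets it as seconds, hence the
    # float(timeout) / 1000 conversion in _epoll_adapter.poll().
    assert ep.poll(0.01) == []  # also waits about 10 ms

    os.write(w, b"x")
    assert p.poll(0) and ep.poll(0)  # the fd is now readable in both
    ep.close()
    os.close(r)
    os.close(w)
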
diff --git a/pym/portage/util/_eventloop/PollSelectAdapter.py b/pym/portage/util/_eventloop/PollSelectAdapter.py
index 17e63d918..244788c57 100644
--- a/pym/portage/util/_eventloop/PollSelectAdapter.py
+++ b/pym/portage/util/_eventloop/PollSelectAdapter.py
@@ -64,7 +64,7 @@ class PollSelectAdapter(object):
if timeout is not None and timeout < 0:
timeout = None
if timeout is not None:
- select_args.append(timeout / 1000)
+ select_args.append(float(timeout) / 1000)
select_events = select.select(*select_args)
poll_events = []
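
The float() cast matters under Python 2, where / on two integers truncates: a sub-second timeout silently became a zero-second select(), turning a blocking wait into a busy loop. A two-line illustration:

    timeout = 250                 # milliseconds
    print(timeout / 1000)         # 0 on Python 2 (truncated), 0.25 on Python 3
    print(float(timeout) / 1000)  # 0.25 on both -- the fix applied above
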
diff --git a/pym/portage/util/_get_vm_info.py b/pym/portage/util/_get_vm_info.py
new file mode 100644
index 000000000..e8ad93805
--- /dev/null
+++ b/pym/portage/util/_get_vm_info.py
@@ -0,0 +1,80 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import platform
+import subprocess
+
+from portage import _unicode_decode
+
+def get_vm_info():
+
+ vm_info = {}
+
+ if platform.system() == 'Linux':
+ try:
+ proc = subprocess.Popen(["free"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ pass
+ else:
+ output = _unicode_decode(proc.communicate()[0])
+ if proc.wait() == os.EX_OK:
+ for line in output.splitlines():
+ line = line.split()
+ if len(line) < 2:
+ continue
+ if line[0] == "Mem:":
+ try:
+ vm_info["ram.total"] = int(line[1]) * 1024
+ except ValueError:
+ pass
+ if len(line) > 3:
+ try:
+ vm_info["ram.free"] = int(line[3]) * 1024
+ except ValueError:
+ pass
+ elif line[0] == "Swap:":
+ try:
+ vm_info["swap.total"] = int(line[1]) * 1024
+ except ValueError:
+ pass
+ if len(line) > 3:
+ try:
+ vm_info["swap.free"] = int(line[3]) * 1024
+ except ValueError:
+ pass
+
+ else:
+
+ try:
+ proc = subprocess.Popen(["sysctl", "-a"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ pass
+ else:
+ output = _unicode_decode(proc.communicate()[0])
+ if proc.wait() == os.EX_OK:
+ for line in output.splitlines():
+ line = line.split(":", 1)
+ if len(line) != 2:
+ continue
+ line[1] = line[1].strip()
+ if line[0] == "hw.physmem":
+ try:
+ vm_info["ram.total"] = int(line[1])
+ except ValueError:
+ pass
+ elif line[0] == "vm.swap_total":
+ try:
+ vm_info["swap.total"] = int(line[1])
+ except ValueError:
+ pass
+ elif line[0] == "Free Memory Pages":
+ if line[1][-1] == "K":
+ try:
+ vm_info["ram.free"] = int(line[1][:-1]) * 1024
+ except ValueError:
+ pass
+
+ return vm_info
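
An illustrative session with the new helper; it shells out to free(1) on Linux (or sysctl on other systems) and returns byte counts, so the exact keys depend on what the host reports:

    from portage.util._get_vm_info import get_vm_info

    info = get_vm_info()
    print(sorted(info))           # e.g. ['ram.free', 'ram.total', 'swap.free', 'swap.total']
    print(info.get("ram.total"))  # total RAM in bytes, or None if `free` was unavailable
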
diff --git a/pym/portage/util/_info_files.py b/pym/portage/util/_info_files.py
new file mode 100644
index 000000000..fabf74b0f
--- /dev/null
+++ b/pym/portage/util/_info_files.py
@@ -0,0 +1,138 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import re
+import stat
+import subprocess
+
+import portage
+from portage import os
+
+def chk_updated_info_files(root, infodirs, prev_mtimes):
+
+ if os.path.exists("/usr/bin/install-info"):
+ out = portage.output.EOutput()
+ regen_infodirs = []
+ for z in infodirs:
+ if z == '':
+ continue
+ inforoot = portage.util.normalize_path(root + z)
+ if os.path.isdir(inforoot) and \
+ not [x for x in os.listdir(inforoot) \
+ if x.startswith('.keepinfodir')]:
+ infomtime = os.stat(inforoot)[stat.ST_MTIME]
+ if inforoot not in prev_mtimes or \
+ prev_mtimes[inforoot] != infomtime:
+ regen_infodirs.append(inforoot)
+
+ if not regen_infodirs:
+ portage.util.writemsg_stdout("\n")
+ if portage.util.noiselimit >= 0:
+ out.einfo("GNU info directory index is up-to-date.")
+ else:
+ portage.util.writemsg_stdout("\n")
+ if portage.util.noiselimit >= 0:
+ out.einfo("Regenerating GNU info directory index...")
+
+ dir_extensions = ("", ".gz", ".bz2")
+ icount = 0
+ badcount = 0
+ errmsg = ""
+ for inforoot in regen_infodirs:
+ if inforoot == '':
+ continue
+
+ if not os.path.isdir(inforoot) or \
+ not os.access(inforoot, os.W_OK):
+ continue
+
+ file_list = os.listdir(inforoot)
+ file_list.sort()
+ dir_file = os.path.join(inforoot, "dir")
+ moved_old_dir = False
+ processed_count = 0
+ for x in file_list:
+ if x.startswith(".") or \
+ os.path.isdir(os.path.join(inforoot, x)):
+ continue
+ if x.startswith("dir"):
+ skip = False
+ for ext in dir_extensions:
+ if x == "dir" + ext or \
+ x == "dir" + ext + ".old":
+ skip = True
+ break
+ if skip:
+ continue
+ if processed_count == 0:
+ for ext in dir_extensions:
+ try:
+ os.rename(dir_file + ext, dir_file + ext + ".old")
+ moved_old_dir = True
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ processed_count += 1
+ try:
+ proc = subprocess.Popen(
+ ['/usr/bin/install-info',
+ '--dir-file=%s' % os.path.join(inforoot, "dir"),
+ os.path.join(inforoot, x)],
+ env=dict(os.environ, LANG="C", LANGUAGE="C"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myso = None
+ else:
+ myso = portage._unicode_decode(
+ proc.communicate()[0]).rstrip("\n")
+ proc.wait()
+ existsstr = "already exists, for file `"
+ if myso:
+ if re.search(existsstr, myso):
+ # Already exists... Don't increment the count for this.
+ pass
+ elif myso[:44] == "install-info: warning: no info dir entry in ":
+ # This info file doesn't contain a DIR-header: install-info produces this
+ # (harmless) warning (the --quiet switch doesn't seem to work).
+ # Don't increment the count for this.
+ pass
+ else:
+ badcount += 1
+ errmsg += myso + "\n"
+ icount += 1
+
+ if moved_old_dir and not os.path.exists(dir_file):
+ # We didn't generate a new dir file, so put the old file
+ # back where it was originally found.
+ for ext in dir_extensions:
+ try:
+ os.rename(dir_file + ext + ".old", dir_file + ext)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ # Clean dir.old cruft so that it doesn't prevent
+ # unmerge of otherwise empty directories.
+ for ext in dir_extensions:
+ try:
+ os.unlink(dir_file + ext + ".old")
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ #update mtime so we can potentially avoid regenerating.
+ prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]
+
+ if badcount:
+ out.eerror("Processed %d info files; %d errors." % \
+ (icount, badcount))
+ portage.util.writemsg_level(errmsg,
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ if icount > 0 and portage.util.noiselimit >= 0:
+ out.einfo("Processed %d info files." % (icount,))
diff --git a/pym/portage/util/_path.py b/pym/portage/util/_path.py
new file mode 100644
index 000000000..6fbcb438c
--- /dev/null
+++ b/pym/portage/util/_path.py
@@ -0,0 +1,27 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import stat
+
+from portage import os
+from portage.exception import PermissionDenied
+
+def exists_raise_eaccess(path):
+ try:
+ os.stat(path)
+ except OSError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied("stat('%s')" % path)
+ return False
+ else:
+ return True
+
+def isdir_raise_eaccess(path):
+ try:
+ st = os.stat(path)
+ except OSError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied("stat('%s')" % path)
+ return False
+ else:
+ return stat.S_ISDIR(st.st_mode)
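
Unlike os.path.exists() and os.path.isdir(), which return False for both "missing" and "permission denied", these helpers re-raise the EACCES case. Illustrative usage:

    from portage.exception import PermissionDenied
    from portage.util._path import isdir_raise_eaccess

    try:
        if isdir_raise_eaccess("/etc/portage"):
            print("directory is present")
    except PermissionDenied as e:
        print("could not even stat it: %s" % (e,))
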
diff --git a/pym/portage/util/_urlopen.py b/pym/portage/util/_urlopen.py
index 307624bc4..4cfe183b1 100644
--- a/pym/portage/util/_urlopen.py
+++ b/pym/portage/util/_urlopen.py
@@ -1,7 +1,11 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import io
import sys
+from datetime import datetime
+from time import mktime
+from email.utils import formatdate, parsedate
try:
from urllib.request import urlopen as _urlopen
@@ -14,29 +18,75 @@ except ImportError:
import urllib2 as urllib_request
from urllib import splituser as urllib_parse_splituser
-def urlopen(url):
- try:
- return _urlopen(url)
- except SystemExit:
- raise
- except Exception:
- if sys.hexversion < 0x3000000:
- raise
- parse_result = urllib_parse.urlparse(url)
- if parse_result.scheme not in ("http", "https") or \
- not parse_result.username:
- raise
-
- return _new_urlopen(url)
-
-def _new_urlopen(url):
- # This is experimental code for bug #413983.
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+# To account for the difference between the TIMESTAMP of the index's
+# contents and the file's mtime.
+TIMESTAMP_TOLERANCE = 5
+
+def urlopen(url, if_modified_since=None):
parse_result = urllib_parse.urlparse(url)
- netloc = urllib_parse_splituser(parse_result.netloc)[1]
- url = urllib_parse.urlunparse((parse_result.scheme, netloc, parse_result.path, parse_result.params, parse_result.query, parse_result.fragment))
- password_manager = urllib_request.HTTPPasswordMgrWithDefaultRealm()
- if parse_result.username is not None:
- password_manager.add_password(None, url, parse_result.username, parse_result.password)
- auth_handler = urllib_request.HTTPBasicAuthHandler(password_manager)
- opener = urllib_request.build_opener(auth_handler)
- return opener.open(url)
+ if parse_result.scheme not in ("http", "https"):
+ return _urlopen(url)
+ else:
+ netloc = urllib_parse_splituser(parse_result.netloc)[1]
+ url = urllib_parse.urlunparse((parse_result.scheme, netloc, parse_result.path, parse_result.params, parse_result.query, parse_result.fragment))
+ password_manager = urllib_request.HTTPPasswordMgrWithDefaultRealm()
+ request = urllib_request.Request(url)
+ request.add_header('User-Agent', 'Gentoo Portage')
+ if if_modified_since:
+ request.add_header('If-Modified-Since', _timestamp_to_http(if_modified_since))
+ if parse_result.username is not None:
+ password_manager.add_password(None, url, parse_result.username, parse_result.password)
+ auth_handler = CompressedResponseProcessor(password_manager)
+ opener = urllib_request.build_opener(auth_handler)
+ hdl = opener.open(request)
+ if hdl.headers.get('last-modified', ''):
+ try:
+ add_header = hdl.headers.add_header
+ except AttributeError:
+ # Python 2
+ add_header = hdl.headers.addheader
+ add_header('timestamp', _http_to_timestamp(hdl.headers.get('last-modified')))
+ return hdl
+
+def _timestamp_to_http(timestamp):
+ dt = datetime.fromtimestamp(float(long(timestamp)+TIMESTAMP_TOLERANCE))
+ stamp = mktime(dt.timetuple())
+ return formatdate(timeval=stamp, localtime=False, usegmt=True)
+
+def _http_to_timestamp(http_datetime_string):
+ tuple = parsedate(http_datetime_string)
+ timestamp = mktime(tuple)
+ return str(long(timestamp))
+
+class CompressedResponseProcessor(urllib_request.HTTPBasicAuthHandler):
+ # Handler for compressed responses.
+
+ def http_request(self, req):
+ req.add_header('Accept-Encoding', 'bzip2,gzip,deflate')
+ return req
+ https_request = http_request
+
+ def http_response(self, req, response):
+ decompressed = None
+ if response.headers.get('content-encoding') == 'bzip2':
+ import bz2
+ decompressed = io.BytesIO(bz2.decompress(response.read()))
+ elif response.headers.get('content-encoding') == 'gzip':
+ from gzip import GzipFile
+ decompressed = GzipFile(fileobj=io.BytesIO(response.read()), mode='r')
+ elif response.headers.get('content-encoding') == 'deflate':
+ import zlib
+ try:
+ decompressed = io.BytesIO(zlib.decompress(response.read()))
+ except zlib.error: # raw deflate stream; the server ignored RFC 1950
+ decompressed = io.BytesIO(zlib.decompress(response.read(), -zlib.MAX_WBITS))
+ if decompressed:
+ old_response = response
+ response = urllib_request.addinfourl(decompressed, old_response.headers, old_response.url, old_response.code)
+ response.msg = old_response.msg
+ return response
+ https_response = http_response
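
The helpers above translate between Unix timestamps and HTTP date strings for the If-Modified-Since/Last-Modified handshake. A standalone sketch of the same conversion (using calendar.timegm for an exact UTC round trip, and omitting the module's 5-second TIMESTAMP_TOLERANCE pad):

    from calendar import timegm
    from email.utils import formatdate, parsedate

    ts = 1396000000
    http_date = formatdate(timeval=ts, localtime=False, usegmt=True)
    print(http_date)                     # 'Fri, 28 Mar 2014 09:46:40 GMT'
    print(timegm(parsedate(http_date)))  # 1396000000 -- round trip is exact
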
diff --git a/pym/portage/util/digraph.py b/pym/portage/util/digraph.py
index f3ae658c9..4a9cb43b6 100644
--- a/pym/portage/util/digraph.py
+++ b/pym/portage/util/digraph.py
@@ -1,12 +1,13 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['digraph']
from collections import deque
import sys
-from portage import _unicode_decode
from portage.util import writemsg
class digraph(object):
@@ -16,24 +17,24 @@ class digraph(object):
def __init__(self):
"""Create an empty digraph"""
-
+
# { node : ( { child : priority } , { parent : priority } ) }
self.nodes = {}
self.order = []
def add(self, node, parent, priority=0):
"""Adds the specified node with the specified parent.
-
+
If the dep is a soft-dep and the node already has a hard
relationship to the parent, the relationship is left as hard."""
-
+
if node not in self.nodes:
self.nodes[node] = ({}, {}, node)
self.order.append(node)
-
+
if not parent:
return
-
+
if parent not in self.nodes:
self.nodes[parent] = ({}, {}, parent)
self.order.append(parent)
@@ -46,19 +47,29 @@ class digraph(object):
priorities.append(priority)
priorities.sort()
+ def discard(self, node):
+ """
+ Like remove(), except it doesn't raise KeyError if the
+ node doesn't exist.
+ """
+ try:
+ self.remove(node)
+ except KeyError:
+ pass
+
def remove(self, node):
"""Removes the specified node from the digraph, also removing
any ties to other nodes in the digraph. Raises KeyError if the
node doesn't exist."""
-
+
if node not in self.nodes:
raise KeyError(node)
-
+
for parent in self.nodes[node][1]:
del self.nodes[parent][0][node]
for child in self.nodes[node][0]:
del self.nodes[child][1][node]
-
+
del self.nodes[node]
self.order.remove(node)
@@ -157,10 +168,10 @@ class digraph(object):
def leaf_nodes(self, ignore_priority=None):
"""Return all nodes that have no children
-
+
If ignore_soft_deps is True, soft deps are not counted as
children in calculations."""
-
+
leaf_nodes = []
if ignore_priority is None:
for node in self.order:
@@ -191,10 +202,10 @@ class digraph(object):
def root_nodes(self, ignore_priority=None):
"""Return all nodes that have no parents.
-
+
If ignore_soft_deps is True, soft deps are not counted as
parents in calculations."""
-
+
root_nodes = []
if ignore_priority is None:
for node in self.order:
@@ -272,18 +283,17 @@ class digraph(object):
def debug_print(self):
def output(s):
writemsg(s, noiselevel=-1)
- # Use _unicode_decode() to force unicode format
+ # Use unicode_literals to force unicode format
# strings for python-2.x safety, ensuring that
# node.__unicode__() is used when necessary.
for node in self.nodes:
- output(_unicode_decode("%s ") % (node,))
+ output("%s " % (node,))
if self.nodes[node][0]:
output("depends on\n")
else:
output("(no children)\n")
for child, priorities in self.nodes[node][0].items():
- output(_unicode_decode(" %s (%s)\n") % \
- (child, priorities[-1],))
+ output(" %s (%s)\n" % (child, priorities[-1],))
def bfs(self, start, ignore_priority=None):
if start not in self:
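
discard() mirrors set.discard(): removing a node that is absent becomes a no-op instead of a KeyError. Illustrative usage with hypothetical package names:

    from portage.util.digraph import digraph

    g = digraph()
    g.add("pkg/b", "pkg/a")   # pkg/a depends on pkg/b
    g.discard("pkg/missing")  # silently ignored
    g.remove("pkg/b")         # fine: the node exists
    try:
        g.remove("pkg/b")     # a second removal raises
    except KeyError:
        print("already gone")
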
diff --git a/pym/portage/util/env_update.py b/pym/portage/util/env_update.py
index ace4077f7..c0a93a83b 100644
--- a/pym/portage/util/env_update.py
+++ b/pym/portage/util/env_update.py
@@ -1,16 +1,17 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['env_update']
import errno
+import glob
import io
import stat
import sys
import time
import portage
-from portage import os, _encodings, _unicode_encode
+from portage import os, _encodings, _unicode_decode, _unicode_encode
from portage.checksum import prelink_capable
from portage.data import ostype
from portage.exception import ParseError
@@ -23,6 +24,7 @@ from portage.dbapi.vartree import vartree
from portage.package.ebuild.config import config
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
@@ -88,7 +90,8 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
eprefix = settings.get("EPREFIX", "")
eprefix_lstrip = eprefix.lstrip(os.sep)
- envd_dir = os.path.join(target_root, eprefix_lstrip, "etc", "env.d")
+ eroot = normalize_path(os.path.join(target_root, eprefix_lstrip)).rstrip(os.sep) + os.sep
+ envd_dir = os.path.join(eroot, "etc", "env.d")
ensure_dirs(envd_dir, mode=0o755)
fns = listdir(envd_dir, EmptyOnError=1)
fns.sort()
@@ -164,15 +167,14 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
they won't be overwritten by this dict.update call."""
env.update(myconfig)
- ldsoconf_path = os.path.join(
- target_root, eprefix_lstrip, "etc", "ld.so.conf")
+ ldsoconf_path = os.path.join(eroot, "etc", "ld.so.conf")
try:
myld = io.open(_unicode_encode(ldsoconf_path,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'], errors='replace')
- myldlines=myld.readlines()
+ myldlines = myld.readlines()
myld.close()
- oldld=[]
+ oldld = []
for x in myldlines:
#each line has at least one char (a newline)
if x[:1] == "#":
@@ -193,20 +195,34 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
myfd.write(x + "\n")
myfd.close()
+ potential_lib_dirs = set()
+ for lib_dir_glob in ('usr/lib*', 'lib*'):
+ x = os.path.join(eroot, lib_dir_glob)
+ for y in glob.glob(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict')):
+ try:
+ y = _unicode_decode(y,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if os.path.basename(y) != 'libexec':
+ potential_lib_dirs.add(y[len(eroot):])
+
# Update prelink.conf if we are prelink-enabled
if prelink_capable:
- newprelink = atomic_ofstream(os.path.join(
- target_root, eprefix_lstrip, "etc", "prelink.conf"))
+ prelink_d = os.path.join(eroot, 'etc', 'prelink.conf.d')
+ ensure_dirs(prelink_d)
+ newprelink = atomic_ofstream(os.path.join(prelink_d, 'portage.conf'))
newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
newprelink.write("# contents of /etc/env.d directory\n")
- for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
- newprelink.write("-l %s\n" % (x,));
- prelink_paths = []
- prelink_paths += specials.get("LDPATH", [])
- prelink_paths += specials.get("PATH", [])
- prelink_paths += specials.get("PRELINK_PATH", [])
- prelink_path_mask = specials.get("PRELINK_PATH_MASK", [])
+ for x in sorted(potential_lib_dirs) + ['bin', 'sbin']:
+ newprelink.write('-l /%s\n' % (x,));
+ prelink_paths = set()
+ prelink_paths |= set(specials.get('LDPATH', []))
+ prelink_paths |= set(specials.get('PATH', []))
+ prelink_paths |= set(specials.get('PRELINK_PATH', []))
+ prelink_path_mask = specials.get('PRELINK_PATH_MASK', [])
for x in prelink_paths:
if not x:
continue
@@ -227,12 +243,26 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
newprelink.write("-b %s\n" % (x,))
newprelink.close()
+ # Migration code path. If /etc/prelink.conf was generated by us, then
+ # point it at the new prelink.conf.d fragments until the prelink
+ # package re-installs.
+ prelink_conf = os.path.join(eroot, 'etc', 'prelink.conf')
+ try:
+ with open(_unicode_encode(prelink_conf,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ if f.readline() == b'# prelink.conf autogenerated by env-update; make all changes to\n':
+ f = atomic_ofstream(prelink_conf)
+ f.write('-c /etc/prelink.conf.d/*.conf\n')
+ f.close()
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
current_time = long(time.time())
mtime_changed = False
+
lib_dirs = set()
- for lib_dir in set(specials["LDPATH"] + \
- ['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
- x = os.path.join(target_root, eprefix_lstrip, lib_dir.lstrip(os.sep))
+ for lib_dir in set(specials['LDPATH']) | potential_lib_dirs:
+ x = os.path.join(eroot, lib_dir.lstrip(os.sep))
try:
newldpathtime = os.stat(x)[stat.ST_MTIME]
lib_dirs.add(normalize_path(x))
@@ -292,7 +322,7 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
(target_root,))
os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
- elif ostype in ("FreeBSD","DragonFly"):
+ elif ostype in ("FreeBSD", "DragonFly"):
writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
target_root)
os.system(("cd / ; %s -elf -i " + \
@@ -308,11 +338,10 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
#create /etc/profile.env for bash support
- outfile = atomic_ofstream(os.path.join(
- target_root, eprefix_lstrip, "etc", "profile.env"))
+ outfile = atomic_ofstream(os.path.join(eroot, "etc", "profile.env"))
outfile.write(penvnotice)
- env_keys = [ x for x in env if x != "LDPATH" ]
+ env_keys = [x for x in env if x != "LDPATH"]
env_keys.sort()
for k in env_keys:
v = env[k]
@@ -323,8 +352,7 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
outfile.close()
#create /etc/csh.env for (t)csh support
- outfile = atomic_ofstream(os.path.join(
- target_root, eprefix_lstrip, "etc", "csh.env"))
+ outfile = atomic_ofstream(os.path.join(eroot, "etc", "csh.env"))
outfile.write(cenvnotice)
for x in env_keys:
outfile.write("setenv %s '%s'\n" % (x, env[x]))
diff --git a/pym/portage/util/lafilefixer.py b/pym/portage/util/lafilefixer.py
index 54ff20de5..2562d9a77 100644
--- a/pym/portage/util/lafilefixer.py
+++ b/pym/portage/util/lafilefixer.py
@@ -11,7 +11,7 @@ from portage.exception import InvalidData
# This is a re-implementation of dev-util/lafilefixer-0.5.
# rewrite_lafile() takes the contents of an lafile as a string
# It then parses the dependency_libs and inherited_linker_flags
-# entries.
+# entries.
# We insist on dependency_libs being present. inherited_linker_flags
# is optional.
# There are strict rules about the syntax imposed by libtool's libltdl.
@@ -21,7 +21,7 @@ from portage.exception import InvalidData
# lafilefixer does).
# What it does:
# * Replaces all .la files with absolute paths in dependency_libs with
-# corresponding -l* and -L* entries
+# corresponding -l* and -L* entries
# (/usr/lib64/libfoo.la -> -L/usr/lib64 -lfoo)
# * Moves various flags (see flag_re below) to inherited_linker_flags,
# if such an entry was present.
@@ -36,7 +36,7 @@ from portage.exception import InvalidData
dep_libs_re = re.compile(b"dependency_libs='(?P<value>[^']*)'$")
inh_link_flags_re = re.compile(b"inherited_linker_flags='(?P<value>[^']*)'$")
-#regexes for replacing stuff in -L entries.
+#regexes for replacing stuff in -L entries.
#replace 'X11R6/lib' and 'local/lib' with 'lib', no idea what this is about.
X11_local_sub = re.compile(b"X11R6/lib|local/lib")
#get rid of the '..'
@@ -129,11 +129,11 @@ def rewrite_lafile(contents):
#This allows us to place all -L entries at the beginning
#of 'dependency_libs'.
ladir = dep_libs_entry
-
+
ladir = X11_local_sub.sub(b"lib", ladir)
ladir = pkgconfig_sub1.sub(b"usr", ladir)
ladir = pkgconfig_sub2.sub(b"\g<usrlib>", ladir)
-
+
if ladir not in libladir:
libladir.append(ladir)
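
The core substitution lafilefixer performs, reduced to a sketch: an absolute .la reference in dependency_libs becomes an -L/-l pair (the helper name is made up for illustration):

    import os
    import re

    def la_to_flags(la_path):
        # /usr/lib64/libfoo.la -> ('-L/usr/lib64', '-lfoo')
        ladir, name = os.path.split(la_path)
        m = re.match(r"lib(.+)\.la$", name)
        if m is None:
            raise ValueError("not a libtool archive: %s" % la_path)
        return ("-L" + ladir, "-l" + m.group(1))

    print(la_to_flags("/usr/lib64/libfoo.la"))  # ('-L/usr/lib64', '-lfoo')
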
diff --git a/pym/portage/util/listdir.py b/pym/portage/util/listdir.py
index c2628cbfe..2012e145f 100644
--- a/pym/portage/util/listdir.py
+++ b/pym/portage/util/listdir.py
@@ -1,36 +1,33 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['cacheddir', 'listdir']
import errno
import stat
-import time
+import sys
+
+if sys.hexversion < 0x3000000:
+ from itertools import izip as zip
from portage import os
+from portage.const import VCS_DIRS
from portage.exception import DirectoryNotFound, PermissionDenied, PortageException
-from portage.util import normalize_path, writemsg
-
-_ignorecvs_dirs = ('CVS', 'RCS', 'SCCS', '.svn', '.git')
+from portage.util import normalize_path
+
+# The global dircache is no longer supported, since it could
+# be a memory leak for API consumers. Any cacheddir callers
+# should use higher-level caches instead, when necessary.
+# TODO: Remove dircache variable after stable portage does
+# not use it (keep it for now, in case API consumers clear
+# it manually).
dircache = {}
-cacheHit = 0
-cacheMiss = 0
-cacheStale = 0
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
- global cacheHit,cacheMiss,cacheStale
mypath = normalize_path(my_original_path)
- if mypath in dircache:
- cacheHit += 1
- cached_mtime, list, ftype = dircache[mypath]
- else:
- cacheMiss += 1
- cached_mtime, list, ftype = -1, [], []
try:
pathstat = os.stat(mypath)
- if stat.S_ISDIR(pathstat[stat.ST_MODE]):
- mtime = pathstat.st_mtime
- else:
+ if not stat.S_ISDIR(pathstat.st_mode):
raise DirectoryNotFound(mypath)
except EnvironmentError as e:
if e.errno == PermissionDenied.errno:
@@ -39,19 +36,16 @@ def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymli
return [], []
except PortageException:
return [], []
- # Python retuns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
- if mtime != cached_mtime or time.time() - mtime < 4:
- if mypath in dircache:
- cacheStale += 1
+ else:
try:
- list = os.listdir(mypath)
+ fpaths = os.listdir(mypath)
except EnvironmentError as e:
if e.errno != errno.EACCES:
raise
del e
raise PermissionDenied(mypath)
ftype = []
- for x in list:
+ for x in fpaths:
try:
if followSymlinks:
pathstat = os.stat(mypath+"/"+x)
@@ -68,23 +62,22 @@ def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymli
ftype.append(3)
except (IOError, OSError):
ftype.append(3)
- dircache[mypath] = mtime, list, ftype
-
- ret_list = []
- ret_ftype = []
- for x in range(0, len(list)):
- if list[x] in ignorelist:
- pass
- elif ignorecvs:
- if list[x][:2] != ".#" and \
- not (ftype[x] == 1 and list[x] in _ignorecvs_dirs):
- ret_list.append(list[x])
- ret_ftype.append(ftype[x])
- else:
- ret_list.append(list[x])
- ret_ftype.append(ftype[x])
-
- writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
+
+ if ignorelist or ignorecvs:
+ ret_list = []
+ ret_ftype = []
+ for file_path, file_type in zip(fpaths, ftype):
+ if file_path in ignorelist:
+ pass
+ elif ignorecvs:
+ if file_path[:2] != ".#" and \
+ not (file_type == 1 and file_path in VCS_DIRS):
+ ret_list.append(file_path)
+ ret_ftype.append(file_type)
+ else:
+ ret_list = fpaths
+ ret_ftype = ftype
+
return ret_list, ret_ftype
def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
@@ -98,7 +91,7 @@ def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelis
@type recursive: Boolean
@param filesonly: Only return files, not directories
@type filesonly: Boolean
- @param ignorecvs: Ignore CVS directories ('CVS','SCCS','.svn','.git')
+ @param ignorecvs: Ignore VCS directories
@type ignorecvs: Boolean
@param ignorelist: List of filenames/directories to exclude
@type ignorelist: List
@@ -112,40 +105,35 @@ def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelis
@return: A list of files and directories (or just files or just directories) or an empty list.
"""
- list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
+ fpaths, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
- if list is None:
- list=[]
+ if fpaths is None:
+ fpaths = []
if ftype is None:
- ftype=[]
+ ftype = []
if not (filesonly or dirsonly or recursive):
- return list
+ return fpaths
if recursive:
- x=0
- while x<len(ftype):
- if ftype[x] == 1:
- l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
- followSymlinks)
-
- l=l[:]
- for y in range(0,len(l)):
- l[y]=list[x]+"/"+l[y]
- list=list+l
- ftype=ftype+f
- x+=1
+ stack = list(zip(fpaths, ftype))
+ fpaths = []
+ ftype = []
+ while stack:
+ file_path, file_type = stack.pop()
+ fpaths.append(file_path)
+ ftype.append(file_type)
+ if file_type == 1:
+ subdir_list, subdir_types = cacheddir(
+ os.path.join(mypath, file_path), ignorecvs,
+ ignorelist, EmptyOnError, followSymlinks)
+ stack.extend((os.path.join(file_path, x), x_type)
+ for x, x_type in zip(subdir_list, subdir_types))
+
if filesonly:
- rlist=[]
- for x in range(0,len(ftype)):
- if ftype[x]==0:
- rlist=rlist+[list[x]]
+ fpaths = [x for x, x_type in zip(fpaths, ftype) if x_type == 0]
+
elif dirsonly:
- rlist = []
- for x in range(0, len(ftype)):
- if ftype[x] == 1:
- rlist = rlist + [list[x]]
- else:
- rlist=list
+ fpaths = [x for x, x_type in zip(fpaths, ftype) if x_type == 1]
- return rlist
+ return fpaths
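
The recursive branch was rewritten from an index-walking loop into an explicit stack. The same traversal pattern in isolation, using os.listdir() directly instead of cacheddir():

    import os

    def walk_relative(top):
        # Depth-first listing of all entries under top, as relative paths.
        stack = [""]
        out = []
        while stack:
            rel = stack.pop()
            for name in os.listdir(os.path.join(top, rel) if rel else top):
                rel_name = os.path.join(rel, name) if rel else name
                out.append(rel_name)
                if os.path.isdir(os.path.join(top, rel_name)):
                    stack.append(rel_name)
        return out

    print(walk_relative("/etc/portage"))  # hypothetical directory
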
diff --git a/pym/portage/util/movefile.py b/pym/portage/util/movefile.py
index 10577b565..452e77f0d 100644
--- a/pym/portage/util/movefile.py
+++ b/pym/portage/util/movefile.py
@@ -1,18 +1,22 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['movefile']
import errno
+import fnmatch
import os as _os
import shutil as _shutil
import stat
+import sys
import subprocess
import textwrap
import portage
from portage import bsd_chflags, _encodings, _os_overrides, _selinux, \
- _unicode_decode, _unicode_encode, _unicode_func_wrapper,\
+ _unicode_decode, _unicode_encode, _unicode_func_wrapper, \
_unicode_module_wrapper
from portage.const import MOVE_BINARY
from portage.exception import OperationNotSupported
@@ -24,43 +28,113 @@ def _apply_stat(src_stat, dest):
_os.chown(dest, src_stat.st_uid, src_stat.st_gid)
_os.chmod(dest, stat.S_IMODE(src_stat.st_mode))
+_xattr_excluder_cache = {}
+
+def _get_xattr_excluder(pattern):
+
+ try:
+ value = _xattr_excluder_cache[pattern]
+ except KeyError:
+ value = _xattr_excluder(pattern)
+ _xattr_excluder_cache[pattern] = value
+
+ return value
+
+class _xattr_excluder(object):
+
+ __slots__ = ('_pattern_split',)
+
+ def __init__(self, pattern):
+
+ if pattern is None:
+ self._pattern_split = None
+ else:
+ pattern = pattern.split()
+ if not pattern:
+ self._pattern_split = None
+ else:
+ pattern.sort()
+ self._pattern_split = tuple(pattern)
+
+ def __call__(self, attr):
+
+ if self._pattern_split is None:
+ return False
+
+ match = fnmatch.fnmatch
+ for x in self._pattern_split:
+ if match(attr, x):
+ return True
+
+ return False
+
if hasattr(_os, "getxattr"):
# Python >=3.3 and GNU/Linux
- def _copyxattr(src, dest):
- for attr in _os.listxattr(src):
+ def _copyxattr(src, dest, exclude=None):
+
+ try:
+ attrs = _os.listxattr(src)
+ except OSError as e:
+ if e.errno != OperationNotSupported.errno:
+ raise
+ attrs = ()
+ if attrs:
+ if exclude is not None and isinstance(attrs[0], bytes):
+ exclude = exclude.encode(_encodings['fs'])
+ exclude = _get_xattr_excluder(exclude)
+
+ for attr in attrs:
+ if exclude(attr):
+ continue
try:
_os.setxattr(dest, attr, _os.getxattr(src, attr))
raise_exception = False
except OSError:
raise_exception = True
if raise_exception:
- raise OperationNotSupported("Filesystem containing file '%s' does not support extended attributes" % dest)
+ raise OperationNotSupported(_("Filesystem containing file '%s' "
+ "does not support extended attribute '%s'") %
+ (_unicode_decode(dest), _unicode_decode(attr)))
else:
try:
import xattr
except ImportError:
xattr = None
if xattr is not None:
- def _copyxattr(src, dest):
- for attr in xattr.list(src):
+ def _copyxattr(src, dest, exclude=None):
+
+ try:
+ attrs = xattr.list(src)
+ except IOError as e:
+ if e.errno != OperationNotSupported.errno:
+ raise
+ attrs = ()
+
+ if attrs:
+ if exclude is not None and isinstance(attrs[0], bytes):
+ exclude = exclude.encode(_encodings['fs'])
+ exclude = _get_xattr_excluder(exclude)
+
+ for attr in attrs:
+ if exclude(attr):
+ continue
try:
xattr.set(dest, attr, xattr.get(src, attr))
raise_exception = False
except IOError:
raise_exception = True
if raise_exception:
- raise OperationNotSupported("Filesystem containing file '%s' does not support extended attributes" % dest)
+ raise OperationNotSupported(_("Filesystem containing file '%s' "
+ "does not support extended attribute '%s'") %
+ (_unicode_decode(dest), _unicode_decode(attr)))
else:
- _devnull = open("/dev/null", "wb")
try:
- subprocess.call(["getfattr", "--version"], stdout=_devnull)
- subprocess.call(["setfattr", "--version"], stdout=_devnull)
- _has_getfattr_and_setfattr = True
+ with open(_os.devnull, 'wb') as f:
+ subprocess.call(["getfattr", "--version"], stdout=f)
+ subprocess.call(["setfattr", "--version"], stdout=f)
except OSError:
- _has_getfattr_and_setfattr = False
- _devnull.close()
- if _has_getfattr_and_setfattr:
- def _copyxattr(src, dest):
+ def _copyxattr(src, dest, exclude=None):
+ # TODO: implement exclude
getfattr_process = subprocess.Popen(["getfattr", "-d", "--absolute-names", src], stdout=subprocess.PIPE)
getfattr_process.wait()
extended_attributes = getfattr_process.stdout.readlines()
@@ -72,14 +146,15 @@ else:
if setfattr_process.returncode != 0:
raise OperationNotSupported("Filesystem containing file '%s' does not support extended attributes" % dest)
else:
- def _copyxattr(src, dest):
+ def _copyxattr(src, dest, exclude=None):
pass
def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
hardlink_candidates=None, encoding=_encodings['fs']):
"""moves a file from src to dest, preserving all permissions and attributes; mtime will
- be preserved even when moving across filesystems. Returns true on success and false on
- failure. Move is atomic."""
+ be preserved even when moving across filesystems. Returns mtime as integer on success
+ and None on failure. mtime is expressed in seconds in Python <3.3 and nanoseconds in
+ Python >=3.3. Move is atomic."""
if mysettings is None:
mysettings = portage.settings
@@ -102,22 +177,22 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
try:
if not sstat:
- sstat=os.lstat(src)
+ sstat = os.lstat(src)
except SystemExit as e:
raise
except Exception as e:
writemsg("!!! %s\n" % _("Stating source file failed... movefile()"),
noiselevel=-1)
- writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
- destexists=1
+ destexists = 1
try:
- dstat=os.lstat(dest)
+ dstat = os.lstat(dest)
except (OSError, IOError):
- dstat=os.lstat(os.path.dirname(dest))
- destexists=0
+ dstat = os.lstat(os.path.dirname(dest))
+ destexists = 0
if bsd_chflags:
if destexists and dstat.st_flags != 0:
@@ -132,7 +207,7 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
if stat.S_ISLNK(dstat[stat.ST_MODE]):
try:
os.unlink(dest)
- destexists=0
+ destexists = 0
except SystemExit as e:
raise
except Exception as e:
@@ -140,7 +215,7 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
if stat.S_ISLNK(sstat[stat.ST_MODE]):
try:
- target=os.readlink(src)
+ target = os.readlink(src)
if mysettings and "D" in mysettings and \
target.startswith(mysettings["D"]):
target = target[len(mysettings["D"])-1:]
@@ -159,17 +234,32 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
if e.errno not in (errno.ENOENT, errno.EEXIST) or \
target != os.readlink(dest):
raise
- lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
- # utime() only works on the target of a symlink, so it's not
- # possible to perserve mtime on symlinks.
- return os.lstat(dest)[stat.ST_MTIME]
+ lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
+
+ try:
+ _os.unlink(src_bytes)
+ except OSError:
+ pass
+
+ if sys.hexversion >= 0x3030000:
+ try:
+ os.utime(dest, ns=(sstat.st_mtime_ns, sstat.st_mtime_ns), follow_symlinks=False)
+ except NotImplementedError:
+ # utimensat() and lutimes() missing in libc.
+ return os.stat(dest, follow_symlinks=False).st_mtime_ns
+ else:
+ return sstat.st_mtime_ns
+ else:
+ # utime() in Python <3.3 only works on the target of a symlink, so it's not
+ # possible to preserve mtime on symlinks.
+ return os.lstat(dest)[stat.ST_MTIME]
except SystemExit as e:
raise
except Exception as e:
writemsg("!!! %s\n" % _("failed to properly create symlink:"),
noiselevel=-1)
writemsg("!!! %s -> %s\n" % (dest, target), noiselevel=-1)
- writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
hardlinked = False
@@ -204,9 +294,13 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
hardlinked = True
+ try:
+ _os.unlink(src_bytes)
+ except OSError:
+ pass
break
- renamefailed=1
+ renamefailed = 1
if hardlinked:
renamefailed = False
if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
@@ -214,14 +308,14 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
if selinux_enabled:
selinux.rename(src, dest)
else:
- os.rename(src,dest)
- renamefailed=0
+ os.rename(src, dest)
+ renamefailed = 0
except OSError as e:
if e.errno != errno.EXDEV:
# Some random error.
writemsg("!!! %s\n" % _("Failed to move %(src)s to %(dest)s") %
{"src": src, "dest": dest}, noiselevel=-1)
- writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
# Invalid cross-device-link 'bind' mounted or actually Cross-Device
if renamefailed:
@@ -233,7 +327,8 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
_copyfile(src_bytes, dest_tmp_bytes)
if xattr_enabled:
try:
- _copyxattr(src_bytes, dest_tmp_bytes)
+ _copyxattr(src_bytes, dest_tmp_bytes,
+ exclude=mysettings.get("PORTAGE_XATTR_EXCLUDE", "security.* system.nfs4_acl"))
except SystemExit:
raise
except:
@@ -252,7 +347,7 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
except Exception as e:
writemsg("!!! %s\n" % _('copy %(src)s -> %(dest)s failed.') %
{"src": src, "dest": dest}, noiselevel=-1)
- writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
else:
#we don't yet handle special, so we need to fall back to /bin/mv
@@ -265,35 +360,54 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
writemsg("!!! %s\n" % a, noiselevel=-1)
return None # failure
- # Always use stat_obj[stat.ST_MTIME] for the integral timestamp which
- # is returned, since the stat_obj.st_mtime float attribute rounds *up*
+ # In Python <3.3 always use stat_obj[stat.ST_MTIME] for the integral timestamp
+ # which is returned, since the stat_obj.st_mtime float attribute rounds *up*
# if the nanosecond part of the timestamp is 999999881 ns or greater.
try:
if hardlinked:
- newmtime = os.stat(dest)[stat.ST_MTIME]
+ if sys.hexversion >= 0x3030000:
+ newmtime = os.stat(dest).st_mtime_ns
+ else:
+ newmtime = os.stat(dest)[stat.ST_MTIME]
else:
# Note: It is not possible to preserve nanosecond precision
# (supported in POSIX.1-2008 via utimensat) with the IEEE 754
# double precision float which only has a 53 bit significand.
if newmtime is not None:
- os.utime(dest, (newmtime, newmtime))
+ if sys.hexversion >= 0x3030000:
+ os.utime(dest, ns=(newmtime, newmtime))
+ else:
+ os.utime(dest, (newmtime, newmtime))
else:
- newmtime = sstat[stat.ST_MTIME]
+ if sys.hexversion >= 0x3030000:
+ newmtime = sstat.st_mtime_ns
+ else:
+ newmtime = sstat[stat.ST_MTIME]
if renamefailed:
- # If rename succeeded then timestamps are automatically
- # preserved with complete precision because the source
- # and destination inode are the same. Otherwise, round
- # down to the nearest whole second since python's float
- # st_mtime cannot be used to preserve the st_mtim.tv_nsec
- # field with complete precision. Note that we have to use
- # stat_obj[stat.ST_MTIME] here because the float
- # stat_obj.st_mtime rounds *up* sometimes.
- os.utime(dest, (newmtime, newmtime))
+ if sys.hexversion >= 0x3030000:
+ # If rename succeeded then timestamps are automatically
+ # preserved with complete precision because the source
+ # and destination inodes are the same. Otherwise, manually
+ # update timestamps with nanosecond precision.
+ os.utime(dest, ns=(newmtime, newmtime))
+ else:
+ # If rename succeeded then timestamps are automatically
+ # preserved with complete precision because the source
+ # and destination inodes are the same. Otherwise, round
+ # down to the nearest whole second since python's float
+ # st_mtime cannot be used to preserve the st_mtim.tv_nsec
+ # field with complete precision. Note that we have to use
+ # stat_obj[stat.ST_MTIME] here because the float
+ # stat_obj.st_mtime rounds *up* sometimes.
+ os.utime(dest, (newmtime, newmtime))
except OSError:
# The utime can fail here with EPERM even though the move succeeded.
# Instead of failing, use stat to return the mtime if possible.
try:
- newmtime = os.stat(dest)[stat.ST_MTIME]
+ if sys.hexversion >= 0x3030000:
+ newmtime = os.stat(dest).st_mtime_ns
+ else:
+ newmtime = os.stat(dest)[stat.ST_MTIME]
except OSError as e:
writemsg(_("!!! Failed to stat in movefile()\n"), noiselevel=-1)
writemsg("!!! %s\n" % dest, noiselevel=-1)
diff --git a/pym/portage/util/whirlpool.py b/pym/portage/util/whirlpool.py
index c696f6fc0..170ae73f8 100644
--- a/pym/portage/util/whirlpool.py
+++ b/pym/portage/util/whirlpool.py
@@ -639,6 +639,8 @@ def WhirlpoolInit(ctx):
return
def WhirlpoolAdd(source, sourceBits, ctx):
+ if not isinstance(source, bytes):
+ raise TypeError("Expected %s, got %s" % (bytes, type(source)))
if sys.hexversion < 0x3000000:
source = [ord(s)&0xff for s in source]
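
The new guard makes misuse fail loudly: the Whirlpool core must be handed bytes, never text. An illustration of the failure mode it catches (Python 3 semantics):

    data = "secret"                      # str -- WhirlpoolAdd would raise TypeError
    print(isinstance(data, bytes))       # False
    print(isinstance(data.encode("utf-8"), bytes))  # True -- hash this instead
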
diff --git a/pym/portage/util/writeable_check.py b/pym/portage/util/writeable_check.py
new file mode 100644
index 000000000..e6ddce680
--- /dev/null
+++ b/pym/portage/util/writeable_check.py
@@ -0,0 +1,79 @@
+#-*- coding:utf-8 -*-
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+"""
+Methods to check whether Portage is going to write to read-only filesystems.
+Since the methods are not portable across different OSes, each OS needs its
+own method. To expand RO checking for different OSes, add a method which
+accepts a list of directories and returns a list of mounts which need to be
+remounted RW, then add "elif ostype == (the ostype value for your OS)" to
+get_ro_checker().
+"""
+from __future__ import unicode_literals
+
+import io
+import logging
+import re
+
+from portage import _encodings
+from portage.util import writemsg_level
+from portage.localization import _
+from portage.data import ostype
+
+
+def get_ro_checker():
+ """
+ Uses the system type to find an appropriate method for testing whether Portage
+ is going to write to any read-only filesystems.
+
+ @return:
+ 1. A method for testing for RO filesystems appropriate to the current system.
+ """
+ return _CHECKERS.get(ostype, empty_ro_checker)
+
+
+def linux_ro_checker(dir_list):
+ """
+ Use /proc/mounts to check that none of the directories the ebuild will
+ install to are on a read-only filesystem.
+
+ @param dir_list: A list of directories installed by the ebuild.
+ @type dir_list: List
+ @return:
+ 1. A list of filesystems which are both set to be written to and are mounted
+ read-only, may be empty.
+ """
+ ro_filesystems = set()
+
+ try:
+ with io.open("/proc/mounts", mode='r', encoding=_encodings['content'],
+ errors='replace') as f:
+ roregex = re.compile(r'(\A|,)ro(\Z|,)')
+ for line in f:
+ if roregex.search(line.split(" ")[3].strip()) is not None:
+ romount = line.split(" ")[1].strip()
+ ro_filesystems.add(romount)
+
+ # If /proc/mounts can't be read, assume that there are no RO
+ # filesystems and return.
+ except EnvironmentError:
+ writemsg_level(_("!!! /proc/mounts cannot be read"),
+ level=logging.WARNING, noiselevel=-1)
+ return []
+
+ return set.intersection(ro_filesystems, set(dir_list))
+
+
+def empty_ro_checker(dir_list):
+ """
+ Always returns [], this is the fallback function if the system does not have
+ an ro_checker method defined.
+ """
+ return []
+
+
+# _CHECKERS is a map from ostype output to the appropriate function to return
+# in get_ro_checker.
+_CHECKERS = {
+ "Linux": linux_ro_checker,
+}
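
get_ro_checker() above hides the per-OS logic behind a single callable, falling back to empty_ro_checker when no checker is registered for the current ostype. A hedged usage sketch (the directory set is hypothetical):

    from portage.util.writeable_check import get_ro_checker

    checker = get_ro_checker()            # linux_ro_checker on Linux, else empty_ro_checker
    ro_hits = checker({"/usr", "/boot"})  # mount points the merge would write to
    if ro_hits:
        print("Remount these read-write first:", sorted(ro_hits))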
diff --git a/pym/portage/versions.py b/pym/portage/versions.py
index 27947532b..2c9fe5bda 100644
--- a/pym/portage/versions.py
+++ b/pym/portage/versions.py
@@ -1,7 +1,9 @@
# versions.py -- core Portage functionality
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = [
'best', 'catpkgsplit', 'catsplit',
'cpv_getkey', 'cpv_getversion', 'cpv_sort_key', 'pkgcmp', 'pkgsplit',
@@ -19,7 +21,6 @@ else:
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.dep:_get_slot_re',
'portage.repository.config:_gen_valid_repo',
'portage.util:cmp_sort_key',
)
@@ -32,6 +33,10 @@ _unknown_repo = "__unknown__"
# \w is [a-zA-Z0-9_]
+# PMS 3.1.3: A slot name may contain any of the characters [A-Za-z0-9+_.-].
+# It must not begin with a hyphen or a dot.
+_slot = r'([\w+][\w+.-]*)'
+
# 2.1.1 A category name may contain any of the characters [A-Za-z0-9+_.-].
# It must not begin with a hyphen or a dot.
_cat = r'[\w+][\w+.-]*'
@@ -66,6 +71,24 @@ suffix_regexp = re.compile("^(alpha|beta|rc|pre|p)(\\d*)$")
suffix_value = {"pre": -2, "p": 0, "alpha": -4, "beta": -3, "rc": -1}
endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
+_slot_re_cache = {}
+
+def _get_slot_re(eapi_attrs):
+ cache_key = eapi_attrs.slot_operator
+ slot_re = _slot_re_cache.get(cache_key)
+ if slot_re is not None:
+ return slot_re
+
+ if eapi_attrs.slot_operator:
+ slot_re = _slot + r'(/' + _slot + r')?'
+ else:
+ slot_re = _slot
+
+ slot_re = re.compile('^' + slot_re + '$', re.VERBOSE | re.UNICODE)
+
+ _slot_re_cache[cache_key] = slot_re
+ return slot_re
+
_pv_re_cache = {}
def _get_pv_re(eapi_attrs):
@@ -79,18 +102,18 @@ def _get_pv_re(eapi_attrs):
else:
pv_re = _pv['dots_disallowed_in_PN']
- pv_re = re.compile('^' + pv_re + '$', re.VERBOSE)
+ pv_re = re.compile(r'^' + pv_re + r'$', re.VERBOSE | re.UNICODE)
_pv_re_cache[cache_key] = pv_re
return pv_re
def ververify(myver, silent=1):
if ver_regexp.match(myver):
- return 1
+ return True
else:
if not silent:
print(_("!!! syntax error in version: %s") % myver)
- return 0
+ return False
def vercmp(ver1, ver2, silent=1):
"""
@@ -292,7 +315,7 @@ def _pkgsplit(mypkg, eapi=None):
return (m.group('pn'), m.group('ver'), rev)
-_cat_re = re.compile('^%s$' % _cat)
+_cat_re = re.compile('^%s$' % _cat, re.UNICODE)
_missing_cat = 'null'
def catpkgsplit(mydata, silent=1, eapi=None):
@@ -314,11 +337,11 @@ def catpkgsplit(mydata, silent=1, eapi=None):
except AttributeError:
pass
mysplit = mydata.split('/', 1)
- p_split=None
- if len(mysplit)==1:
+ p_split = None
+ if len(mysplit) == 1:
cat = _missing_cat
p_split = _pkgsplit(mydata, eapi=eapi)
- elif len(mysplit)==2:
+ elif len(mysplit) == 2:
cat = mysplit[0]
if _cat_re.match(cat) is not None:
p_split = _pkgsplit(mysplit[1], eapi=eapi)
@@ -337,14 +360,23 @@ class _pkg_str(_unicode):
manually convert them to a plain unicode object first.
"""
- def __new__(cls, cpv, slot=None, repo=None, eapi=None):
+ def __new__(cls, cpv, metadata=None, settings=None, eapi=None,
+ repo=None, slot=None):
return _unicode.__new__(cls, cpv)
- def __init__(self, cpv, slot=None, repo=None, eapi=None):
+ def __init__(self, cpv, metadata=None, settings=None, eapi=None,
+ repo=None, slot=None):
if not isinstance(cpv, _unicode):
# Avoid TypeError from _unicode.__init__ with PyPy.
cpv = _unicode_decode(cpv)
_unicode.__init__(cpv)
+ if metadata is not None:
+ self.__dict__['_metadata'] = metadata
+ slot = metadata.get('SLOT', slot)
+ repo = metadata.get('repository', repo)
+ eapi = metadata.get('EAPI', eapi)
+ if settings is not None:
+ self.__dict__['_settings'] = settings
if eapi is not None:
self.__dict__['eapi'] = eapi
self.__dict__['cpv_split'] = catpkgsplit(cpv, eapi=eapi)
@@ -363,19 +395,19 @@ class _pkg_str(_unicode):
if slot_match is None:
# Avoid an InvalidAtom exception when creating SLOT atoms
self.__dict__['slot'] = '0'
- self.__dict__['slot_abi'] = '0'
+ self.__dict__['sub_slot'] = '0'
self.__dict__['slot_invalid'] = slot
else:
- if eapi_attrs.slot_abi:
+ if eapi_attrs.slot_operator:
slot_split = slot.split("/")
self.__dict__['slot'] = slot_split[0]
if len(slot_split) > 1:
- self.__dict__['slot_abi'] = slot_split[1]
+ self.__dict__['sub_slot'] = slot_split[1]
else:
- self.__dict__['slot_abi'] = slot_split[0]
+ self.__dict__['sub_slot'] = slot_split[0]
else:
self.__dict__['slot'] = slot
- self.__dict__['slot_abi'] = slot
+ self.__dict__['sub_slot'] = slot
if repo is not None:
repo = _gen_valid_repo(repo)
@@ -387,6 +419,25 @@ class _pkg_str(_unicode):
raise AttributeError("_pkg_str instances are immutable",
self.__class__, name, value)
+ @property
+ def stable(self):
+ try:
+ return self._stable
+ except AttributeError:
+ try:
+ metadata = self._metadata
+ settings = self._settings
+ except AttributeError:
+ raise AttributeError('stable')
+ if not settings.local_config:
+ # Since repoman uses different config instances for
+ # different profiles, our local instance does not
+ # refer to the correct profile.
+ raise AssertionError('invalid context')
+ stable = settings._isStable(self)
+ self.__dict__['_stable'] = stable
+ return stable
+
def pkgsplit(mypkg, silent=1, eapi=None):
"""
@param mypkg: either a pv or cpv
@@ -488,7 +539,7 @@ def cpv_sort_key(eapi=None):
return cmp_sort_key(cmp_cpv)
def catsplit(mydep):
- return mydep.split("/", 1)
+ return mydep.split("/", 1)
def best(mymatches, eapi=None):
"""Accepts None arguments; assumes matches are valid."""
diff --git a/pym/portage/xml/metadata.py b/pym/portage/xml/metadata.py
index f820e5414..fcd9dc0e3 100644
--- a/pym/portage/xml/metadata.py
+++ b/pym/portage/xml/metadata.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""Provides an easy-to-use python interface to Gentoo's metadata.xml file.
@@ -28,6 +28,8 @@
'Thomas Mills Hinkle'
"""
+from __future__ import unicode_literals
+
__all__ = ('MetaDataXML',)
import sys
@@ -58,8 +60,7 @@ except (ImportError, SystemError, RuntimeError, Exception):
import re
import xml.etree.ElementTree
-import portage
-from portage import os, _unicode_decode
+from portage import _encodings, _unicode_encode
from portage.util import unique_everseen
class _MetadataTreeBuilder(xml.etree.ElementTree.TreeBuilder):
@@ -203,12 +204,13 @@ class MetaDataXML(object):
self._xml_tree = None
try:
- self._xml_tree = etree.parse(metadata_xml_path,
+ self._xml_tree = etree.parse(_unicode_encode(metadata_xml_path,
+ encoding=_encodings['fs'], errors='strict'),
parser=etree.XMLParser(target=_MetadataTreeBuilder()))
except ImportError:
pass
except ExpatError as e:
- raise SyntaxError(_unicode_decode("%s") % (e,))
+ raise SyntaxError("%s" % (e,))
if isinstance(herds, etree.ElementTree):
herds_etree = herds
@@ -241,7 +243,8 @@ class MetaDataXML(object):
if self._herdstree is None:
try:
- self._herdstree = etree.parse(self._herds_path,
+ self._herdstree = etree.parse(_unicode_encode(self._herds_path,
+ encoding=_encodings['fs'], errors='strict'),
parser=etree.XMLParser(target=_MetadataTreeBuilder()))
except (ImportError, IOError, SyntaxError):
return None
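
Both etree.parse() calls above now receive a filesystem-encoded path rather than text, which keeps non-ASCII paths working regardless of locale. Reduced to standard-library calls, the idea is roughly this (the filename is hypothetical, and _unicode_encode does more than this one-liner):

    import sys
    import xml.etree.ElementTree as etree

    # Encode with the filesystem encoding, as _unicode_encode does internally.
    path = "metadata.xml".encode(sys.getfilesystemencoding() or "utf-8", "strict")
    tree = etree.parse(path)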
diff --git a/pym/portage/xpak.py b/pym/portage/xpak.py
index 73f84ab75..b4567be05 100644
--- a/pym/portage/xpak.py
+++ b/pym/portage/xpak.py
@@ -1,4 +1,4 @@
-# Copyright 2001-2012 Gentoo Foundation
+# Copyright 2001-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
@@ -15,10 +15,12 @@
# (integer) == encodeint(integer) ===> 4 characters (big-endian copy)
# '+' means concatenate the fields ===> All chunks are strings
-__all__ = ['addtolist', 'decodeint', 'encodeint', 'getboth',
+__all__ = [
+ 'addtolist', 'decodeint', 'encodeint', 'getboth',
'getindex', 'getindex_mem', 'getitem', 'listindex',
'searchindex', 'tbz2', 'xpak_mem', 'xpak', 'xpand',
- 'xsplit', 'xsplit_mem']
+ 'xsplit', 'xsplit_mem',
+]
import array
import errno
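
The retained header comment above specifies 4-byte big-endian integer fields ("(integer) == encodeint(integer) ===> 4 characters"); struct expresses that encoding directly. A sketch of equivalent helpers (the module's own encodeint/decodeint differ in implementation detail):

    import struct

    def encodeint(n):
        # 4 bytes, big-endian, matching the xpak header comment.
        return struct.pack('>I', n)

    def decodeint(data):
        return struct.unpack('>I', data[:4])[0]

    assert decodeint(encodeint(1234)) == 1234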
diff --git a/pym/repoman/checks.py b/pym/repoman/checks.py
index ca4c260b1..8032b28df 100644
--- a/pym/repoman/checks.py
+++ b/pym/repoman/checks.py
@@ -1,10 +1,12 @@
# repoman: Checks
-# Copyright 2007-2012 Gentoo Foundation
+# Copyright 2007-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""This module contains functions used in Repoman to ascertain the quality
and correctness of an ebuild."""
+from __future__ import unicode_literals
+
import codecs
from itertools import chain
import re
@@ -13,8 +15,7 @@ import repoman.errors as errors
import portage
from portage.eapi import eapi_supports_prefix, eapi_has_implicit_rdepend, \
eapi_has_src_prepare_and_src_configure, eapi_has_dosed_dohard, \
- eapi_exports_AA
-from portage.const import _ENABLE_INHERIT_CHECK
+ eapi_exports_AA, eapi_has_pkg_pretend
class LineCheck(object):
"""Run a check on a line of an ebuild."""
@@ -69,7 +70,7 @@ class EbuildHeader(LineCheck):
Copyright header errors
CVS header errors
License header errors
-
+
Args:
modification_year - Year the ebuild was last modified
"""
@@ -112,7 +113,7 @@ class EbuildWhitespace(LineCheck):
ignore_line = re.compile(r'(^$)|(^(\t)*#)')
ignore_comment = False
leading_spaces = re.compile(r'^[\S\t]')
- trailing_whitespace = re.compile(r'.*([\S]$)')
+ trailing_whitespace = re.compile(r'.*([\S]$)')
def check(self, num, line):
if self.leading_spaces.match(line) is None:
@@ -162,6 +163,9 @@ class EbuildQuote(LineCheck):
"GAMES_DATADIR_BASE", "GAMES_SYSCONFDIR", "GAMES_STATEDIR",
"GAMES_LOGDIR", "GAMES_BINDIR"]
+ # variables for multibuild.eclass
+ var_names += ["BUILD_DIR"]
+
var_names = "(%s)" % "|".join(var_names)
var_reference = re.compile(r'\$(\{'+var_names+'\}|' + \
var_names + '\W)')
@@ -169,7 +173,7 @@ class EbuildQuote(LineCheck):
r'\}?[^"\'\s]*(\s|$)')
cond_begin = re.compile(r'(^|\s+)\[\[($|\\$|\s+)')
cond_end = re.compile(r'(^|\s+)\]\]($|\\$|\s+)')
-
+
def check(self, num, line):
if self.var_reference.search(line) is None:
return
@@ -221,21 +225,13 @@ class EbuildAssignment(LineCheck):
"""Ensure ebuilds don't assign to readonly variables."""
repoman_check_name = 'variable.readonly'
-
readonly_assignment = re.compile(r'^\s*(export\s+)?(A|CATEGORY|P|PV|PN|PR|PVR|PF|D|WORKDIR|FILESDIR|FEATURES|USE)=')
- line_continuation = re.compile(r'([^#]*\S)(\s+|\t)\\$')
- ignore_line = re.compile(r'(^$)|(^(\t)*#)')
- ignore_comment = False
-
- def __init__(self):
- self.previous_line = None
def check(self, num, line):
match = self.readonly_assignment.match(line)
e = None
- if match and (not self.previous_line or not self.line_continuation.match(self.previous_line)):
+ if match is not None:
e = errors.READONLY_ASSIGNMENT_ERROR
- self.previous_line = line
return e
class Eapi3EbuildAssignment(EbuildAssignment):
@@ -247,11 +243,11 @@ class Eapi3EbuildAssignment(EbuildAssignment):
return eapi_supports_prefix(eapi)
class EbuildNestedDie(LineCheck):
- """Check ebuild for nested die statements (die statements in subshells"""
-
+ """Check ebuild for nested die statements (die statements in subshells)"""
+
repoman_check_name = 'ebuild.nesteddie'
nesteddie_re = re.compile(r'^[^#]*\s\(\s[^)]*\bdie\b')
-
+
def check(self, num, line):
if self.nesteddie_re.match(line):
return errors.NESTED_DIE_ERROR
@@ -296,7 +292,7 @@ class EapiDefinition(LineCheck):
_eapi_re = portage._pms_eapi_re
def new(self, pkg):
- self._cached_eapi = pkg.metadata['EAPI']
+ self._cached_eapi = pkg.eapi
self._parsed_eapi = None
self._eapi_line_num = None
@@ -386,13 +382,18 @@ class InheritDeprecated(LineCheck):
# deprecated eclass : new eclass (False if no new eclass)
deprecated_classes = {
"bash-completion": "bash-completion-r1",
+ "boost-utils": False,
+ "distutils": "distutils-r1",
"gems": "ruby-fakegem",
"git": "git-2",
+ "mono": "mono-env",
"mozconfig-2": "mozconfig-3",
"mozcoreconf": "mozcoreconf-2",
"php-ext-pecl-r1": "php-ext-pecl-r2",
"php-ext-source-r1": "php-ext-source-r2",
"php-pear": "php-pear-r1",
+ "python": "python-r1 / python-single-r1 / python-any-r1",
+ "python-distutils-ng": "python-r1 + distutils-r1",
"qt3": False,
"qt4": "qt4-r2",
"ruby": "ruby-ng",
@@ -471,13 +472,13 @@ class InheritEclass(LineCheck):
self._inherit_re = re.compile(r'^(\s*|.*[|&]\s*)\binherit\s(.*\s)?%s(\s|$)' % inherit_re)
# Match when the function is preceded only by leading whitespace, a
# shell operator such as (, {, |, ||, or &&, or optional variable
- # setting(s). This prevents false postives in things like elog
+ # setting(s). This prevents false positives in things like elog
# messages, as reported in bug #413285.
self._func_re = re.compile(r'(^|[|&{(])\s*(\w+=.*)?\b(' + '|'.join(funcs) + r')\b')
def new(self, pkg):
self.repoman_check_name = 'inherit.missing'
- # We can't use pkg.inherited because that tells us all the eclass that
+ # We can't use pkg.inherited because that tells us all the eclasses that
# have been inherited and not just the ones we inherit directly.
self._inherit = False
self._func_call = False
@@ -486,6 +487,7 @@ class InheritEclass(LineCheck):
self._disabled = any(x in inherited for x in self._exempt_eclasses)
else:
self._disabled = False
+ self._eapi = pkg.eapi
def check(self, num, line):
if not self._inherit:
@@ -494,10 +496,14 @@ class InheritEclass(LineCheck):
if self._disabled or self._ignore_missing:
return
s = self._func_re.search(line)
- if s:
- self._func_call = True
- return '%s.eclass is not inherited, but "%s" found at line: %s' % \
- (self._eclass, s.group(3), '%d')
+ if s is not None:
+ func_name = s.group(3)
+ eapi_func = _eclass_eapi_functions.get(func_name)
+ if eapi_func is None or not eapi_func(self._eapi):
+ self._func_call = True
+ return ('%s.eclass is not inherited, '
+ 'but "%s" found at line: %s') % \
+ (self._eclass, func_name, '%d')
elif not self._func_call:
self._func_call = self._func_re.search(line)
@@ -506,6 +512,10 @@ class InheritEclass(LineCheck):
self.repoman_check_name = 'inherit.unused'
yield 'no function called from %s.eclass; please drop' % self._eclass
+_eclass_eapi_functions = {
+ "usex" : lambda eapi: eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+}
+
# eclasses that export ${ECLASS}_src_(compile|configure|install)
_eclass_export_functions = (
'ant-tasks', 'apache-2', 'apache-module', 'aspell-dict',
@@ -558,8 +568,7 @@ _eclass_info = {
'funcs': (
'estack_push', 'estack_pop', 'eshopts_push', 'eshopts_pop',
'eumask_push', 'eumask_pop', 'epatch', 'epatch_user',
- 'emktemp', 'edos2unix', 'in_iuse', 'use_if_iuse', 'usex',
- 'makeopts_jobs'
+ 'emktemp', 'edos2unix', 'in_iuse', 'use_if_iuse', 'usex'
),
'comprehensive': False,
@@ -589,8 +598,16 @@ _eclass_info = {
),
# These are "eclasses are the whole ebuild" type thing.
- 'exempt_eclasses': _eclass_export_functions + ('autotools', 'libtool'),
+ 'exempt_eclasses': _eclass_export_functions + ('autotools', 'libtool',
+ 'multilib-minimal'),
+
+ 'comprehensive': False
+ },
+ 'multiprocessing': {
+ 'funcs': (
+ 'makeopts_jobs',
+ ),
'comprehensive': False
},
@@ -617,49 +634,6 @@ _eclass_info = {
}
}
-if not _ENABLE_INHERIT_CHECK:
- # Since the InheritEclass check is experimental, in the stable branch
- # we emulate the old eprefixify.defined and inherit.autotools checks.
- _eclass_info = {
- 'autotools': {
- 'funcs': (
- 'eaclocal', 'eautoconf', 'eautoheader',
- 'eautomake', 'eautoreconf', '_elibtoolize',
- 'eautopoint'
- ),
- 'comprehensive': True,
- 'ignore_missing': True,
- 'exempt_eclasses': ('git', 'git-2', 'subversion', 'autotools-utils')
- },
-
- 'prefix': {
- 'funcs': (
- 'eprefixify',
- ),
- 'comprehensive': False
- }
- }
-
-class IUseUndefined(LineCheck):
- """
- Make sure the ebuild defines IUSE (style guideline
- says to define IUSE even when empty).
- """
-
- repoman_check_name = 'IUSE.undefined'
- _iuse_def_re = re.compile(r'^IUSE=.*')
-
- def new(self, pkg):
- self._iuse_def = None
-
- def check(self, num, line):
- if self._iuse_def is None:
- self._iuse_def = self._iuse_def_re.match(line)
-
- def end(self):
- if self._iuse_def is None:
- yield 'IUSE is not defined'
-
class EMakeParallelDisabled(PhaseCheck):
"""Check for emake -j1 calls which disable parallelization."""
repoman_check_name = 'upstream.workaround'
@@ -684,8 +658,8 @@ class NoAsNeeded(LineCheck):
error = errors.NO_AS_NEEDED
class PreserveOldLib(LineCheck):
- """Check for calls to the preserve_old_lib function."""
- repoman_check_name = 'upstream.workaround'
+ """Check for calls to the deprecated preserve_old_lib function."""
+ repoman_check_name = 'ebuild.minorsyn'
re = re.compile(r'.*preserve_old_lib')
error = errors.PRESERVE_OLD_LIB
@@ -757,6 +731,21 @@ class DeprecatedHasq(LineCheck):
re = re.compile(r'(^|.*\b)hasq\b')
error = errors.HASQ_ERROR
+# EAPI <2 checks
+class UndefinedSrcPrepareSrcConfigurePhases(LineCheck):
+ repoman_check_name = 'EAPI.incompatible'
+ src_configprepare_re = re.compile(r'\s*(src_configure|src_prepare)\s*\(\)')
+
+ def check_eapi(self, eapi):
+ return not eapi_has_src_prepare_and_src_configure(eapi)
+
+ def check(self, num, line):
+ m = self.src_configprepare_re.match(line)
+ if m is not None:
+ return ("'%s'" % m.group(1)) + \
+ " phase is not defined in EAPI < 2 on line: %d"
+
+
# EAPI-3 checks
class Eapi3DeprecatedFuncs(LineCheck):
repoman_check_name = 'EAPI.deprecated'
@@ -771,6 +760,20 @@ class Eapi3DeprecatedFuncs(LineCheck):
return ("'%s'" % m.group(1)) + \
" has been deprecated in EAPI=3 on line: %d"
+# EAPI <4 checks
+class UndefinedPkgPretendPhase(LineCheck):
+ repoman_check_name = 'EAPI.incompatible'
+ pkg_pretend_re = re.compile(r'\s*(pkg_pretend)\s*\(\)')
+
+ def check_eapi(self, eapi):
+ return not eapi_has_pkg_pretend(eapi)
+
+ def check(self, num, line):
+ m = self.pkg_pretend_re.match(line)
+ if m is not None:
+ return ("'%s'" % m.group(1)) + \
+ " phase is not defined in EAPI < 4 on line: %d"
+
# EAPI-4 checks
class Eapi4IncompatibleFuncs(LineCheck):
repoman_check_name = 'EAPI.incompatible'
@@ -803,7 +806,7 @@ class PortageInternal(LineCheck):
repoman_check_name = 'portage.internal'
ignore_comment = True
# Match when the command is preceded only by leading whitespace or a shell
- # operator such as (, {, |, ||, or &&. This prevents false postives in
+ # operator such as (, {, |, ||, or &&. This prevents false positives in
# things like elog messages, as reported in bug #413285.
re = re.compile(r'^(\s*|.*[|&{(]+\s*)\b(ecompress|ecompressdir|env-update|prepall|prepalldocs|preplib)\b')
@@ -813,19 +816,52 @@ class PortageInternal(LineCheck):
if m is not None:
return ("'%s'" % m.group(2)) + " called on line: %d"
-_constant_checks = tuple(chain((c() for c in (
- EbuildHeader, EbuildWhitespace, EbuildBlankLine, EbuildQuote,
- EbuildAssignment, Eapi3EbuildAssignment, EbuildUselessDodoc,
- EbuildUselessCdS, EbuildNestedDie,
- EbuildPatches, EbuildQuotedA, EapiDefinition,
- ImplicitRuntimeDeps, IUseUndefined,
- EMakeParallelDisabled, EMakeParallelDisabledViaMAKEOPTS, NoAsNeeded,
- DeprecatedBindnowFlags, SrcUnpackPatches, WantAutoDefaultValue,
- SrcCompileEconf, Eapi3DeprecatedFuncs, NoOffsetWithHelpers,
- Eapi4IncompatibleFuncs, Eapi4GoneVars, BuiltWithUse,
- PreserveOldLib, SandboxAddpredict, PortageInternal,
- DeprecatedUseq, DeprecatedHasq)),
- (InheritEclass(k, **kwargs) for k, kwargs in _eclass_info.items())))
+class PortageInternalVariableAssignment(LineCheck):
+ repoman_check_name = 'portage.internal'
+ internal_assignment = re.compile(r'\s*(export\s+)?(EXTRA_ECONF|EXTRA_EMAKE)\+?=')
+
+ def check(self, num, line):
+ match = self.internal_assignment.match(line)
+ e = None
+ if match is not None:
+ e = 'Assignment to variable %s' % match.group(2)
+ e += ' on line: %d'
+ return e
+
+_base_check_classes = (InheritEclass, LineCheck, PhaseCheck)
+_constant_checks = None
+
+def _init(experimental_inherit=False):
+
+ global _constant_checks, _eclass_info
+
+ if not experimental_inherit:
+ # Emulate the old eprefixify.defined and inherit.autotools checks.
+ _eclass_info = {
+ 'autotools': {
+ 'funcs': (
+ 'eaclocal', 'eautoconf', 'eautoheader',
+ 'eautomake', 'eautoreconf', '_elibtoolize',
+ 'eautopoint'
+ ),
+ 'comprehensive': True,
+ 'ignore_missing': True,
+ 'exempt_eclasses': ('git', 'git-2', 'subversion', 'autotools-utils')
+ },
+
+ 'prefix': {
+ 'funcs': (
+ 'eprefixify',
+ ),
+ 'comprehensive': False
+ }
+ }
+
+ _constant_checks = tuple(chain((v() for k, v in globals().items()
+ if isinstance(v, type) and issubclass(v, LineCheck) and
+ v not in _base_check_classes),
+ (InheritEclass(k, **portage._native_kwargs(kwargs))
+ for k, kwargs in _eclass_info.items())))
_here_doc_re = re.compile(r'.*\s<<[-]?(\w+)$')
_ignore_comment_re = re.compile(r'^\s*#')
@@ -833,6 +869,8 @@ _ignore_comment_re = re.compile(r'^\s*#')
def run_checks(contents, pkg):
unicode_escape_codec = codecs.lookup('unicode_escape')
unicode_escape = lambda x: unicode_escape_codec.decode(x)[0]
+ if _constant_checks is None:
+ _init()
checks = _constant_checks
here_doc_delim = None
multiline = None
@@ -888,17 +926,18 @@ def run_checks(contents, pkg):
multiline = line
continue
- # Finally we have a full line to parse.
- is_comment = _ignore_comment_re.match(line) is not None
- for lc in checks:
- if is_comment and lc.ignore_comment:
- continue
- if lc.check_eapi(pkg.metadata['EAPI']):
- ignore = lc.ignore_line
- if not ignore or not ignore.match(line):
- e = lc.check(num, line)
- if e:
- yield lc.repoman_check_name, e % (num + 1)
+ if not line.endswith("#nowarn\n"):
+ # Finally we have a full line to parse.
+ is_comment = _ignore_comment_re.match(line) is not None
+ for lc in checks:
+ if is_comment and lc.ignore_comment:
+ continue
+ if lc.check_eapi(pkg.eapi):
+ ignore = lc.ignore_line
+ if not ignore or not ignore.match(line):
+ e = lc.check(num, line)
+ if e:
+ yield lc.repoman_check_name, e % (num + 1)
for lc in checks:
i = lc.end()
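
run_checks() above now skips any line ending in "#nowarn", giving ebuild authors a per-line escape hatch from false positives. The control flow in isolation (the check and the sample lines are made up for illustration):

    import re

    trailing_ws = re.compile(r'.*\s$')

    def run_checks(lines):
        for num, line in enumerate(lines):
            if line.endswith("#nowarn\n"):
                continue  # author explicitly waived checks on this line
            if trailing_ws.match(line):
                yield 'whitespace.trailing', 'trailing space on line: %d' % (num + 1)

    lines = ['good line\n', 'bad line \n', 'waived line  #nowarn\n']
    print(list(run_checks(lines)))  # only line 2 is reported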
diff --git a/pym/repoman/errors.py b/pym/repoman/errors.py
index c515502c4..3833be671 100644
--- a/pym/repoman/errors.py
+++ b/pym/repoman/errors.py
@@ -1,7 +1,9 @@
# repoman: Error Messages
-# Copyright 2007-2011 Gentoo Foundation
+# Copyright 2007-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
COPYRIGHT_ERROR = 'Invalid Gentoo Copyright on line: %d'
LICENSE_ERROR = 'Invalid Gentoo/GPL License on line: %d'
CVS_HEADER_ERROR = 'Malformed CVS Header on line: %d'
@@ -17,7 +19,7 @@ EMAKE_PARALLEL_DISABLED_VIA_MAKEOPTS = 'Upstream parallel compilation bug (MAKEO
DEPRECATED_BINDNOW_FLAGS = 'Deprecated bindnow-flags call on line: %d'
EAPI_DEFINED_AFTER_INHERIT = 'EAPI defined after inherit on line: %d'
NO_AS_NEEDED = 'Upstream asneeded linking bug (no-as-needed on line: %d)'
-PRESERVE_OLD_LIB = 'Upstream ABI change workaround on line: %d'
+PRESERVE_OLD_LIB = 'Ebuild calls deprecated preserve_old_lib on line: %d'
BUILT_WITH_USE = 'built_with_use on line: %d'
NO_OFFSET_WITH_HELPERS = "Helper function is used with D, ROOT, ED, EROOT or EPREFIX on line :%d"
SANDBOX_ADDPREDICT = 'Ebuild calls addpredict on line: %d'
diff --git a/pym/repoman/herdbase.py b/pym/repoman/herdbase.py
index fcf58b36c..c5b88ff17 100644
--- a/pym/repoman/herdbase.py
+++ b/pym/repoman/herdbase.py
@@ -1,8 +1,10 @@
# -*- coding: utf-8 -*-
# repoman: Herd database analysis
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2 or later
+from __future__ import unicode_literals
+
import errno
import xml.etree.ElementTree
try:
@@ -17,6 +19,8 @@ except (ImportError, SystemError, RuntimeError, Exception):
# modules, so that ImportModulesTestCase can succeed (or
# possibly alert us about unexpected import failures).
pass
+
+from portage import _encodings, _unicode_encode
from portage.exception import FileNotFound, ParseError, PermissionDenied
__all__ = [
@@ -56,11 +60,12 @@ def make_herd_base(filename):
all_emails = set()
try:
- xml_tree = xml.etree.ElementTree.parse(filename,
+ xml_tree = xml.etree.ElementTree.parse(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'),
parser=xml.etree.ElementTree.XMLParser(
target=_HerdsTreeBuilder()))
except ExpatError as e:
- raise ParseError("metadata.xml: " + str(e))
+ raise ParseError("metadata.xml: %s" % (e,))
except EnvironmentError as e:
func_call = "open('%s')" % filename
if e.errno == errno.EACCES:
diff --git a/pym/repoman/utilities.py b/pym/repoman/utilities.py
index 013858a6d..aec61fe2f 100644
--- a/pym/repoman/utilities.py
+++ b/pym/repoman/utilities.py
@@ -1,11 +1,11 @@
# repoman: Utilities
-# Copyright 2007-2012 Gentoo Foundation
+# Copyright 2007-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""This module contains utility functions to help repoman find ebuilds to
scan"""
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
__all__ = [
"detect_vcs_conflicts",
@@ -25,6 +25,7 @@ __all__ = [
"UpdateChangeLog"
]
+import collections
import errno
import io
from itertools import chain
@@ -33,18 +34,20 @@ import pwd
import re
import stat
import sys
+import subprocess
import time
import textwrap
import difflib
from tempfile import mkstemp
+import portage
from portage import os
from portage import shutil
-from portage import subprocess_getstatusoutput
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
from portage import output
+from portage.const import BASH_BINARY
from portage.localization import _
from portage.output import red, green
from portage.process import find_binary
@@ -71,22 +74,31 @@ def detect_vcs_conflicts(options, vcs):
Returns:
None (calls sys.exit on fatal problems)
"""
- retval = ("","")
+
+ cmd = None
if vcs == 'cvs':
logging.info("Performing a " + output.green("cvs -n up") + \
" with a little magic grep to check for updates.")
- retval = subprocess_getstatusoutput("cvs -n up 2>/dev/null | " + \
+ cmd = "cvs -n up 2>/dev/null | " + \
"egrep '^[^\?] .*' | " + \
- "egrep -v '^. .*/digest-[^/]+|^cvs server: .* -- ignored$'")
+ "egrep -v '^. .*/digest-[^/]+|^cvs server: .* -- ignored$'"
if vcs == 'svn':
logging.info("Performing a " + output.green("svn status -u") + \
" with a little magic grep to check for updates.")
- retval = subprocess_getstatusoutput("svn status -u 2>&1 | " + \
+ cmd = "svn status -u 2>&1 | " + \
"egrep -v '^. +.*/digest-[^/]+' | " + \
- "head -n-1")
-
- if vcs in ['cvs', 'svn']:
- mylines = retval[1].splitlines()
+ "head -n-1"
+
+ if cmd is not None:
+ # Use Popen instead of getstatusoutput(), in order to avoid
+ # unicode handling problems (see bug #310789).
+ args = [BASH_BINARY, "-c", cmd]
+ args = [_unicode_encode(x) for x in args]
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ out = _unicode_decode(proc.communicate()[0])
+ proc.wait()
+ mylines = out.splitlines()
myupdates = []
for line in mylines:
if not line:
@@ -98,7 +110,7 @@ def detect_vcs_conflicts(options, vcs):
logging.error(red("!!! Please fix the following issues reported " + \
"from cvs: ")+green("(U,P,M,A,R,D are ok)"))
logging.error(red("!!! Note: This is a pretend/no-modify pass..."))
- logging.error(retval[1])
+ logging.error(out)
sys.exit(1)
elif vcs == 'cvs' and line[0] in "UP":
myupdates.append(line[2:])
@@ -298,12 +310,12 @@ def format_qa_output(formatter, stats, fails, dofull, dofail, options, qawarning
# we only want key value pairs where value > 0
for category, number in \
filter(lambda myitem: myitem[1] > 0, iter(stats.items())):
- formatter.add_literal_data(_unicode_decode(" " + category.ljust(30)))
+ formatter.add_literal_data(" " + category.ljust(30))
if category in qawarnings:
formatter.push_style("WARN")
else:
formatter.push_style("BAD")
- formatter.add_literal_data(_unicode_decode(str(number)))
+ formatter.add_literal_data("%s" % number)
formatter.pop_style()
formatter.add_line_break()
if not dofull:
@@ -314,10 +326,54 @@ def format_qa_output(formatter, stats, fails, dofull, dofail, options, qawarning
if not full and len(fails_list) > 12:
fails_list = fails_list[:12]
for failure in fails_list:
- formatter.add_literal_data(_unicode_decode(" " + failure))
+ formatter.add_literal_data(" " + failure)
formatter.add_line_break()
+def format_qa_output_column(formatter, stats, fails, dofull, dofail, options, qawarnings):
+ """Helper function that formats output in a machine-parseable column format
+
+ @param formatter: an instance of Formatter
+ @type formatter: Formatter
+ @param path: dict of qa status items
+ @type path: dict
+ @param fails: dict of qa status failures
+ @type fails: dict
+ @param dofull: Whether to print full results or a summary
+ @type dofull: boolean
+ @param dofail: Whether failure was hard or soft
+ @type dofail: boolean
+ @param options: The command-line options provided to repoman
+ @type options: Namespace
+ @param qawarnings: the set of warning types
+ @type qawarnings: set
+ @return: None (modifies formatter)
+ """
+ full = options.mode == 'full'
+ for category, number in stats.items():
+ # we only want key value pairs where value > 0
+ if number < 1:
+ continue
+
+ formatter.add_literal_data("NumberOf " + category + " ")
+ if category in qawarnings:
+ formatter.push_style("WARN")
+ else:
+ formatter.push_style("BAD")
+ formatter.add_literal_data("%s" % number)
+ formatter.pop_style()
+ formatter.add_line_break()
+ if not dofull:
+ if not full and dofail and category in qawarnings:
+ # warnings are considered noise when there are failures
+ continue
+ fails_list = fails[category]
+ if not full and len(fails_list) > 12:
+ fails_list = fails_list[:12]
+ for failure in fails_list:
+ formatter.add_literal_data(category + " " + failure)
+ formatter.add_line_break()
+
def editor_is_executable(editor):
"""
Given an EDITOR string, validate that it refers to
@@ -367,10 +423,11 @@ def get_commit_message_with_editor(editor, message=None):
if not (os.WIFEXITED(retval) and os.WEXITSTATUS(retval) == os.EX_OK):
return None
try:
- mylines = io.open(_unicode_encode(filename,
+ with io.open(_unicode_encode(filename,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'], errors='replace'
- ).readlines()
+ ) as f:
+ mylines = f.readlines()
except OSError as e:
if e.errno != errno.ENOENT:
raise
@@ -427,7 +484,7 @@ def FindPortdir(settings):
portdir = None
portdir_overlay = None
location = os.getcwd()
- pwd = os.environ.get('PWD', '')
+ pwd = _unicode_decode(os.environ.get('PWD', ''), encoding=_encodings['fs'])
if pwd and pwd != location and os.path.realpath(pwd) == location:
# getcwd() returns the canonical path but that makes it hard for repoman to
# orient itself if the user has symlinks in their portage tree structure.
@@ -449,7 +506,7 @@ def FindPortdir(settings):
if location[-1] != "/":
location += "/"
- for overlay in settings["PORTDIR_OVERLAY"].split():
+ for overlay in portage.util.shlex_split(settings["PORTDIR_OVERLAY"]):
overlay = os.path.realpath(overlay)
try:
s = os.stat(overlay)
@@ -509,6 +566,28 @@ def FindPortdir(settings):
return [normalize_path(x) for x in (portdir, portdir_overlay, location)]
+_vcs_type = collections.namedtuple('_vcs_type',
+ 'name dir_name')
+
+_FindVCS_data = (
+ _vcs_type(
+ name = 'git',
+ dir_name = '.git'
+ ),
+ _vcs_type(
+ name = 'bzr',
+ dir_name = '.bzr'
+ ),
+ _vcs_type(
+ name = 'hg',
+ dir_name = '.hg'
+ ),
+ _vcs_type(
+ name = 'svn',
+ dir_name = '.svn'
+ )
+)
+
def FindVCS():
""" Try to figure out in what VCS' working tree we are. """
@@ -520,14 +599,13 @@ def FindVCS():
pathprep = ''
while depth is None or depth > 0:
- if os.path.isdir(os.path.join(pathprep, '.git')):
- retvcs.append('git')
- if os.path.isdir(os.path.join(pathprep, '.bzr')):
- retvcs.append('bzr')
- if os.path.isdir(os.path.join(pathprep, '.hg')):
- retvcs.append('hg')
- if os.path.isdir(os.path.join(pathprep, '.svn')): # >=1.7
- retvcs.append('svn')
+ for vcs_type in _FindVCS_data:
+ vcs_dir = os.path.join(pathprep, vcs_type.dir_name)
+ if os.path.isdir(vcs_dir):
+ logging.debug('FindVCS: found %(name)s dir: %(vcs_dir)s' %
+ {'name': vcs_type.name,
+ 'vcs_dir': os.path.abspath(vcs_dir)})
+ retvcs.append(vcs_type.name)
if retvcs:
break
@@ -763,7 +841,7 @@ def UpdateChangeLog(pkgdir, user, msg, skel_path, category, package,
line = line.replace('<PACKAGE_NAME>', package)
line = _update_copyright_year(year, line)
header_lines.append(line)
- header_lines.append(_unicode_decode('\n'))
+ header_lines.append('\n')
clskel_file.close()
# write new ChangeLog entry
@@ -773,10 +851,10 @@ def UpdateChangeLog(pkgdir, user, msg, skel_path, category, package,
if not fn.endswith('.ebuild'):
continue
ebuild = fn.split(os.sep)[-1][0:-7]
- clnew_lines.append(_unicode_decode('*%s (%s)\n' % (ebuild, date)))
+ clnew_lines.append('*%s (%s)\n' % (ebuild, date))
newebuild = True
if newebuild:
- clnew_lines.append(_unicode_decode('\n'))
+ clnew_lines.append('\n')
trivial_files = ('ChangeLog', 'Manifest')
display_new = ['+' + elem for elem in new
if elem not in trivial_files]
@@ -803,19 +881,19 @@ def UpdateChangeLog(pkgdir, user, msg, skel_path, category, package,
for line in textwrap.wrap(mesg, 80, \
initial_indent=' ', subsequent_indent=' ', \
break_on_hyphens=False):
- clnew_lines.append(_unicode_decode('%s\n' % line))
+ clnew_lines.append('%s\n' % line)
for line in textwrap.wrap(msg, 80, \
initial_indent=' ', subsequent_indent=' '):
- clnew_lines.append(_unicode_decode('%s\n' % line))
+ clnew_lines.append('%s\n' % line)
# Don't append a trailing newline if the file is new.
if clold_file is not None:
- clnew_lines.append(_unicode_decode('\n'))
+ clnew_lines.append('\n')
f = io.open(f, mode='w', encoding=_encodings['repo.content'],
errors='backslashreplace')
for line in clnew_lines:
- f.write(_unicode_decode(line))
+ f.write(line)
# append stuff from old ChangeLog
if clold_file is not None:
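
detect_vcs_conflicts() above replaces subprocess_getstatusoutput() with an explicit Popen so the output bytes are decoded deliberately (the bug #310789 comment). The core pattern, standalone (the shell pipeline and bash path are illustrative; Portage uses BASH_BINARY and its own codec helpers):

    import subprocess

    cmd = "svn status -u 2>&1 | head -n-1"
    proc = subprocess.Popen(["/bin/bash", "-c", cmd],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out = proc.communicate()[0].decode("utf-8", "replace")
    proc.wait()
    for line in out.splitlines():
        print(line)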
diff --git a/runtests.sh b/runtests.sh
index f65bb619f..7999220b9 100755
--- a/runtests.sh
+++ b/runtests.sh
@@ -1,8 +1,10 @@
#!/bin/bash
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-PYTHON_VERSIONS="2.6 2.7 2.7-pypy-1.8 2.7-pypy-1.9 3.1 3.2 3.3"
+# These are the versions we care about. The rest are just "nice to have".
+PYTHON_SUPPORTED_VERSIONS="2.6 2.7 3.2 3.3 3.4"
+PYTHON_VERSIONS="2.6 2.7 2.7-pypy-1.8 2.7-pypy-1.9 2.7-pypy-2.0 3.1 3.2 3.3 3.4 3.5"
# has to be run from portage root dir
cd "${0%/*}" || exit 1
@@ -28,15 +30,18 @@ interrupted() {
trap interrupted SIGINT
unused_args=()
+IGNORE_MISSING_VERSIONS=true
while [ $# -gt 0 ] ; do
case "$1" in
--python-versions=*)
PYTHON_VERSIONS=${1#--python-versions=}
+ IGNORE_MISSING_VERSIONS=false
;;
--python-versions)
shift
PYTHON_VERSIONS=$1
+ IGNORE_MISSING_VERSIONS=false
;;
*)
unused_args[${#unused_args[@]}]=$1
@@ -44,11 +49,16 @@ while [ $# -gt 0 ] ; do
esac
shift
done
+if [[ ${PYTHON_VERSIONS} == "supported" ]] ; then
+ PYTHON_VERSIONS=${PYTHON_SUPPORTED_VERSIONS}
+fi
set -- "${unused_args[@]}"
eprefix=${PORTAGE_OVERRIDE_EPREFIX}
exit_status="0"
+found_versions=()
+status_array=()
for version in ${PYTHON_VERSIONS}; do
if [[ $version =~ ^([[:digit:]]+\.[[:digit:]]+)-pypy-([[:digit:]]+\.[[:digit:]]+)$ ]] ; then
executable=${eprefix}/usr/bin/pypy-c${BASH_REMATCH[2]}
@@ -57,12 +67,43 @@ for version in ${PYTHON_VERSIONS}; do
fi
if [[ -x "${executable}" ]]; then
echo -e "${GOOD}Testing with Python ${version}...${NORMAL}"
- if ! "${executable}" -Wd pym/portage/tests/runTests "$@" ; then
+ "${executable}" -b -Wd pym/portage/tests/runTests "$@"
+ status=$?
+ status_array[${#status_array[@]}]=${status}
+ found_versions[${#found_versions[@]}]=${version}
+ if [ ${status} -ne 0 ] ; then
echo -e "${BAD}Testing with Python ${version} failed${NORMAL}"
exit_status="1"
fi
echo
+ elif [[ ${IGNORE_MISSING_VERSIONS} != "true" ]] ; then
+ echo -e "${BAD}Could not find requested Python ${version}${NORMAL}"
+ exit_status="1"
fi
done
+if [ ${#status_array[@]} -gt 0 ] ; then
+ max_len=0
+ for version in ${found_versions[@]} ; do
+ [ ${#version} -gt ${max_len} ] && max_len=${#version}
+ done
+ (( columns = max_len + 2 ))
+ (( columns >= 7 )) || columns=7
+ printf "\nSummary:\n\n"
+ printf "| %-${columns}s | %s\n|" "Version" "Status"
+ (( total_cols = columns + 11 ))
+ eval "printf -- '-%.0s' {1..${total_cols}}"
+ printf "\n"
+ row=0
+ for version in ${found_versions[@]} ; do
+ if [ ${status_array[${row}]} -eq 0 ] ; then
+ status="success"
+ else
+ status="fail"
+ fi
+ printf "| %-${columns}s | %s\n" "${version}" "${status}"
+ (( row++ ))
+ done
+fi
+
exit ${exit_status}
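
The summary block above pads the Version column to the longest interpreter name found (never narrower than 7) and prints one row per result. The same layout logic, restated in a few lines of Python for clarity (the version and status lists are hypothetical):

    found = ["2.7", "2.7-pypy-2.0", "3.3"]
    status = ["success", "fail", "success"]

    columns = max(7, max(len(v) for v in found) + 2)
    print("| %-*s | %s" % (columns, "Version", "Status"))
    print("-" * (columns + 11))
    for version, result in zip(found, status):
        print("| %-*s | %s" % (columns, version, result))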
diff --git a/tabcheck.py b/tabcheck.py
index 1699e9883..2d45cdead 100755
--- a/tabcheck.py
+++ b/tabcheck.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python -O
+#!/usr/bin/python -bO
import tabnanny,sys